title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
Updated URL for spam test
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index e1e40edd48873..078b5f8448d46 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -99,10 +99,9 @@ def test_banklist_url(self): assert_framelist_equal(df1, df2) - @pytest.mark.xfail(reason="no tables found: gh-20685") @network def test_spam_url(self): - url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&' + url = ('http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&' 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam') df1 = self.read_html(url, '.*Water.*') df2 = self.read_html(url, 'Unit')
- [X] closes #20685 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20689
2018-04-13T16:34:22Z
2018-04-13T17:13:50Z
2018-04-13T17:13:49Z
2018-05-14T21:11:07Z
README.md
diff --git a/README.md b/README.md index 86cf95508a5d9..7f252dd205278 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,7 @@ in automatically (the `-e` option is for installing it in [development mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs)): ```sh -pip install -e . +pip install -e ``` See the full instructions for [installing from source](https://pandas.pydata.org/pandas-docs/stable/install.html#installing-from-source).
Checklist for PRs: - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20687
2018-04-13T14:27:30Z
2018-04-14T12:31:58Z
null
2018-04-14T12:31:58Z
Update README.md
diff --git a/README.md b/README.md index 86cf95508a5d9..7f252dd205278 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,7 @@ in automatically (the `-e` option is for installing it in [development mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs)): ```sh -pip install -e . +pip install -e ``` See the full instructions for [installing from source](https://pandas.pydata.org/pandas-docs/stable/install.html#installing-from-source).
Remove dot (".") from end of pip command Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [ ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20683
2018-04-13T11:23:13Z
2018-04-14T12:32:27Z
null
2018-04-14T12:32:27Z
BUG: Fix problems in group rank when both nans and infinity are present #20561
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e340acc17fe9f..e19aedac80213 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -221,6 +221,12 @@ Current Behavior: s.rank(na_option='top') +These bugs were squashed: + +- Bug in :meth:`DataFrame.rank` and :meth:`Series.rank` when ``method='dense'`` and ``pct=True`` in which percentile ranks were not being used with the number of distinct observations (:issue:`15630`) +- Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``ascending='False'`` failed to return correct ranks for infinity if ``NaN`` were present (:issue:`19538`) +- Bug in :func:`DataFrameGroupBy.rank` where ranks were incorrect when both infinity and ``NaN`` were present (:issue:`20561`) + .. _whatsnew_0230.enhancements.round-trippable_json: JSON read/write round-trippable with ``orient='table'`` @@ -1082,14 +1088,12 @@ Offsets Numeric ^^^^^^^ -- Bug in :meth:`DataFrame.rank` and :meth:`Series.rank` when ``method='dense'`` and ``pct=True`` in which percentile ranks were not being used with the number of distinct observations (:issue:`15630`) - Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) - Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`) - Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`) - Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) - Bug in :class:`DataFrame` flex arithmetic (e.g. 
``df.add(other, fill_value=foo)``) with a ``fill_value`` other than ``None`` failed to raise ``NotImplementedError`` in corner cases where either the frame or ``other`` has length zero (:issue:`19522`) - Multiplication and division of numeric-dtyped :class:`Index` objects with timedelta-like scalars returns ``TimedeltaIndex`` instead of raising ``TypeError`` (:issue:`19333`) -- Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``ascending='False'`` failed to return correct ranks for infinity if ``NaN`` were present (:issue:`19538`) - Bug where ``NaN`` was returned instead of 0 by :func:`Series.pct_change` and :func:`DataFrame.pct_change` when ``fill_method`` is not ``None`` (:issue:`19873`) diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index de802f4a72277..6a33e4a09476d 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -417,7 +417,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, ndarray[int64_t] labels, bint is_datetimelike, object ties_method, bint ascending, bint pct, object na_option): - """Provides the rank of values within each group + """ + Provides the rank of values within each group. 
Parameters ---------- @@ -425,17 +426,24 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, values : array of {{c_type}} values to be ranked labels : array containing unique label for each group, with its ordering matching up to the corresponding record in `values` - is_datetimelike : bool + is_datetimelike : bool, default False unused in this method but provided for call compatibility with other Cython transformations - ties_method : {'keep', 'top', 'bottom'} + ties_method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' + * average: average rank of group + * min: lowest rank in group + * max: highest rank in group + * first: ranks assigned in order they appear in the array + * dense: like 'min', but rank always increases by 1 between groups + ascending : boolean, default True + False for ranks by high (1) to low (N) + na_option : {'keep', 'top', 'bottom'}, default 'keep' + pct : boolean, default False + Compute percentage rank of data within each group + na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending - ascending : boolean - False for ranks by high (1) to low (N) - pct : boolean - Compute percentage rank of data within each group Notes ----- @@ -508,7 +516,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, # if keep_na, check for missing values and assign back # to the result where appropriate - if keep_na and masked_vals[_as[i]] == nan_fill_val: + + if keep_na and mask[_as[i]]: grp_na_count += 1 out[_as[i], 0] = nan else: @@ -548,9 +557,9 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, # reset the dups and sum_ranks, knowing that a new value is coming # up. 
the conditional also needs to handle nan equality and the # end of iteration - if (i == N - 1 or ( - (masked_vals[_as[i]] != masked_vals[_as[i+1]]) and not - (mask[_as[i]] and mask[_as[i+1]]))): + if (i == N - 1 or + (masked_vals[_as[i]] != masked_vals[_as[i+1]]) or + (mask[_as[i]] ^ mask[_as[i+1]])): dups = sum_ranks = 0 val_start = i grp_vals_seen += 1 diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index cdb4e3072c65d..c3400b6b710e5 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1965,6 +1965,55 @@ def test_rank_args(self, grps, vals, ties_method, ascending, pct, exp): exp_df = DataFrame(exp * len(grps), columns=['val']) assert_frame_equal(result, exp_df) + @pytest.mark.parametrize("grps", [ + ['qux'], ['qux', 'quux']]) + @pytest.mark.parametrize("vals", [ + [-np.inf, -np.inf, np.nan, 1., np.nan, np.inf, np.inf], + ]) + @pytest.mark.parametrize("ties_method,ascending,na_option,exp", [ + ('average', True, 'keep', [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]), + ('average', True, 'top', [3.5, 3.5, 1.5, 5., 1.5, 6.5, 6.5]), + ('average', True, 'bottom', [1.5, 1.5, 6.5, 3., 6.5, 4.5, 4.5]), + ('average', False, 'keep', [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]), + ('average', False, 'top', [6.5, 6.5, 1.5, 5., 1.5, 3.5, 3.5]), + ('average', False, 'bottom', [4.5, 4.5, 6.5, 3., 6.5, 1.5, 1.5]), + ('min', True, 'keep', [1., 1., np.nan, 3., np.nan, 4., 4.]), + ('min', True, 'top', [3., 3., 1., 5., 1., 6., 6.]), + ('min', True, 'bottom', [1., 1., 6., 3., 6., 4., 4.]), + ('min', False, 'keep', [4., 4., np.nan, 3., np.nan, 1., 1.]), + ('min', False, 'top', [6., 6., 1., 5., 1., 3., 3.]), + ('min', False, 'bottom', [4., 4., 6., 3., 6., 1., 1.]), + ('max', True, 'keep', [2., 2., np.nan, 3., np.nan, 5., 5.]), + ('max', True, 'top', [4., 4., 2., 5., 2., 7., 7.]), + ('max', True, 'bottom', [2., 2., 7., 3., 7., 5., 5.]), + ('max', False, 'keep', [5., 5., np.nan, 3., np.nan, 2., 2.]), + ('max', 
False, 'top', [7., 7., 2., 5., 2., 4., 4.]), + ('max', False, 'bottom', [5., 5., 7., 3., 7., 2., 2.]), + ('first', True, 'keep', [1., 2., np.nan, 3., np.nan, 4., 5.]), + ('first', True, 'top', [3., 4., 1., 5., 2., 6., 7.]), + ('first', True, 'bottom', [1., 2., 6., 3., 7., 4., 5.]), + ('first', False, 'keep', [4., 5., np.nan, 3., np.nan, 1., 2.]), + ('first', False, 'top', [6., 7., 1., 5., 2., 3., 4.]), + ('first', False, 'bottom', [4., 5., 6., 3., 7., 1., 2.]), + ('dense', True, 'keep', [1., 1., np.nan, 2., np.nan, 3., 3.]), + ('dense', True, 'top', [2., 2., 1., 3., 1., 4., 4.]), + ('dense', True, 'bottom', [1., 1., 4., 2., 4., 3., 3.]), + ('dense', False, 'keep', [3., 3., np.nan, 2., np.nan, 1., 1.]), + ('dense', False, 'top', [4., 4., 1., 3., 1., 2., 2.]), + ('dense', False, 'bottom', [3., 3., 4., 2., 4., 1., 1.]) + ]) + def test_infs_n_nans(self, grps, vals, ties_method, ascending, na_option, + exp): + # GH 20561 + key = np.repeat(grps, len(vals)) + vals = vals * len(grps) + df = DataFrame({'key': key, 'val': vals}) + result = df.groupby('key').rank(method=ties_method, + ascending=ascending, + na_option=na_option) + exp_df = DataFrame(exp * len(grps), columns=['val']) + assert_frame_equal(result, exp_df) + @pytest.mark.parametrize("grps", [ ['qux'], ['qux', 'quux']]) @pytest.mark.parametrize("vals", [
Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ############# Docstring (pandas._libs.groupby.group_rank_float64) ############# ################################################################################ Provides the rank of values within each group Parameters ---------- out : array of float64_t values which this method will write its results to values : array of float64_t values to be ranked labels : array containing unique label for each group, with its ordering matching up to the corresponding record in `values` is_datetimelike : bool unused in this method but provided for call compatibility with other Cython transformations ties_method : {'keep', 'top', 'bottom'} * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending ascending : boolean False for ranks by high (1) to low (N) pct : boolean Compute percentage rank of data within each group Notes ----- This method modifies the `out` parameter rather than returning an object ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Docstring text (summary) should start in the line immediately after the opening quotes (not in the same line, or leaving a blank line in between) Summary does not end with dot No extended summary found Errors in parameters section Unknown parameters {'values', 'pct', 'labels', 'out', 'ties_method', 'ascending', 'is_datetimelike'} Parameter "out" has no description Parameter "values" has no description Parameter "labels" description should start with capital letter Parameter "labels" description should finish with "." 
Parameter "is_datetimelike" description should start with capital letter Parameter "is_datetimelike" description should finish with "." Parameter "ties_method" description should start with capital letter Parameter "ties_method" description should finish with "." Parameter "ascending" description should finish with "." Parameter "pct" description should finish with "." No returns section found See Also section not found No examples section found ``` Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #20561 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20681
2018-04-13T09:35:40Z
2018-04-21T18:23:55Z
2018-04-21T18:23:55Z
2018-05-09T10:22:30Z
BUG: fixes indexing with monotonic decreasing DTI (#19362)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1992c27fd11ed..0a46b444026b5 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1079,6 +1079,7 @@ Indexing - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` in presence of entire rows of NaNs in the middle of values (:issue:`20499`). - Bug in :class:`IntervalIndex` where some indexing operations were not supported for overlapping or non-monotonic ``uint64`` data (:issue:`20636`) - Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`) +- Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 95e1f8438c704..95186b2e79a16 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -342,7 +342,8 @@ def _format_with_header(self, header, **kwargs): def __contains__(self, key): try: res = self.get_loc(key) - return is_scalar(res) or type(res) == slice or np.any(res) + return (is_scalar(res) or isinstance(res, slice) or + (is_list_like(res) and len(res))) except (KeyError, TypeError, ValueError): return False diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index f263ac78cd343..4580d9fff31d5 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -91,6 +91,27 @@ def test_slice_duplicate_monotonic(self): expected = Timestamp('2017-01-01') assert result == expected + def test_monotone_DTI_indexing_bug(self): + # GH 19362 + # Testing accessing the first element in a montononic descending + # partial string indexing. 
+ + df = pd.DataFrame(list(range(5))) + date_list = ['2018-01-02', '2017-02-10', '2016-03-10', + '2015-03-15', '2014-03-16'] + date_index = pd.to_datetime(date_list) + df['date'] = date_index + expected = pd.DataFrame({0: list(range(5)), 'date': date_index}) + tm.assert_frame_equal(df, expected) + + df = pd.DataFrame({'A': [1, 2, 3]}, + index=pd.date_range('20170101', + periods=3)[::-1]) + expected = pd.DataFrame({'A': 1}, + index=pd.date_range('20170103', + periods=1)) + tm.assert_frame_equal(df.loc['2017-01-03'], expected) + def test_slice_year(self): dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
- [x] closes #19362 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Seems that #19362 is caused by the fact that `np.any([0])` is false. Here's a suggested solution. Also added a test.
https://api.github.com/repos/pandas-dev/pandas/pulls/20677
2018-04-13T06:30:02Z
2018-04-20T10:39:04Z
2018-04-20T10:39:04Z
2018-04-20T12:46:02Z
BUG: Fixed exception when Series.str.get is used with dict values and the index is not an existing key (#20671)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index eee0f1997d081..24bea024bc9e5 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1053,6 +1053,10 @@ Numeric - Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``ascending='False'`` failed to return correct ranks for infinity if ``NaN`` were present (:issue:`19538`) - Bug where ``NaN`` was returned instead of 0 by :func:`Series.pct_change` and :func:`DataFrame.pct_change` when ``fill_method`` is not ``None`` (:issue:`19873`) +Strings +^^^^^^^ +- Bug in :func:`Series.str.get` with a dictionary in the values and the index not in the keys, raising `KeyError` (:issue:`20671`) + Indexing ^^^^^^^^ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 23c891ec4fcd0..f5415172d54a7 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1663,7 +1663,12 @@ def str_get(arr, i): ------- items : Series/Index of objects """ - f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan + def f(x): + if isinstance(x, dict): + return x.get(i) + elif len(x) > i >= -len(x): + return x[i] + return np.nan return _na_map(f, arr) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index aa94b992facfc..ac8d269c75f52 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2568,6 +2568,31 @@ def test_get(self): expected = Series(['3', '8', np.nan]) tm.assert_series_equal(result, expected) + def test_get_complex(self): + # GH 20671, getting value not in dict raising `KeyError` + values = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, + {1: 'a', 2: 'b', 3: 'c'}]) + + result = values.str.get(1) + expected = Series([2, 2, np.nan, 'a']) + tm.assert_series_equal(result, expected) + + result = values.str.get(-1) + expected = Series([3, 3, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('to_type', [tuple, list, np.array]) + def test_get_complex_nested(self, to_type): + 
values = Series([to_type([to_type([1, 2])])]) + + result = values.str.get(0) + expected = Series([to_type([1, 2])]) + tm.assert_series_equal(result, expected) + + result = values.str.get(1) + expected = Series([np.nan]) + tm.assert_series_equal(result, expected) + def test_more_contains(self): # PR #1179 s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
- [X] closes #20671 - [X] tests added / passed (* unrelated tests failed, with locale problems, not sure if caused by something on my system or something on pandas) - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20672
2018-04-12T22:50:28Z
2018-04-24T10:36:47Z
2018-04-24T10:36:47Z
2018-04-24T10:36:50Z
ERR: Consistent errors for non-numeric ranking. (#19560)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 408a52e0526ee..3ed1e6d869aa1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -418,6 +418,8 @@ Other Enhancements Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- Using :func:`DataFrame.rank` on a data frame with non-numeric entries other than ordered categoricals will raise a ValueError. + .. _whatsnew_0230.api_breaking.deps: Dependencies have increased minimum versions diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 065a5782aced1..9a2363c31d066 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -859,6 +859,17 @@ def rank(values, axis=0, method='average', na_option='keep', Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """ + if is_object_dtype(values): + def raise_non_numeric_error(): + raise ValueError("pandas.core.algorithms.rank " + "not supported for unordered " + "non-numeric data") + if is_categorical_dtype(values): + if not values.ordered: + raise_non_numeric_error() + else: + raise_non_numeric_error() + if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py index b8ba408b54715..785b470d6ef48 100644 --- a/pandas/tests/frame/test_rank.py +++ b/pandas/tests/frame/test_rank.py @@ -71,23 +71,22 @@ def test_rank2(self): result = df.rank(0, pct=True) tm.assert_frame_equal(result, expected) - df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']]) - expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]]) - result = df.rank(1, numeric_only=False) - tm.assert_frame_equal(result, expected) + # See #19560 + error_msg = ("pandas.core.algorithms.rank " + "not supported for unordered " + "non-numeric data") - expected = DataFrame([[2.0, 1.5, 1.0], 
[1, 1.5, 2]]) - result = df.rank(0, numeric_only=False) - tm.assert_frame_equal(result, expected) + df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']]) + with tm.assert_raises_regex(ValueError, error_msg): + df.rank(1, numeric_only=False) + with tm.assert_raises_regex(ValueError, error_msg): + df.rank(0, numeric_only=False) df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']]) - expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]]) - result = df.rank(1, numeric_only=False) - tm.assert_frame_equal(result, expected) - - expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]]) - result = df.rank(0, numeric_only=False) - tm.assert_frame_equal(result, expected) + with tm.assert_raises_regex(ValueError, error_msg): + df.rank(1, numeric_only=False) + with tm.assert_raises_regex(ValueError, error_msg): + df.rank(0, numeric_only=False) # f7u12, this does not work without extensive workaround data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)], @@ -110,9 +109,9 @@ def test_rank2(self): self.mixed_frame['datetime'] = datetime.now() self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1) - result = self.mixed_frame.rank(1) - expected = self.mixed_frame.rank(1, numeric_only=True) - tm.assert_frame_equal(result, expected) + # mixed_frame["foo"] is of string-type + with tm.assert_raises_regex(ValueError, error_msg): + self.mixed_frame.rank(1) df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]}) @@ -218,7 +217,7 @@ def test_rank_methods_frame(self): tm.assert_frame_equal(result, expected) def test_rank_descending(self): - dtypes = ['O', 'f8', 'i8'] + dtypes = ['f8', 'i8'] for dtype, method in product(dtypes, self.results): if 'i' in dtype: @@ -230,15 +229,11 @@ def test_rank_descending(self): expected = (df.max() - df).rank() assert_frame_equal(res, expected) - if method == 'first' and dtype == 'O': - continue - expected = (df.max() - df).rank(method=method) - if dtype != 'O': - res2 = df.rank(method=method, ascending=False, - numeric_only=True) - 
assert_frame_equal(res2, expected) + res2 = df.rank(method=method, ascending=False, + numeric_only=True) + assert_frame_equal(res2, expected) res3 = df.rank(method=method, ascending=False, numeric_only=False) @@ -258,11 +253,10 @@ def _check2d(df, expected, method='average', axis=0): assert_frame_equal(result, exp_df) dtypes = [None, object] - disabled = set([(object, 'first')]) results = self.results for method, axis, dtype in product(results, [0, 1], dtypes): - if (dtype, method) in disabled: + if dtype == object: continue frame = df if dtype is None else df.astype(dtype) _check2d(frame, results[method], method=method, axis=axis) diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index 004e42e14cb93..25853e6a19fb3 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -134,22 +134,27 @@ def test_rank_categorical(self): assert_series_equal(ordered.rank(), exp) assert_series_equal(ordered.rank(ascending=False), exp_desc) - # Unordered categoricals should be ranked as objects + # See #19560 + error_msg = ("pandas.core.algorithms.rank " + "not supported for unordered " + "non-numeric data") + + # Ranking unordered categorials depreciated per #19560 unordered = Series(['first', 'second', 'third', 'fourth', 'fifth', 'sixth']).astype( CategoricalDtype(categories=['first', 'second', 'third', 'fourth', 'fifth', 'sixth'], ordered=False)) - exp_unordered = Series([2., 4., 6., 3., 1., 5.]) - res = unordered.rank() - assert_series_equal(res, exp_unordered) + + with tm.assert_raises_regex(ValueError, error_msg): + unordered.rank() unordered1 = Series( [1, 2, 3, 4, 5, 6], ).astype(CategoricalDtype([1, 2, 3, 4, 5, 6], False)) - exp_unordered1 = Series([1., 2., 3., 4., 5., 6.]) - res1 = unordered1.rank() - assert_series_equal(res1, exp_unordered1) + + # Won't raise ValueError because entries not objects. 
+ unordered1.rank() # Test na_option for rank data na_ser = Series( @@ -213,16 +218,13 @@ def test_rank_signature(self): 'int64', marks=pytest.mark.xfail( reason="iNaT is equivalent to minimum value of dtype" - "int64 pending issue #16674")), - ([NegInfinity(), '1', 'A', 'BA', 'Ba', 'C', Infinity()], - 'object') + "int64 pending issue #16674")) ]) def test_rank_inf(self, contents, dtype): dtype_na_map = { 'float64': np.nan, 'float32': np.nan, - 'int64': iNaT, - 'object': None + 'int64': iNaT } # Insert nans at random positions if underlying dtype has missing # value. Then adjust the expected order by adding nans accordingly @@ -249,13 +251,10 @@ def _check(s, expected, method='average'): result = s.rank(method=method) tm.assert_series_equal(result, Series(expected)) - dtypes = [None, object] - disabled = set([(object, 'first')]) + dtypes = [None] results = self.results for method, dtype in product(results, dtypes): - if (dtype, method) in disabled: - continue series = s if dtype is None else s.astype(dtype) _check(series, results[method], method=method) @@ -294,7 +293,7 @@ def _check(s, method, na_option, ascending): for dtype, na_value, pos_inf, neg_inf in dtypes: in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk iseries = Series(in_arr, dtype=dtype) - if (dtype, method) in disabled: + if dtype == 'object': continue _check(iseries, method, na_option, ascending) @@ -330,7 +329,7 @@ def test_rank_methods_series(self): tm.assert_series_equal(result, expected) def test_rank_dense_method(self): - dtypes = ['O', 'f8', 'i8'] + dtypes = ['f8', 'i8'] in_out = [([1], [1]), ([2], [1]), ([0], [1]), @@ -348,7 +347,7 @@ def test_rank_dense_method(self): assert_series_equal(result, expected) def test_rank_descending(self): - dtypes = ['O', 'f8', 'i8'] + dtypes = ['f8', 'i8'] for dtype, method in product(dtypes, self.results): if 'i' in dtype: @@ -360,9 +359,6 @@ def test_rank_descending(self): expected = (s.max() - s).rank() assert_series_equal(res, expected) - 
if method == 'first' and dtype == 'O': - continue - expected = (s.max() - s).rank(method=method) res2 = s.rank(method=method, ascending=False) assert_series_equal(res2, expected) @@ -379,9 +375,15 @@ def test_rank_int(self): def test_rank_object_bug(self): # GH 13445 - # smoke tests - Series([np.nan] * 32).astype(object).rank(ascending=True) - Series([np.nan] * 32).astype(object).rank(ascending=False) + # See #19560 + error_msg = ("pandas.core.algorithms.rank " + "not supported for unordered " + "non-numeric data") + + with tm.assert_raises_regex(ValueError, error_msg): + Series([np.nan] * 32).astype(object).rank(ascending=True) + with tm.assert_raises_regex(ValueError, error_msg): + Series([np.nan] * 32).astype(object).rank(ascending=False) def test_rank_modify_inplace(self): # GH 18521 @@ -396,7 +398,7 @@ def test_rank_modify_inplace(self): # GH15630, pct should be on 100% basis when method='dense' -@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8']) +@pytest.mark.parametrize('dtype', ['f8', 'i8']) @pytest.mark.parametrize('ser, exp', [ ([1], [1.]), ([1, 2], [1. / 2, 2. / 2]), @@ -414,7 +416,7 @@ def test_rank_dense_pct(dtype, ser, exp): assert_series_equal(result, expected) -@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8']) +@pytest.mark.parametrize('dtype', ['f8', 'i8']) @pytest.mark.parametrize('ser, exp', [ ([1], [1.]), ([1, 2], [1. / 2, 2. / 2]), @@ -432,7 +434,7 @@ def test_rank_min_pct(dtype, ser, exp): assert_series_equal(result, expected) -@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8']) +@pytest.mark.parametrize('dtype', ['f8', 'i8']) @pytest.mark.parametrize('ser, exp', [ ([1], [1.]), ([1, 2], [1. / 2, 2. / 2]), @@ -450,7 +452,7 @@ def test_rank_max_pct(dtype, ser, exp): assert_series_equal(result, expected) -@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8']) +@pytest.mark.parametrize('dtype', ['f8', 'i8']) @pytest.mark.parametrize('ser, exp', [ ([1], [1.]), ([1, 2], [1. / 2, 2. / 2]),
- [ ] closes #19560 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry *This is only partial solution to issue #19560. *There were some errors with the tests, but I guess they are unrelated to these changes since I also have them with master. *I modified some tests so that they don't contradict with the update "don't allow objects to be ranked unless they are ordered categorials" that was suggested in #19560.
https://api.github.com/repos/pandas-dev/pandas/pulls/20670
2018-04-12T17:21:59Z
2018-11-23T03:28:18Z
null
2018-11-23T03:28:18Z
Split test multi
diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py new file mode 100644 index 0000000000000..a68dde8543412 --- /dev/null +++ b/pandas/tests/indexes/multi/test_constructor.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- + +import re + +import numpy as np + +from pandas import (Index, MultiIndex) + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestConstructor(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_copy_in_constructor(self): + levels = np.array(["a", "b", "c"]) + labels = np.array([1, 1, 2, 0, 0, 1, 1]) + val = labels[0] + mi = MultiIndex(levels=[levels, levels], labels=[labels, labels], + copy=True) + assert mi.labels[0][0] == val + labels[0] = 15 + assert mi.labels[0][0] == val + val = levels[0] + levels[0] = "PANDA" + assert mi.levels[0][0] == val + + def test_constructor_single_level(self): + result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], + labels=[[0, 1, 2, 3]], names=['first']) + assert isinstance(result, MultiIndex) + expected = Index(['foo', 'bar', 'baz', 'qux'], name='first') + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ['first'] + + def test_constructor_no_levels(self): + tm.assert_raises_regex(ValueError, "non-zero number " + "of levels/labels", + MultiIndex, levels=[], labels=[]) + both_re = re.compile('Must pass both levels and labels') + with 
tm.assert_raises_regex(TypeError, both_re): + MultiIndex(levels=[]) + with tm.assert_raises_regex(TypeError, both_re): + MultiIndex(labels=[]) + + def test_constructor_mismatched_label_levels(self): + labels = [np.array([1]), np.array([2]), np.array([3])] + levels = ["a"] + tm.assert_raises_regex(ValueError, "Length of levels and labels " + "must be the same", MultiIndex, + levels=levels, labels=labels) + length_error = re.compile('>= length of level') + label_error = re.compile(r'Unequal label lengths: \[4, 2\]') + + # important to check that it's looking at the right thing. + with tm.assert_raises_regex(ValueError, length_error): + MultiIndex(levels=[['a'], ['b']], + labels=[[0, 1, 2, 3], [0, 3, 4, 1]]) + + with tm.assert_raises_regex(ValueError, label_error): + MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]]) + + # external API + with tm.assert_raises_regex(ValueError, length_error): + self.index.copy().set_levels([['a'], ['b']]) + + with tm.assert_raises_regex(ValueError, label_error): + self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]]) diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py new file mode 100644 index 0000000000000..e3d1379862528 --- /dev/null +++ b/pandas/tests/indexes/multi/test_contains.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +from pandas.tests.indexes.common import Base + + +class TestContains(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + 
verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_contains(self): + assert ('foo', 'two') in self.index + assert ('bar', 'two') not in self.index + assert None not in self.index + + def test_contains_top_level(self): + midx = MultiIndex.from_product([['A', 'B'], [1, 2]]) + assert 'A' in midx + assert 'A' not in midx._engine + + def test_contains_with_nat(self): + # MI with a NaT + mi = MultiIndex(levels=[['C'], + pd.date_range('2012-01-01', periods=5)], + labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], + names=[None, 'B']) + assert ('C', pd.Timestamp('2012-01-01')) in mi + for val in mi.values: + assert val in mi diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py new file mode 100644 index 0000000000000..8dac69ec796f1 --- /dev/null +++ b/pandas/tests/indexes/multi/test_drop.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) +from pandas.compat import lrange +from pandas.errors import PerformanceWarning + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestDrop(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_drop(self): + dropped = self.index.drop([('foo', 'two'), ('qux', 'one')]) + + index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')]) + dropped2 = self.index.drop(index) + 
+ expected = self.index[[0, 2, 3, 5]] + tm.assert_index_equal(dropped, expected) + tm.assert_index_equal(dropped2, expected) + + dropped = self.index.drop(['bar']) + expected = self.index[[0, 1, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = self.index.drop('foo') + expected = self.index[[2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + index = MultiIndex.from_tuples([('bar', 'two')]) + pytest.raises(KeyError, self.index.drop, [('bar', 'two')]) + pytest.raises(KeyError, self.index.drop, index) + pytest.raises(KeyError, self.index.drop, ['foo', 'two']) + + # partially correct argument + mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')]) + pytest.raises(KeyError, self.index.drop, mixed_index) + + # error='ignore' + dropped = self.index.drop(index, errors='ignore') + expected = self.index[[0, 1, 2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = self.index.drop(mixed_index, errors='ignore') + expected = self.index[[0, 1, 2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = self.index.drop(['foo', 'two'], errors='ignore') + expected = self.index[[2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + # mixed partial / full drop + dropped = self.index.drop(['foo', ('qux', 'one')]) + expected = self.index[[2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + # mixed partial / full drop / error='ignore' + mixed_index = ['foo', ('qux', 'one'), 'two'] + pytest.raises(KeyError, self.index.drop, mixed_index) + dropped = self.index.drop(mixed_index, errors='ignore') + expected = self.index[[2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + def test_droplevel_with_names(self): + index = self.index[self.index.get_loc('foo')] + dropped = index.droplevel(0) + assert dropped.name == 'second' + + index = MultiIndex( + levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], + labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( + [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 
0, 0, 1, 0])], + names=['one', 'two', 'three']) + dropped = index.droplevel(0) + assert dropped.names == ('two', 'three') + + dropped = index.droplevel('two') + expected = index.droplevel(1) + assert dropped.equals(expected) + + def test_droplevel_multiple(self): + index = MultiIndex( + levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], + labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( + [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])], + names=['one', 'two', 'three']) + + dropped = index[:2].droplevel(['three', 'one']) + expected = index[:2].droplevel(2).droplevel(0) + assert dropped.equals(expected) + + def test_drop_not_lexsorted(self): + # GH 12078 + + # define the lexsorted version of the multi-index + tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')] + lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c']) + assert lexsorted_mi.is_lexsorted() + + # and the not-lexsorted version + df = pd.DataFrame(columns=['a', 'b', 'c', 'd'], + data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]]) + df = df.pivot_table(index='a', columns=['b', 'c'], values='d') + df = df.reset_index() + not_lexsorted_mi = df.columns + assert not not_lexsorted_mi.is_lexsorted() + + # compare the results + tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi) + with tm.assert_produces_warning(PerformanceWarning): + tm.assert_index_equal(lexsorted_mi.drop('a'), + not_lexsorted_mi.drop('a')) diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py new file mode 100644 index 0000000000000..b272ea04514e1 --- /dev/null +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +from itertools import product + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) +from pandas.compat import range, u + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestDuplicates(Base): + _holder = 
MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], + [1, 'a', 1]]) + def test_duplicate_level_names(self, names): + # GH18872 + pytest.raises(ValueError, pd.MultiIndex.from_product, + [[0, 1]] * 3, names=names) + + # With .rename() + mi = pd.MultiIndex.from_product([[0, 1]] * 3) + tm.assert_raises_regex(ValueError, "Duplicated level name:", + mi.rename, names) + + # With .rename(., level=) + mi.rename(names[0], level=1, inplace=True) + tm.assert_raises_regex(ValueError, "Duplicated level name:", + mi.rename, names[:2], level=[0, 2]) + + def test_duplicates(self): + assert not self.index.has_duplicates + assert self.index.append(self.index).has_duplicates + + index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[ + [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) + assert index.has_duplicates + + # GH 9075 + t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), + (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), + (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135), + (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145), + (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158), + (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122), + (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160), + (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180), + (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143), + (u('x'), u('out'), 
u('z'), 21, u('y'), u('in'), u('z'), 128), + (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129), + (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111), + (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114), + (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121), + (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126), + (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155), + (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), + (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] + + index = pd.MultiIndex.from_tuples(t) + assert not index.has_duplicates + + # handle int64 overflow if possible + def check(nlevels, with_nulls): + labels = np.tile(np.arange(500), 2) + level = np.arange(500) + + if with_nulls: # inject some null values + labels[500] = -1 # common nan value + labels = [labels.copy() for i in range(nlevels)] + for i in range(nlevels): + labels[i][500 + i - nlevels // 2] = -1 + + labels += [np.array([-1, 1]).repeat(500)] + else: + labels = [labels] * nlevels + [np.arange(2).repeat(500)] + + levels = [level] * nlevels + [[0, 1]] + + # no dups + index = MultiIndex(levels=levels, labels=labels) + assert not index.has_duplicates + + # with a dup + if with_nulls: + f = lambda a: np.insert(a, 1000, a[0]) + labels = list(map(f, labels)) + index = MultiIndex(levels=levels, labels=labels) + else: + values = index.values.tolist() + index = MultiIndex.from_tuples(values + [values[0]]) + + assert index.has_duplicates + + # no overflow + check(4, False) + check(4, True) + + # overflow possible + check(8, False) + check(8, True) + + # GH 9125 + n, k = 200, 5000 + levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] + labels = [np.random.choice(n, k * n) for lev in levels] + mi = MultiIndex(levels=levels, labels=labels) + + for keep in ['first', 'last', False]: + left = mi.duplicated(keep=keep) + right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep) + 
tm.assert_numpy_array_equal(left, right) + + # GH5873 + for a in [101, 102]: + mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) + assert not mi.has_duplicates + assert mi.get_duplicates() == [] + tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( + 2, dtype='bool')) + + for n in range(1, 6): # 1st level shape + for m in range(1, 5): # 2nd level shape + # all possible unique combinations, including nan + lab = product(range(-1, n), range(-1, m)) + mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]], + labels=np.random.permutation(list(lab)).T) + assert len(mi) == (n + 1) * (m + 1) + assert not mi.has_duplicates + assert mi.get_duplicates() == [] + tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( + len(mi), dtype='bool')) + + def test_duplicate_meta_data(self): + # GH 10115 + index = MultiIndex( + levels=[[0, 1], [0, 1, 2]], + labels=[[0, 0, 0, 0, 1, 1, 1], + [0, 1, 2, 0, 0, 1, 2]]) + + for idx in [index, + index.set_names([None, None]), + index.set_names([None, 'Num']), + index.set_names(['Upper', 'Num']), ]: + assert idx.has_duplicates + assert idx.drop_duplicates().names == idx.names + + def test_duplicate_multiindex_labels(self): + # GH 17464 + # Make sure that a MultiIndex with duplicate levels throws a ValueError + with pytest.raises(ValueError): + ind = pd.MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) + + # And that using set_levels with duplicate levels fails + ind = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], + [1, 2, 1, 2, 3]]) + with pytest.raises(ValueError): + ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], + inplace=True) diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py new file mode 100644 index 0000000000000..488b7e6efdc61 --- /dev/null +++ b/pandas/tests/indexes/multi/test_format.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +import warnings + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +import 
pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestFormat(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_format(self): + self.index.format() + self.index[:0].format() + + def test_format_integer_names(self): + index = MultiIndex(levels=[[0, 1], [0, 1]], + labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]) + index.format(names=True) + + def test_format_sparse_display(self): + index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]], + labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1], + [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]]) + + result = index.format() + assert result[3] == '1 0 0 0' + + def test_format_sparse_config(self): + warn_filters = warnings.filters + warnings.filterwarnings('ignore', category=FutureWarning, + module=".*format") + # GH1538 + pd.set_option('display.multi_sparse', False) + + result = self.index.format() + assert result[1] == 'foo two' + + tm.reset_display_options() + + warnings.filters = warn_filters diff --git a/pandas/tests/indexes/multi/test_from_arrays.py b/pandas/tests/indexes/multi/test_from_arrays.py new file mode 100644 index 0000000000000..220682400ce54 --- /dev/null +++ b/pandas/tests/indexes/multi/test_from_arrays.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import Index, MultiIndex +from pandas._libs.tslib import Timestamp + +import pandas.util.testing as tm + +from 
pandas.tests.indexes.common import Base + + +class TestFromArrays(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_from_arrays(self): + arrays = [] + for lev, lab in zip(self.index.levels, self.index.labels): + arrays.append(np.asarray(lev).take(lab)) + + # list of arrays as input + result = MultiIndex.from_arrays(arrays, names=self.index.names) + tm.assert_index_equal(result, self.index) + + # infer correctly + result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], + ['a', 'b']]) + assert result.levels[0].equals(Index([Timestamp('20130101')])) + assert result.levels[1].equals(Index(['a', 'b'])) + + def test_from_arrays_iterator(self): + # GH 18434 + arrays = [] + for lev, lab in zip(self.index.levels, self.index.labels): + arrays.append(np.asarray(lev).take(lab)) + + # iterator as input + result = MultiIndex.from_arrays(iter(arrays), names=self.index.names) + tm.assert_index_equal(result, self.index) + + # invalid iterator input + with tm.assert_raises_regex( + TypeError, "Input must be a list / sequence of array-likes."): + MultiIndex.from_arrays(0) + + def test_from_arrays_index_series_datetimetz(self): + idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, + tz='US/Eastern') + idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3, + tz='Asia/Tokyo') + result = pd.MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + 
tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + tm.assert_index_equal(result, result2) + + def test_from_arrays_index_series_timedelta(self): + idx1 = pd.timedelta_range('1 days', freq='D', periods=3) + idx2 = pd.timedelta_range('2 hours', freq='H', periods=3) + result = pd.MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + tm.assert_index_equal(result, result2) + + def test_from_arrays_index_series_period(self): + idx1 = pd.period_range('2011-01-01', freq='D', periods=3) + idx2 = pd.period_range('2015-01-01', freq='H', periods=3) + result = pd.MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + tm.assert_index_equal(result, result2) + + def test_from_arrays_index_datetimelike_mixed(self): + idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, + tz='US/Eastern') + idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3) + idx3 = pd.timedelta_range('1 days', freq='D', periods=3) + idx4 = pd.period_range('2011-01-01', freq='D', periods=3) + + result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + 
tm.assert_index_equal(result.get_level_values(2), idx3) + tm.assert_index_equal(result.get_level_values(3), idx4) + + result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), + pd.Series(idx2), + pd.Series(idx3), + pd.Series(idx4)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + tm.assert_index_equal(result2.get_level_values(2), idx3) + tm.assert_index_equal(result2.get_level_values(3), idx4) + + tm.assert_index_equal(result, result2) + + def test_from_arrays_index_series_categorical(self): + # GH13743 + idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), + ordered=False) + idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), + ordered=True) + + result = pd.MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values]) + tm.assert_index_equal(result3.get_level_values(0), idx1) + tm.assert_index_equal(result3.get_level_values(1), idx2) + + def test_from_arrays_empty(self): + # 0 levels + with tm.assert_raises_regex( + ValueError, "Must pass non-zero number of levels/labels"): + MultiIndex.from_arrays(arrays=[]) + + # 1 level + result = MultiIndex.from_arrays(arrays=[[]], names=['A']) + assert isinstance(result, MultiIndex) + expected = Index([], name='A') + tm.assert_index_equal(result.levels[0], expected) + + # N levels + for N in [2, 3]: + arrays = [[]] * N + names = list('ABC')[:N] + result = MultiIndex.from_arrays(arrays=arrays, names=names) + expected = MultiIndex(levels=[[]] * N, labels=[[]] * N, + names=names) + tm.assert_index_equal(result, expected) + + def test_from_arrays_invalid_input(self): + invalid_inputs = 
[1, [1], [1, 2], [[1], 2], + 'a', ['a'], ['a', 'b'], [['a'], 'b']] + for i in invalid_inputs: + pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i) + + def test_from_arrays_different_lengths(self): + # see gh-13599 + idx1 = [1, 2, 3] + idx2 = ['a', 'b'] + tm.assert_raises_regex(ValueError, '^all arrays must ' + 'be same length$', + MultiIndex.from_arrays, [idx1, idx2]) + + idx1 = [] + idx2 = ['a', 'b'] + tm.assert_raises_regex(ValueError, '^all arrays must ' + 'be same length$', + MultiIndex.from_arrays, [idx1, idx2]) + + idx1 = [1, 2, 3] + idx2 = [] + tm.assert_raises_regex(ValueError, '^all arrays must ' + 'be same length$', + MultiIndex.from_arrays, [idx1, idx2]) diff --git a/pandas/tests/indexes/multi/test_from_product.py b/pandas/tests/indexes/multi/test_from_product.py new file mode 100644 index 0000000000000..877bc4569c161 --- /dev/null +++ b/pandas/tests/indexes/multi/test_from_product.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex, date_range) +from pandas.compat import lrange +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestFromProduct(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_from_product(self): + + first = ['foo', 'bar', 'buz'] + second = ['a', 'b', 'c'] + names = ['first', 
'second'] + result = MultiIndex.from_product([first, second], names=names) + + tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), + ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), + ('buz', 'c')] + expected = MultiIndex.from_tuples(tuples, names=names) + + tm.assert_index_equal(result, expected) + + def test_from_product_iterator(self): + # GH 18434 + first = ['foo', 'bar', 'buz'] + second = ['a', 'b', 'c'] + names = ['first', 'second'] + tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), + ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), + ('buz', 'c')] + expected = MultiIndex.from_tuples(tuples, names=names) + + # iterator as input + result = MultiIndex.from_product(iter([first, second]), names=names) + tm.assert_index_equal(result, expected) + + # Invalid non-iterable input + with tm.assert_raises_regex( + TypeError, "Input must be a list / sequence of iterables."): + MultiIndex.from_product(0) + + def test_from_product_empty(self): + # 0 levels + with tm.assert_raises_regex( + ValueError, "Must pass non-zero number of levels/labels"): + MultiIndex.from_product([]) + + # 1 level + result = MultiIndex.from_product([[]], names=['A']) + expected = pd.Index([], name='A') + tm.assert_index_equal(result.levels[0], expected) + + # 2 levels + l1 = [[], ['foo', 'bar', 'baz'], []] + l2 = [[], [], ['a', 'b', 'c']] + names = ['A', 'B'] + for first, second in zip(l1, l2): + result = MultiIndex.from_product([first, second], names=names) + expected = MultiIndex(levels=[first, second], + labels=[[], []], names=names) + tm.assert_index_equal(result, expected) + + # GH12258 + names = ['A', 'B', 'C'] + for N in range(4): + lvl2 = lrange(N) + result = MultiIndex.from_product([[], lvl2, []], names=names) + expected = MultiIndex(levels=[[], lvl2, []], + labels=[[], [], []], names=names) + tm.assert_index_equal(result, expected) + + def test_from_product_invalid_input(self): + invalid_inputs = [1, [1], [1, 2], [[1], 2], + 'a', ['a'], ['a', 
'b'], [['a'], 'b']] + for i in invalid_inputs: + pytest.raises(TypeError, MultiIndex.from_product, iterables=i) + + def test_from_product_datetimeindex(self): + dt_index = date_range('2000-01-01', periods=2) + mi = pd.MultiIndex.from_product([[1, 2], dt_index]) + etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp( + '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp( + '2000-01-01')), (2, pd.Timestamp('2000-01-02'))]) + tm.assert_numpy_array_equal(mi.values, etalon) + + def test_from_product_index_series_categorical(self): + # GH13743 + first = ['foo', 'bar'] + for ordered in [False, True]: + idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), + ordered=ordered) + expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), + categories=list("bac"), + ordered=ordered) + + for arr in [idx, pd.Series(idx), idx.values]: + result = pd.MultiIndex.from_product([first, arr]) + tm.assert_index_equal(result.get_level_values(1), expected) diff --git a/pandas/tests/indexes/multi/test_get.py b/pandas/tests/indexes/multi/test_get.py new file mode 100644 index 0000000000000..d9e5046fe1c69 --- /dev/null +++ b/pandas/tests/indexes/multi/test_get.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (CategoricalIndex, Index, MultiIndex) +from pandas.compat import lrange +from pandas.core.indexes.base import InvalidIndexError + +import pandas.util.testing as tm + +from pandas.util.testing import assert_almost_equal + +from pandas.tests.indexes.common import Base + + +class TestGet(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = 
dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_getitem(self): + # scalar + assert self.index[2] == ('bar', 'one') + + # slice + result = self.index[2:5] + expected = self.index[[2, 3, 4]] + assert result.equals(expected) + + # boolean + result = self.index[[True, False, True, False, True, True]] + result2 = self.index[np.array([True, False, True, False, True, True])] + expected = self.index[[0, 2, 4, 5]] + assert result.equals(expected) + assert result2.equals(expected) + + def test_getitem_group_select(self): + sorted_idx, _ = self.index.sortlevel(0) + assert sorted_idx.get_loc('baz') == slice(3, 4) + assert sorted_idx.get_loc('foo') == slice(0, 2) + + def test_get_loc(self): + assert self.index.get_loc(('foo', 'two')) == 1 + assert self.index.get_loc(('baz', 'two')) == 3 + pytest.raises(KeyError, self.index.get_loc, ('bar', 'two')) + pytest.raises(KeyError, self.index.get_loc, 'quux') + + pytest.raises(NotImplementedError, self.index.get_loc, 'foo', + method='nearest') + + # 3 levels + index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( + lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( + [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) + pytest.raises(KeyError, index.get_loc, (1, 1)) + assert index.get_loc((2, 0)) == slice(3, 5) + + def test_get_loc_duplicates(self): + index = Index([2, 2, 2, 2]) + result = index.get_loc(2) + expected = slice(0, 4) + assert result == expected + # pytest.raises(Exception, index.get_loc, 2) + + index = Index(['c', 'a', 'a', 'b', 'b']) + rs = index.get_loc('c') + xp = 0 + assert rs == xp + + def test_get_value_duplicates(self): + index = MultiIndex(levels=[['D', 'B', 'C'], + [0, 26, 27, 37, 57, 67, 75, 82]], + labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], + [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], + names=['tag', 
'day']) + + assert index.get_loc('D') == slice(0, 3) + with pytest.raises(KeyError): + index._engine.get_value(np.array([]), 'D') + + def test_get_loc_level(self): + index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( + lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( + [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) + + loc, new_index = index.get_loc_level((0, 1)) + expected = slice(1, 2) + exp_index = index[expected].droplevel(0).droplevel(0) + assert loc == expected + assert new_index.equals(exp_index) + + loc, new_index = index.get_loc_level((0, 1, 0)) + expected = 1 + assert loc == expected + assert new_index is None + + pytest.raises(KeyError, index.get_loc_level, (2, 2)) + + index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array( + [0, 0, 0, 0]), np.array([0, 1, 2, 3])]) + result, new_index = index.get_loc_level((2000, slice(None, None))) + expected = slice(None, None) + assert result == expected + assert new_index.equals(index.droplevel(0)) + + @pytest.mark.parametrize('level', [0, 1]) + @pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None]) + def test_get_loc_nan(self, level, null_val): + # GH 18485 : NaN in MultiIndex + levels = [['a', 'b'], ['c', 'd']] + key = ['b', 'd'] + levels[level] = np.array([0, null_val], dtype=type(null_val)) + key[level] = null_val + idx = MultiIndex.from_product(levels) + assert idx.get_loc(tuple(key)) == 3 + + def test_get_loc_missing_nan(self): + # GH 8569 + idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]]) + assert isinstance(idx.get_loc(1), slice) + pytest.raises(KeyError, idx.get_loc, 3) + pytest.raises(KeyError, idx.get_loc, np.nan) + pytest.raises(KeyError, idx.get_loc, [np.nan]) + + @pytest.mark.parametrize('dtype1', [int, float, bool, str]) + @pytest.mark.parametrize('dtype2', [int, float, bool, str]) + def test_get_loc_multiple_dtypes(self, dtype1, dtype2): + # GH 18520 + levels = [np.array([0, 1]).astype(dtype1), + np.array([0, 
1]).astype(dtype2)] + idx = pd.MultiIndex.from_product(levels) + assert idx.get_loc(idx[2]) == 2 + + @pytest.mark.parametrize('level', [0, 1]) + @pytest.mark.parametrize('dtypes', [[int, float], [float, int]]) + def test_get_loc_implicit_cast(self, level, dtypes): + # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa + levels = [['a', 'b'], ['c', 'd']] + key = ['b', 'd'] + lev_dtype, key_dtype = dtypes + levels[level] = np.array([0, 1], dtype=lev_dtype) + key[level] = key_dtype(1) + idx = MultiIndex.from_product(levels) + assert idx.get_loc(tuple(key)) == 3 + + def test_get_loc_cast_bool(self): + # GH 19086 : int is casted to bool, but not vice-versa + levels = [[False, True], np.arange(2, dtype='int64')] + idx = MultiIndex.from_product(levels) + + assert idx.get_loc((0, 1)) == 1 + assert idx.get_loc((1, 0)) == 2 + + pytest.raises(KeyError, idx.get_loc, (False, True)) + pytest.raises(KeyError, idx.get_loc, (True, False)) + + def test_get_indexer(self): + major_axis = Index(lrange(4)) + minor_axis = Index(lrange(2)) + + major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp) + minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp) + + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + idx1 = index[:5] + idx2 = index[[1, 3, 5]] + + r1 = idx1.get_indexer(idx2) + assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp)) + + r1 = idx2.get_indexer(idx1, method='pad') + e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp) + assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method='pad') + assert_almost_equal(r2, e1[::-1]) + + rffill1 = idx2.get_indexer(idx1, method='ffill') + assert_almost_equal(r1, rffill1) + + r1 = idx2.get_indexer(idx1, method='backfill') + e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp) + assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method='backfill') + assert_almost_equal(r2, e1[::-1]) + + rbfill1 = idx2.get_indexer(idx1, method='bfill') + 
assert_almost_equal(r1, rbfill1) + + # pass non-MultiIndex + r1 = idx1.get_indexer(idx2.values) + rexp1 = idx1.get_indexer(idx2) + assert_almost_equal(r1, rexp1) + + r1 = idx1.get_indexer([1, 2, 3]) + assert (r1 == [-1, -1, -1]).all() + + # create index with duplicates + idx1 = Index(lrange(10) + lrange(10)) + idx2 = Index(lrange(20)) + + msg = "Reindexing only valid with uniquely valued Index objects" + with tm.assert_raises_regex(InvalidIndexError, msg): + idx1.get_indexer(idx2) + + def test_get_indexer_nearest(self): + midx = MultiIndex.from_tuples([('a', 1), ('b', 2)]) + with pytest.raises(NotImplementedError): + midx.get_indexer(['a'], method='nearest') + with pytest.raises(NotImplementedError): + midx.get_indexer(['a'], method='pad', tolerance=2) + + def test_get_level_values(self): + result = self.index.get_level_values(0) + expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'], + name='first') + tm.assert_index_equal(result, expected) + assert result.name == 'first' + + result = self.index.get_level_values('first') + expected = self.index.get_level_values(0) + tm.assert_index_equal(result, expected) + + # GH 10460 + index = MultiIndex( + levels=[CategoricalIndex(['A', 'B']), + CategoricalIndex([1, 2, 3])], + labels=[np.array([0, 0, 0, 1, 1, 1]), + np.array([0, 1, 2, 0, 1, 2])]) + + exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B']) + tm.assert_index_equal(index.get_level_values(0), exp) + exp = CategoricalIndex([1, 2, 3, 1, 2, 3]) + tm.assert_index_equal(index.get_level_values(1), exp) + + def test_get_level_values_int_with_na(self): + # GH 17924 + arrays = [['a', 'b', 'b'], [1, np.nan, 2]] + index = pd.MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = Index([1, np.nan, 2]) + tm.assert_index_equal(result, expected) + + arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]] + index = pd.MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = Index([np.nan, np.nan, 2]) + tm.assert_index_equal(result, 
expected) + + def test_get_level_values_na(self): + arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]] + index = pd.MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = pd.Index([np.nan, np.nan, np.nan]) + tm.assert_index_equal(result, expected) + + result = index.get_level_values(1) + expected = pd.Index(['a', np.nan, 1]) + tm.assert_index_equal(result, expected) + + arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])] + index = pd.MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = pd.DatetimeIndex([0, 1, pd.NaT]) + tm.assert_index_equal(result, expected) + + arrays = [[], []] + index = pd.MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = pd.Index([], dtype=object) + tm.assert_index_equal(result, expected) + + def test_get_level_values_all_na(self): + # GH 17924 when level entirely consists of nan + arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]] + index = pd.MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = index.get_level_values(1) + expected = pd.Index(['a', np.nan, 1], dtype=object) + tm.assert_index_equal(result, expected) + + def test_get_unique_index(self): + idx = self.index[[0, 1, 0, 1, 1, 0, 0]] + expected = self.index._shallow_copy(idx[[0, 1]]) + + for dropna in [False, True]: + result = idx._get_unique_index(dropna=dropna) + assert result.unique + tm.assert_index_equal(result, expected) + + def test_get_level_number_integer(self): + self.index.names = [1, 0] + assert self.index._get_level_number(1) == 0 + assert self.index._get_level_number(0) == 1 + pytest.raises(IndexError, self.index._get_level_number, 2) + tm.assert_raises_regex(KeyError, 'Level fourth not found', + self.index._get_level_number, 'fourth') diff --git a/pandas/tests/indexes/multi/test_isin.py b/pandas/tests/indexes/multi/test_isin.py new file 
mode 100644 index 0000000000000..c9be34cbea702 --- /dev/null +++ b/pandas/tests/indexes/multi/test_isin.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- + + +import pytest + +import numpy as np + + +from pandas import (Index, MultiIndex) +from pandas.compat import PYPY +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestIsIn(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_isin(self): + values = [('foo', 2), ('bar', 3), ('quux', 4)] + + idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange( + 4)]) + result = idx.isin(values) + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + # empty, return dtype bool + idx = MultiIndex.from_arrays([[], []]) + result = idx.isin(values) + assert len(result) == 0 + assert result.dtype == np.bool_ + + @pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy") + def test_isin_nan_not_pypy(self): + idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]]) + tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]), + np.array([False, False])) + tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]), + np.array([False, False])) + + @pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy") + def test_isin_nan_pypy(self): + idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]]) + tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]), + np.array([False, True])) 
+ tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]), + np.array([False, True])) + + def test_isin_level_kwarg(self): + idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange( + 4)]) + + vals_0 = ['foo', 'bar', 'quux'] + vals_1 = [2, 3, 10] + + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0)) + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2)) + + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1)) + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1)) + + pytest.raises(IndexError, idx.isin, vals_0, level=5) + pytest.raises(IndexError, idx.isin, vals_0, level=-5) + + pytest.raises(KeyError, idx.isin, vals_0, level=1.0) + pytest.raises(KeyError, idx.isin, vals_1, level=-1.0) + pytest.raises(KeyError, idx.isin, vals_1, level='A') + + idx.names = ['A', 'B'] + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A')) + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B')) + + pytest.raises(KeyError, idx.isin, vals_1, level='C') diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py new file mode 100644 index 0000000000000..e64dfcf240ac2 --- /dev/null +++ b/pandas/tests/indexes/multi/test_join.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestJoin(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, 
minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + @pytest.mark.parametrize('other', + [Index(['three', 'one', 'two']), + Index(['one']), + Index(['one', 'three'])]) + def test_join_level(self, other, join_type): + join_index, lidx, ridx = other.join(self.index, how=join_type, + level='second', + return_indexers=True) + + exp_level = other.join(self.index.levels[1], how=join_type) + assert join_index.levels[0].equals(self.index.levels[0]) + assert join_index.levels[1].equals(exp_level) + + # pare down levels + mask = np.array( + [x[1] in exp_level for x in self.index], dtype=bool) + exp_values = self.index.values[mask] + tm.assert_numpy_array_equal(join_index.values, exp_values) + + if join_type in ('outer', 'inner'): + join_index2, ridx2, lidx2 = \ + self.index.join(other, how=join_type, level='second', + return_indexers=True) + + assert join_index.equals(join_index2) + tm.assert_numpy_array_equal(lidx, lidx2) + tm.assert_numpy_array_equal(ridx, ridx2) + tm.assert_numpy_array_equal(join_index2.values, exp_values) + + def test_join_level_corner_case(self): + # some corner cases + idx = Index(['three', 'one', 'two']) + result = idx.join(self.index, level='second') + assert isinstance(result, MultiIndex) + + tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous", + self.index.join, self.index, level=1) + + def test_join_self(self, join_type): + res = self.index + joined = res.join(res, how=join_type) + assert res is joined + + def test_join_multi(self): + # GH 10665 + midx = pd.MultiIndex.from_product( + [np.arange(4), np.arange(4)], names=['a', 'b']) + idx = pd.Index([1, 2, 5], name='b') + + # inner + jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True) + exp_idx = pd.MultiIndex.from_product( + [np.arange(4), [1, 2]], names=['a', 'b']) + exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp) + exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], 
dtype=np.intp) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + # keep MultiIndex + jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True) + exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, + 1, -1], dtype=np.intp) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) diff --git a/pandas/tests/indexes/multi/test_misc.py b/pandas/tests/indexes/multi/test_misc.py new file mode 100644 index 0000000000000..fcbea198a84dd --- /dev/null +++ b/pandas/tests/indexes/multi/test_misc.py @@ -0,0 +1,1289 @@ +# -*- coding: utf-8 -*- + +import re + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (DataFrame, Index, MultiIndex, date_range, period_range) +from pandas.compat import PY3, long, lrange, lzip, range +from pandas.errors import PerformanceWarning, UnsortedIndexError +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas.util.testing as tm + +from pandas.util.testing import assert_almost_equal, assert_copy + +from pandas.tests.indexes.common import Base + + +class TestMultiIndex(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + 
self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_boolean_context_compat2(self): + + # boolean context compat + # GH7897 + i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)]) + common = i1.intersection(i2) + + def f(): + if common: + pass + + tm.assert_raises_regex(ValueError, 'The truth value of a', f) + + def test_labels_dtypes(self): + + # GH 8456 + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + assert i.labels[0].dtype == 'int8' + assert i.labels[1].dtype == 'int8' + + i = MultiIndex.from_product([['a'], range(40)]) + assert i.labels[1].dtype == 'int8' + i = MultiIndex.from_product([['a'], range(400)]) + assert i.labels[1].dtype == 'int16' + i = MultiIndex.from_product([['a'], range(40000)]) + assert i.labels[1].dtype == 'int32' + + i = pd.MultiIndex.from_product([['a'], range(1000)]) + assert (i.labels[0] >= 0).all() + assert (i.labels[1] >= 0).all() + + def test_where(self): + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + + def f(): + i.where(True) + + pytest.raises(NotImplementedError, f) + + def test_where_array_like(self): + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + klasses = [list, tuple, np.array, pd.Series] + cond = [False, True] + + for klass in klasses: + f = lambda: i.where(klass(cond)) + pytest.raises(NotImplementedError, f) + + def test_repeat(self): + reps = 2 + numbers = [1, 2, 3] + names = np.array(['foo', 'bar']) + + m = MultiIndex.from_product([ + numbers, names], names=names) + expected = MultiIndex.from_product([ + numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(m.repeat(reps), expected) + + with tm.assert_produces_warning(FutureWarning): + result = m.repeat(n=reps) + tm.assert_index_equal(result, expected) + + def 
test_numpy_repeat(self): + reps = 2 + numbers = [1, 2, 3] + names = np.array(['foo', 'bar']) + + m = MultiIndex.from_product([ + numbers, names], names=names) + expected = MultiIndex.from_product([ + numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(np.repeat(m, reps), expected) + + msg = "the 'axis' parameter is not supported" + tm.assert_raises_regex( + ValueError, msg, np.repeat, m, reps, axis=1) + + def test_metadata_immutable(self): + levels, labels = self.index.levels, self.index.labels + # shouldn't be able to set at either the top level or base level + mutable_regex = re.compile('does not support mutable operations') + with tm.assert_raises_regex(TypeError, mutable_regex): + levels[0] = levels[0] + with tm.assert_raises_regex(TypeError, mutable_regex): + levels[0][0] = levels[0][0] + # ditto for labels + with tm.assert_raises_regex(TypeError, mutable_regex): + labels[0] = labels[0] + with tm.assert_raises_regex(TypeError, mutable_regex): + labels[0][0] = labels[0][0] + # and for names + names = self.index.names + with tm.assert_raises_regex(TypeError, mutable_regex): + names[0] = names[0] + + def test_inplace_mutation_resets_values(self): + levels = [['a', 'b', 'c'], [4]] + levels2 = [[1, 2, 3], ['a']] + labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] + + mi1 = MultiIndex(levels=levels, labels=labels) + mi2 = MultiIndex(levels=levels2, labels=labels) + vals = mi1.values.copy() + vals2 = mi2.values.copy() + + assert mi1._tuples is not None + + # Make sure level setting works + new_vals = mi1.set_levels(levels2).values + tm.assert_almost_equal(vals2, new_vals) + + # Non-inplace doesn't kill _tuples [implementation detail] + tm.assert_almost_equal(mi1._tuples, vals) + + # ...and values is still same too + tm.assert_almost_equal(mi1.values, vals) + + # Inplace should kill _tuples + mi1.set_levels(levels2, inplace=True) + tm.assert_almost_equal(mi1.values, vals2) + + # Make sure label setting works too + labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 
0, 0, 0, 0]] + exp_values = np.empty((6,), dtype=object) + exp_values[:] = [(long(1), 'a')] * 6 + + # Must be 1d array of tuples + assert exp_values.shape == (6,) + new_values = mi2.set_labels(labels2).values + + # Not inplace shouldn't change + tm.assert_almost_equal(mi2._tuples, vals2) + + # Should have correct values + tm.assert_almost_equal(exp_values, new_values) + + # ...and again setting inplace should kill _tuples, etc + mi2.set_labels(labels2, inplace=True) + tm.assert_almost_equal(mi2.values, new_values) + + def test_astype(self): + expected = self.index.copy() + actual = self.index.astype('O') + assert_copy(actual.levels, expected.levels) + assert_copy(actual.labels, expected.labels) + self.check_level_names(actual, expected.names) + + with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): + self.index.astype(np.dtype(int)) + + @pytest.mark.parametrize('ordered', [True, False]) + def test_astype_category(self, ordered): + # GH 18630 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + self.index.astype(CategoricalDtype(ordered=ordered)) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + with tm.assert_raises_regex(NotImplementedError, msg): + self.index.astype('category') + + def assert_multiindex_copied(self, copy, original): + # Levels should be (at least, shallow copied) + tm.assert_copy(copy.levels, original.levels) + tm.assert_almost_equal(copy.labels, original.labels) + + # Labels doesn't matter which way copied + tm.assert_almost_equal(copy.labels, original.labels) + assert copy.labels is not original.labels + + # Names doesn't matter which way copied + assert copy.names == original.names + assert copy.names is not original.names + + # Sort order should be copied + assert copy.sortorder == original.sortorder + + def test_copy(self): + i_copy = self.index.copy() + + self.assert_multiindex_copied(i_copy, self.index) + + def 
test_shallow_copy(self): + i_copy = self.index._shallow_copy() + + self.assert_multiindex_copied(i_copy, self.index) + + def test_view(self): + i_view = self.index.view() + + self.assert_multiindex_copied(i_view, self.index) + + def test_values_boxed(self): + tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT), + (3, pd.Timestamp('2000-01-03')), + (1, pd.Timestamp('2000-01-04')), + (2, pd.Timestamp('2000-01-02')), + (3, pd.Timestamp('2000-01-03'))] + result = pd.MultiIndex.from_tuples(tuples) + expected = construct_1d_object_array_from_listlike(tuples) + tm.assert_numpy_array_equal(result.values, expected) + # Check that code branches for boxed values produce identical results + tm.assert_numpy_array_equal(result.values[:4], result[:4].values) + + def test_values_multiindex_datetimeindex(self): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(10 ** 18, 10 ** 18 + 5) + naive = pd.DatetimeIndex(ints) + aware = pd.DatetimeIndex(ints, tz='US/Central') + + idx = pd.MultiIndex.from_arrays([naive, aware]) + result = idx.values + + outer = pd.DatetimeIndex([x[0] for x in result]) + tm.assert_index_equal(outer, naive) + + inner = pd.DatetimeIndex([x[1] for x in result]) + tm.assert_index_equal(inner, aware) + + # n_lev > n_lab + result = idx[:2].values + + outer = pd.DatetimeIndex([x[0] for x in result]) + tm.assert_index_equal(outer, naive[:2]) + + inner = pd.DatetimeIndex([x[1] for x in result]) + tm.assert_index_equal(inner, aware[:2]) + + def test_values_multiindex_periodindex(self): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(2007, 2012) + pidx = pd.PeriodIndex(ints, freq='D') + + idx = pd.MultiIndex.from_arrays([ints, pidx]) + result = idx.values + + outer = pd.Int64Index([x[0] for x in result]) + tm.assert_index_equal(outer, pd.Int64Index(ints)) + + inner = pd.PeriodIndex([x[1] for x in result]) + tm.assert_index_equal(inner, pidx) + + # n_lev > n_lab + result = idx[:2].values + + outer = 
pd.Int64Index([x[0] for x in result]) + tm.assert_index_equal(outer, pd.Int64Index(ints[:2])) + + inner = pd.PeriodIndex([x[1] for x in result]) + tm.assert_index_equal(inner, pidx[:2]) + + def test_append(self): + result = self.index[:3].append(self.index[3:]) + assert result.equals(self.index) + + foos = [self.index[:1], self.index[1:3], self.index[3:]] + result = foos[0].append(foos[1:]) + assert result.equals(self.index) + + # empty + result = self.index.append([]) + assert result.equals(self.index) + + def test_append_mixed_dtypes(self): + # GH 13660 + dti = date_range('2011-01-01', freq='M', periods=3, ) + dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') + pi = period_range('2011-01', freq='M', periods=3) + + mi = MultiIndex.from_arrays([[1, 2, 3], + [1.1, np.nan, 3.3], + ['a', 'b', 'c'], + dti, dti_tz, pi]) + assert mi.nlevels == 6 + + res = mi.append(mi) + exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], + [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], + ['a', 'b', 'c', 'a', 'b', 'c'], + dti.append(dti), + dti_tz.append(dti_tz), + pi.append(pi)]) + tm.assert_index_equal(res, exp) + + other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], + ['x', 'y', 'z'], ['x', 'y', 'z'], + ['x', 'y', 'z'], ['x', 'y', 'z']]) + + res = mi.append(other) + exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], + [1.1, np.nan, 3.3, 'x', 'y', 'z'], + ['a', 'b', 'c', 'x', 'y', 'z'], + dti.append(pd.Index(['x', 'y', 'z'])), + dti_tz.append(pd.Index(['x', 'y', 'z'])), + pi.append(pd.Index(['x', 'y', 'z']))]) + tm.assert_index_equal(res, exp) + + def test_reorder_levels(self): + # this blows up + tm.assert_raises_regex(IndexError, '^Too many levels', + self.index.reorder_levels, [2, 1, 0]) + + def test_nlevels(self): + assert self.index.nlevels == 2 + + def test_iter(self): + result = list(self.index) + expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), + ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] + assert result == expected + + def 
test_legacy_pickle(self): + if PY3: + pytest.skip("testing for legacy pickles not " + "support on py3") + + path = tm.get_data_path('multiindex_v1.pickle') + obj = pd.read_pickle(path) + + obj2 = MultiIndex.from_tuples(obj.values) + assert obj.equals(obj2) + + res = obj.get_indexer(obj) + exp = np.arange(len(obj), dtype=np.intp) + assert_almost_equal(res, exp) + + res = obj.get_indexer(obj2[::-1]) + exp = obj.get_indexer(obj[::-1]) + exp2 = obj2.get_indexer(obj2[::-1]) + assert_almost_equal(res, exp) + assert_almost_equal(exp, exp2) + + def test_legacy_v2_unpickle(self): + + # 0.7.3 -> 0.8.0 format manage + path = tm.get_data_path('mindex_073.pickle') + obj = pd.read_pickle(path) + + obj2 = MultiIndex.from_tuples(obj.values) + assert obj.equals(obj2) + + res = obj.get_indexer(obj) + exp = np.arange(len(obj), dtype=np.intp) + assert_almost_equal(res, exp) + + res = obj.get_indexer(obj2[::-1]) + exp = obj.get_indexer(obj[::-1]) + exp2 = obj2.get_indexer(obj2[::-1]) + assert_almost_equal(res, exp) + assert_almost_equal(exp, exp2) + + def test_roundtrip_pickle_with_tz(self): + + # GH 8367 + # round-trip of timezone + index = MultiIndex.from_product( + [[1, 2], ['a', 'b'], date_range('20130101', periods=3, + tz='US/Eastern') + ], names=['one', 'two', 'three']) + unpickled = tm.round_trip_pickle(index) + assert index.equal_levels(unpickled) + + def test_from_tuples_index_values(self): + result = MultiIndex.from_tuples(self.index) + assert (result.values == self.index.values).all() + + def test_is_all_dates(self): + assert not self.index.is_all_dates + + def test_is_numeric(self): + # MultiIndex is never numeric + assert not self.index.is_numeric() + + def test_consistency(self): + # need to construct an overflow + major_axis = lrange(70000) + minor_axis = lrange(10) + + major_labels = np.arange(70000) + minor_labels = np.repeat(lrange(10), 7000) + + # the fact that is works means it's consistent + index = MultiIndex(levels=[major_axis, minor_axis], + 
labels=[major_labels, minor_labels]) + + # inconsistent + major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1]) + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + + assert not index.is_unique + + def test_truncate(self): + major_axis = Index(lrange(4)) + minor_axis = Index(lrange(2)) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + + result = index.truncate(before=1) + assert 'foo' not in result.levels[0] + assert 1 in result.levels[0] + + result = index.truncate(after=1) + assert 2 not in result.levels[0] + assert 1 in result.levels[0] + + result = index.truncate(before=1, after=2) + assert len(result.levels[0]) == 2 + + # after < before + pytest.raises(ValueError, index.truncate, 3, 1) + + def test_hash_collisions(self): + # non-smoke test that we don't get hash collisions + + index = MultiIndex.from_product([np.arange(1000), np.arange(1000)], + names=['one', 'two']) + result = index.get_indexer(index.values) + tm.assert_numpy_array_equal(result, np.arange( + len(index), dtype='intp')) + + for i in [0, 1, len(index) - 2, len(index) - 1]: + result = index.get_loc(index[i]) + assert result == i + + def test_to_frame(self): + tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')] + + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')] + index = MultiIndex.from_tuples(tuples, names=['first', 'second']) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + expected.columns = ['first', 'second'] + tm.assert_frame_equal(result, expected) + + result = 
index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + index = MultiIndex.from_product([range(5), + pd.date_range('20130101', periods=3)]) + result = index.to_frame(index=False) + expected = DataFrame( + {0: np.repeat(np.arange(5, dtype='int64'), 3), + 1: np.tile(pd.date_range('20130101', periods=3), 5)}) + tm.assert_frame_equal(result, expected) + + index = MultiIndex.from_product([range(5), + pd.date_range('20130101', periods=3)]) + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + def test_to_hierarchical(self): + index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), ( + 2, 'two')]) + result = index.to_hierarchical(3) + expected = MultiIndex(levels=[[1, 2], ['one', 'two']], + labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) + tm.assert_index_equal(result, expected) + assert result.names == index.names + + # K > 1 + result = index.to_hierarchical(3, 2) + expected = MultiIndex(levels=[[1, 2], ['one', 'two']], + labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]]) + tm.assert_index_equal(result, expected) + assert result.names == index.names + + # non-sorted + index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'), + (2, 'a'), (2, 'b')], + names=['N1', 'N2']) + + result = index.to_hierarchical(2) + expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), + (1, 'b'), + (2, 'a'), (2, 'a'), + (2, 'b'), (2, 'b')], + names=['N1', 'N2']) + tm.assert_index_equal(result, expected) + assert result.names == index.names + + def test_bounds(self): + self.index._bounds + + def test_equals_multi(self): + assert self.index.equals(self.index) + assert not self.index.equals(self.index.values) + assert self.index.equals(Index(self.index.values)) + + assert self.index.equal_levels(self.index) + assert not self.index.equals(self.index[:-1]) + assert not self.index.equals(self.index[-1]) + + # different number of 
levels + index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( + lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( + [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) + + index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1]) + assert not index.equals(index2) + assert not index.equal_levels(index2) + + # levels are different + major_axis = Index(lrange(4)) + minor_axis = Index(lrange(2)) + + major_labels = np.array([0, 0, 1, 2, 2, 3]) + minor_labels = np.array([0, 1, 0, 0, 1, 0]) + + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + assert not self.index.equals(index) + assert not self.index.equal_levels(index) + + # some of the labels are different + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 2, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + assert not self.index.equals(index) + + def test_equals_missing_values(self): + # make sure take is not using -1 + i = pd.MultiIndex.from_tuples([(0, pd.NaT), + (0, pd.Timestamp('20130101'))]) + result = i[0:1].equals(i[0]) + assert not result + result = i[1:2].equals(i[1]) + assert not result + + def test_identical(self): + mi = self.index.copy() + mi2 = self.index.copy() + assert mi.identical(mi2) + + mi = mi.set_names(['new1', 'new2']) + assert mi.equals(mi2) + assert not mi.identical(mi2) + + mi2 = mi2.set_names(['new1', 'new2']) + assert mi.identical(mi2) + + mi3 = Index(mi.tolist(), names=mi.names) + mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False) + assert mi.identical(mi3) + assert not mi.identical(mi4) + assert mi.equals(mi4) + + def test_is_(self): + mi = MultiIndex.from_tuples(lzip(range(10), range(10))) + assert mi.is_(mi) + assert mi.is_(mi.view()) + assert mi.is_(mi.view().view().view().view()) + mi2 = mi.view() + 
# names are metadata, they don't change id + mi2.names = ["A", "B"] + assert mi2.is_(mi) + assert mi.is_(mi2) + + assert mi.is_(mi.set_names(["C", "D"])) + mi2 = mi.view() + mi2.set_names(["E", "F"], inplace=True) + assert mi.is_(mi2) + # levels are inherent properties, they change identity + mi3 = mi2.set_levels([lrange(10), lrange(10)]) + assert not mi3.is_(mi2) + # shouldn't change + assert mi2.is_(mi) + mi4 = mi3.view() + + # GH 17464 - Remove duplicate MultiIndex levels + mi4.set_levels([lrange(10), lrange(10)], inplace=True) + assert not mi4.is_(mi3) + mi5 = mi.view() + mi5.set_levels(mi5.levels, inplace=True) + assert not mi5.is_(mi) + + def test_union(self): + piece1 = self.index[:5][::-1] + piece2 = self.index[3:] + + the_union = piece1 | piece2 + + tups = sorted(self.index.values) + expected = MultiIndex.from_tuples(tups) + + assert the_union.equals(expected) + + # corner case, pass self or empty thing: + the_union = self.index.union(self.index) + assert the_union is self.index + + the_union = self.index.union(self.index[:0]) + assert the_union is self.index + + # won't work in python 3 + # tuples = self.index.values + # result = self.index[:4] | tuples[4:] + # assert result.equals(tuples) + + # not valid for python 3 + # def test_union_with_regular_index(self): + # other = Index(['A', 'B', 'C']) + + # result = other.union(self.index) + # assert ('foo', 'one') in result + # assert 'B' in result + + # result2 = self.index.union(other) + # assert result.equals(result2) + + def test_intersection(self): + piece1 = self.index[:5][::-1] + piece2 = self.index[3:] + + the_int = piece1 & piece2 + tups = sorted(self.index[3:5].values) + expected = MultiIndex.from_tuples(tups) + assert the_int.equals(expected) + + # corner case, pass self + the_int = self.index.intersection(self.index) + assert the_int is self.index + + # empty intersection: disjoint + empty = self.index[:2] & self.index[2:] + expected = self.index[:0] + assert empty.equals(expected) + + # can't do 
in python 3 + # tuples = self.index.values + # result = self.index & tuples + # assert result.equals(tuples) + + def test_sub(self): + + first = self.index + + # - now raises (previously was set op difference) + with pytest.raises(TypeError): + first - self.index[-3:] + with pytest.raises(TypeError): + self.index[-3:] - first + with pytest.raises(TypeError): + self.index[-3:] - first.tolist() + with pytest.raises(TypeError): + first.tolist() - self.index[-3:] + + def test_difference(self): + + first = self.index + result = first.difference(self.index[-3:]) + expected = MultiIndex.from_tuples(sorted(self.index[:-3].values), + sortorder=0, + names=self.index.names) + + assert isinstance(result, MultiIndex) + assert result.equals(expected) + assert result.names == self.index.names + + # empty difference: reflexive + result = self.index.difference(self.index) + expected = self.index[:0] + assert result.equals(expected) + assert result.names == self.index.names + + # empty difference: superset + result = self.index[-3:].difference(self.index) + expected = self.index[:0] + assert result.equals(expected) + assert result.names == self.index.names + + # empty difference: degenerate + result = self.index[:0].difference(self.index) + expected = self.index[:0] + assert result.equals(expected) + assert result.names == self.index.names + + # names not the same + chunklet = self.index[-3:] + chunklet.names = ['foo', 'baz'] + result = first.difference(chunklet) + assert result.names == (None, None) + + # empty, but non-equal + result = self.index.difference(self.index.sortlevel(1)[0]) + assert len(result) == 0 + + # raise Exception called with non-MultiIndex + result = first.difference(first.values) + assert result.equals(first[:0]) + + # name from empty array + result = first.difference([]) + assert first.equals(result) + assert first.names == result.names + + # name from non-empty array + result = first.difference([('foo', 'one')]) + expected = pd.MultiIndex.from_tuples([('bar', 
'one'), ('baz', 'two'), ( + 'foo', 'two'), ('qux', 'one'), ('qux', 'two')]) + expected.names = first.names + assert first.names == result.names + tm.assert_raises_regex(TypeError, "other must be a MultiIndex " + "or a list of tuples", + first.difference, [1, 2, 3, 4, 5]) + + def test_argsort(self): + result = self.index.argsort() + expected = self.index.values.argsort() + tm.assert_numpy_array_equal(result, expected) + + def test_dims(self): + pass + + def test_insert(self): + # key contained in all levels + new_index = self.index.insert(0, ('bar', 'two')) + assert new_index.equal_levels(self.index) + assert new_index[0] == ('bar', 'two') + + # key not contained in all levels + new_index = self.index.insert(0, ('abc', 'three')) + + exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first') + tm.assert_index_equal(new_index.levels[0], exp0) + + exp1 = Index(list(self.index.levels[1]) + ['three'], name='second') + tm.assert_index_equal(new_index.levels[1], exp1) + assert new_index[0] == ('abc', 'three') + + # key wrong length + msg = "Item must have length equal to number of levels" + with tm.assert_raises_regex(ValueError, msg): + self.index.insert(0, ('foo2',)) + + left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], + columns=['1st', '2nd', '3rd']) + left.set_index(['1st', '2nd'], inplace=True) + ts = left['3rd'].copy(deep=True) + + left.loc[('b', 'x'), '3rd'] = 2 + left.loc[('b', 'a'), '3rd'] = -1 + left.loc[('b', 'b'), '3rd'] = 3 + left.loc[('a', 'x'), '3rd'] = 4 + left.loc[('a', 'w'), '3rd'] = 5 + left.loc[('a', 'a'), '3rd'] = 6 + + ts.loc[('b', 'x')] = 2 + ts.loc['b', 'a'] = -1 + ts.loc[('b', 'b')] = 3 + ts.loc['a', 'x'] = 4 + ts.loc[('a', 'w')] = 5 + ts.loc['a', 'a'] = 6 + + right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], + ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], + ['a', 'w', 5], ['a', 'a', 6]], + columns=['1st', '2nd', '3rd']) + right.set_index(['1st', '2nd'], inplace=True) + # FIXME data types changes to float because + # of 
intermediate nan insertion; + tm.assert_frame_equal(left, right, check_dtype=False) + tm.assert_series_equal(ts, right['3rd']) + + # GH9250 + idx = [('test1', i) for i in range(5)] + \ + [('test2', i) for i in range(6)] + \ + [('test', 17), ('test', 18)] + + left = pd.Series(np.linspace(0, 10, 11), + pd.MultiIndex.from_tuples(idx[:-2])) + + left.loc[('test', 17)] = 11 + left.loc[('test', 18)] = 12 + + right = pd.Series(np.linspace(0, 12, 13), + pd.MultiIndex.from_tuples(idx)) + + tm.assert_series_equal(left, right) + + def test_take_fill_value(self): + # GH 12631 + vals = [['A', 'B'], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] + idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) + + result = idx.take(np.array([1, 0, -1])) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + ('B', pd.Timestamp('2011-01-02'))] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + (np.nan, pd.NaT)] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + ('B', pd.Timestamp('2011-01-02'))] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + def 
take_invalid_kwargs(self): + vals = [['A', 'B'], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] + idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, idx.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, mode='clip') + + def test_tolist(self): + result = self.index.tolist() + exp = list(self.index.values) + assert result == exp + + def test_str(self): + # tested elsewhere + pass + + def test_isna_behavior(self): + # should not segfault GH5123 + # NOTE: if MI representation changes, may make sense to allow + # isna(MI) + with pytest.raises(NotImplementedError): + pd.isna(self.index) + + def test_level_setting_resets_attributes(self): + ind = pd.MultiIndex.from_arrays([ + ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3] + ]) + assert ind.is_monotonic + ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True) + # if this fails, probably didn't reset the cache correctly. 
+ assert not ind.is_monotonic + + def test_reconstruct_sort(self): + + # starts off lexsorted & monotonic + mi = MultiIndex.from_arrays([ + ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3] + ]) + assert mi.is_lexsorted() + assert mi.is_monotonic + + recons = mi._sort_levels_monotonic() + assert recons.is_lexsorted() + assert recons.is_monotonic + assert mi is recons + + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + # cannot convert to lexsorted + mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'), + ('x', 'b'), ('y', 'a'), ('z', 'b')], + names=['one', 'two']) + assert not mi.is_lexsorted() + assert not mi.is_monotonic + + recons = mi._sort_levels_monotonic() + assert not recons.is_lexsorted() + assert not recons.is_monotonic + + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + # cannot convert to lexsorted + mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]], + labels=[[0, 1, 0, 2], [2, 0, 0, 1]], + names=['col1', 'col2']) + assert not mi.is_lexsorted() + assert not mi.is_monotonic + + recons = mi._sort_levels_monotonic() + assert not recons.is_lexsorted() + assert not recons.is_monotonic + + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + def test_reconstruct_remove_unused(self): + # xref to GH 2770 + df = DataFrame([['deleteMe', 1, 9], + ['keepMe', 2, 9], + ['keepMeToo', 3, 9]], + columns=['first', 'second', 'third']) + df2 = df.set_index(['first', 'second'], drop=False) + df2 = df2[df2['first'] != 'deleteMe'] + + # removed levels are there + expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'], + [1, 2, 3]], + labels=[[1, 2], [1, 2]], + names=['first', 'second']) + result = df2.index + tm.assert_index_equal(result, expected) + + expected = MultiIndex(levels=[['keepMe', 'keepMeToo'], + [2, 3]], + labels=[[0, 1], [0, 1]], + names=['first', 'second']) + result = df2.index.remove_unused_levels() + tm.assert_index_equal(result, 
expected) + + # idempotent + result2 = result.remove_unused_levels() + tm.assert_index_equal(result2, expected) + assert result2.is_(result) + + @pytest.mark.parametrize('level0', [['a', 'd', 'b'], + ['a', 'd', 'b', 'unused']]) + @pytest.mark.parametrize('level1', [['w', 'x', 'y', 'z'], + ['w', 'x', 'y', 'z', 'unused']]) + def test_remove_unused_nan(self, level0, level1): + # GH 18417 + mi = pd.MultiIndex(levels=[level0, level1], + labels=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]]) + + result = mi.remove_unused_levels() + tm.assert_index_equal(result, mi) + for level in 0, 1: + assert('unused' not in result.levels[level]) + + @pytest.mark.parametrize('first_type,second_type', [ + ('int64', 'int64'), + ('datetime64[D]', 'str')]) + def test_remove_unused_levels_large(self, first_type, second_type): + # GH16556 + + # because tests should be deterministic (and this test in particular + # checks that levels are removed, which is not the case for every + # random input): + rng = np.random.RandomState(4) # seed is arbitrary value that works + + size = 1 << 16 + df = DataFrame(dict( + first=rng.randint(0, 1 << 13, size).astype(first_type), + second=rng.randint(0, 1 << 10, size).astype(second_type), + third=rng.rand(size))) + df = df.groupby(['first', 'second']).sum() + df = df[df.third < 0.1] + + result = df.index.remove_unused_levels() + assert len(result.levels[0]) < len(df.index.levels[0]) + assert len(result.levels[1]) < len(df.index.levels[1]) + assert result.equals(df.index) + + expected = df.reset_index().set_index(['first', 'second']).index + tm.assert_index_equal(result, expected) + + def test_groupby(self): + groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2])) + labels = self.index.get_values().tolist() + exp = {1: labels[:3], 2: labels[3:]} + tm.assert_dict_equal(groups, exp) + + # GH5620 + groups = self.index.groupby(self.index) + exp = {key: [key] for key in self.index} + tm.assert_dict_equal(groups, exp) + + def test_equals_operator(self): + # GH9785 + assert 
(self.index == self.index).all() + + def test_large_multiindex_error(self): + # GH12527 + df_below_1000000 = pd.DataFrame( + 1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), + columns=['dest']) + with pytest.raises(KeyError): + df_below_1000000.loc[(-1, 0), 'dest'] + with pytest.raises(KeyError): + df_below_1000000.loc[(3, 0), 'dest'] + df_above_1000000 = pd.DataFrame( + 1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), + columns=['dest']) + with pytest.raises(KeyError): + df_above_1000000.loc[(-1, 0), 'dest'] + with pytest.raises(KeyError): + df_above_1000000.loc[(3, 0), 'dest'] + + def test_partial_string_timestamp_multiindex(self): + # GH10331 + dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H') + abc = ['a', 'b', 'c'] + ix = pd.MultiIndex.from_product([dr, abc]) + df = pd.DataFrame({'c1': range(0, 15)}, index=ix) + idx = pd.IndexSlice + + # c1 + # 2016-01-01 00:00:00 a 0 + # b 1 + # c 2 + # 2016-01-01 12:00:00 a 3 + # b 4 + # c 5 + # 2016-01-02 00:00:00 a 6 + # b 7 + # c 8 + # 2016-01-02 12:00:00 a 9 + # b 10 + # c 11 + # 2016-01-03 00:00:00 a 12 + # b 13 + # c 14 + + # partial string matching on a single index + for df_swap in (df.swaplevel(), + df.swaplevel(0), + df.swaplevel(0, 1)): + df_swap = df_swap.sort_index() + just_a = df_swap.loc['a'] + result = just_a.loc['2016-01-01'] + expected = df.loc[idx[:, 'a'], :].iloc[0:2] + expected.index = expected.index.droplevel(1) + tm.assert_frame_equal(result, expected) + + # indexing with IndexSlice + result = df.loc[idx['2016-01-01':'2016-02-01', :], :] + expected = df + tm.assert_frame_equal(result, expected) + + # match on secondary index + result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :] + expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]] + tm.assert_frame_equal(result, expected) + + # Even though this syntax works on a single index, this is somewhat + # ambiguous and we don't want to extend this behavior forward to work + # in multi-indexes. 
This would amount to selecting a scalar from a + # column. + with pytest.raises(KeyError): + df['2016-01-01'] + + # partial string match on year only + result = df.loc['2016'] + expected = df + tm.assert_frame_equal(result, expected) + + # partial string match on date + result = df.loc['2016-01-01'] + expected = df.iloc[0:6] + tm.assert_frame_equal(result, expected) + + # partial string match on date and hour, from middle + result = df.loc['2016-01-02 12'] + expected = df.iloc[9:12] + tm.assert_frame_equal(result, expected) + + # partial string match on secondary index + result = df_swap.loc[idx[:, '2016-01-02'], :] + expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]] + tm.assert_frame_equal(result, expected) + + # tuple selector with partial string match on date + result = df.loc[('2016-01-01', 'a'), :] + expected = df.iloc[[0, 3]] + tm.assert_frame_equal(result, expected) + + # Slicing date on first level should break (of course) + with pytest.raises(KeyError): + df_swap.loc['2016-01-01'] + + # GH12685 (partial string with daily resolution or below) + dr = date_range('2013-01-01', periods=100, freq='D') + ix = MultiIndex.from_product([dr, ['a', 'b']]) + df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix) + + result = df.loc[idx['2013-03':'2013-03', :], :] + expected = df.iloc[118:180] + tm.assert_frame_equal(result, expected) + + def test_rangeindex_fallback_coercion_bug(self): + # GH 12893 + foo = pd.DataFrame(np.arange(100).reshape((10, 10))) + bar = pd.DataFrame(np.arange(100).reshape((10, 10))) + df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1) + df.index.names = ['fizz', 'buzz'] + + str(df) + expected = pd.DataFrame({'bar': np.arange(100), + 'foo': np.arange(100)}, + index=pd.MultiIndex.from_product( + [range(10), range(10)], + names=['fizz', 'buzz'])) + tm.assert_frame_equal(df, expected, check_like=True) + + result = df.index.get_level_values('fizz') + expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10) + 
tm.assert_index_equal(result, expected) + + result = df.index.get_level_values('buzz') + expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz') + tm.assert_index_equal(result, expected) + + def test_dropna(self): + # GH 6194 + idx = pd.MultiIndex.from_arrays([[1, np.nan, 3, np.nan, 5], + [1, 2, np.nan, np.nan, 5], + ['a', 'b', 'c', np.nan, 'e']]) + + exp = pd.MultiIndex.from_arrays([[1, 5], + [1, 5], + ['a', 'e']]) + tm.assert_index_equal(idx.dropna(), exp) + tm.assert_index_equal(idx.dropna(how='any'), exp) + + exp = pd.MultiIndex.from_arrays([[1, np.nan, 3, 5], + [1, 2, np.nan, 5], + ['a', 'b', 'c', 'e']]) + tm.assert_index_equal(idx.dropna(how='all'), exp) + + msg = "invalid how option: xxx" + with tm.assert_raises_regex(ValueError, msg): + idx.dropna(how='xxx') + + def test_unsortedindex(self): + # GH 11897 + mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'), + ('x', 'b'), ('y', 'a'), ('z', 'b')], + names=['one', 'two']) + df = pd.DataFrame([[i, 10 * i] for i in lrange(6)], index=mi, + columns=['one', 'two']) + + # GH 16734: not sorted, but no real slicing + result = df.loc(axis=0)['z', 'a'] + expected = df.iloc[0] + tm.assert_series_equal(result, expected) + + with pytest.raises(UnsortedIndexError): + df.loc(axis=0)['z', slice('a')] + df.sort_index(inplace=True) + assert len(df.loc(axis=0)['z', :]) == 2 + + with pytest.raises(KeyError): + df.loc(axis=0)['q', :] + + def test_unsortedindex_doc_examples(self): + # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex # noqa + dfm = DataFrame({'jim': [0, 0, 1, 1], + 'joe': ['x', 'x', 'z', 'y'], + 'jolie': np.random.rand(4)}) + + dfm = dfm.set_index(['jim', 'joe']) + with tm.assert_produces_warning(PerformanceWarning): + dfm.loc[(1, 'z')] + + with pytest.raises(UnsortedIndexError): + dfm.loc[(0, 'y'):(1, 'z')] + + assert not dfm.index.is_lexsorted() + assert dfm.index.lexsort_depth == 1 + + # sort it + dfm = dfm.sort_index() + dfm.loc[(1, 'z')] + dfm.loc[(0, 
'y'):(1, 'z')] + + assert dfm.index.is_lexsorted() + assert dfm.index.lexsort_depth == 2 + + def test_nan_stays_float(self): + + # GH 7031 + idx0 = pd.MultiIndex(levels=[["A", "B"], []], + labels=[[1, 0], [-1, -1]], + names=[0, 1]) + idx1 = pd.MultiIndex(levels=[["C"], ["D"]], + labels=[[0], [0]], + names=[0, 1]) + idxm = idx0.join(idx1, how='outer') + assert pd.isna(idx0.get_level_values(1)).all() + # the following failed in 0.14.1 + assert pd.isna(idxm.get_level_values(1)[:-1]).all() + + df0 = pd.DataFrame([[1, 2]], index=idx0) + df1 = pd.DataFrame([[3, 4]], index=idx1) + dfm = df0 - df1 + assert pd.isna(df0.index.get_level_values(1)).all() + # the following failed in 0.14.1 + assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + def test_million_record_attribute_error(self): + # GH 18165 + r = list(range(1000000)) + df = pd.DataFrame({'a': r, 'b': r}, + index=pd.MultiIndex.from_tuples([(x, x) for x in r])) + + with tm.assert_raises_regex(AttributeError, + "'Series' object has no attribute 'foo'"): + df['a'].foo() diff --git a/pandas/tests/indexes/multi/test_monotonic.py b/pandas/tests/indexes/multi/test_monotonic.py new file mode 100644 index 0000000000000..16f8a602ccfee --- /dev/null +++ b/pandas/tests/indexes/multi/test_monotonic.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +from pandas.tests.indexes.common import Base + + +class TestMonotonic(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + 
self.setup_indices() + + def create_index(self): + return self.index + + def test_is_monotonic_increasing(self): + i = MultiIndex.from_product([np.arange(10), + np.arange(10)], names=['one', 'two']) + assert i.is_monotonic + assert i._is_strictly_monotonic_increasing + assert Index(i.values).is_monotonic + assert i._is_strictly_monotonic_increasing + + i = MultiIndex.from_product([np.arange(10, 0, -1), + np.arange(10)], names=['one', 'two']) + assert not i.is_monotonic + assert not i._is_strictly_monotonic_increasing + assert not Index(i.values).is_monotonic + assert not Index(i.values)._is_strictly_monotonic_increasing + + i = MultiIndex.from_product([np.arange(10), + np.arange(10, 0, -1)], + names=['one', 'two']) + assert not i.is_monotonic + assert not i._is_strictly_monotonic_increasing + assert not Index(i.values).is_monotonic + assert not Index(i.values)._is_strictly_monotonic_increasing + + i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']]) + assert not i.is_monotonic + assert not i._is_strictly_monotonic_increasing + assert not Index(i.values).is_monotonic + assert not Index(i.values)._is_strictly_monotonic_increasing + + # string ordering + i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], + ['one', 'two', 'three']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['first', 'second']) + assert not i.is_monotonic + assert not Index(i.values).is_monotonic + assert not i._is_strictly_monotonic_increasing + assert not Index(i.values)._is_strictly_monotonic_increasing + + i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'], + ['mom', 'next', 'zenith']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['first', 'second']) + assert i.is_monotonic + assert Index(i.values).is_monotonic + assert i._is_strictly_monotonic_increasing + assert Index(i.values)._is_strictly_monotonic_increasing + + # mixed levels, hits the TypeError + i = MultiIndex( + levels=[[1, 2, 3, 4], 
['gb00b03mlx29', 'lu0197800237', + 'nl0000289783', + 'nl0000289965', 'nl0000301109']], + labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], + names=['household_id', 'asset_id']) + + assert not i.is_monotonic + assert not i._is_strictly_monotonic_increasing + + # empty + i = MultiIndex.from_arrays([[], []]) + assert i.is_monotonic + assert Index(i.values).is_monotonic + assert i._is_strictly_monotonic_increasing + assert Index(i.values)._is_strictly_monotonic_increasing + + def test_is_monotonic_decreasing(self): + i = MultiIndex.from_product([np.arange(9, -1, -1), + np.arange(9, -1, -1)], + names=['one', 'two']) + assert i.is_monotonic_decreasing + assert i._is_strictly_monotonic_decreasing + assert Index(i.values).is_monotonic_decreasing + assert i._is_strictly_monotonic_decreasing + + i = MultiIndex.from_product([np.arange(10), + np.arange(10, 0, -1)], + names=['one', 'two']) + assert not i.is_monotonic_decreasing + assert not i._is_strictly_monotonic_decreasing + assert not Index(i.values).is_monotonic_decreasing + assert not Index(i.values)._is_strictly_monotonic_decreasing + + i = MultiIndex.from_product([np.arange(10, 0, -1), + np.arange(10)], names=['one', 'two']) + assert not i.is_monotonic_decreasing + assert not i._is_strictly_monotonic_decreasing + assert not Index(i.values).is_monotonic_decreasing + assert not Index(i.values)._is_strictly_monotonic_decreasing + + i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']]) + assert not i.is_monotonic_decreasing + assert not i._is_strictly_monotonic_decreasing + assert not Index(i.values).is_monotonic_decreasing + assert not Index(i.values)._is_strictly_monotonic_decreasing + + # string ordering + i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'], + ['three', 'two', 'one']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['first', 'second']) + assert not i.is_monotonic_decreasing + assert not Index(i.values).is_monotonic_decreasing + assert not 
i._is_strictly_monotonic_decreasing + assert not Index(i.values)._is_strictly_monotonic_decreasing + + i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'], + ['zenith', 'next', 'mom']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['first', 'second']) + assert i.is_monotonic_decreasing + assert Index(i.values).is_monotonic_decreasing + assert i._is_strictly_monotonic_decreasing + assert Index(i.values)._is_strictly_monotonic_decreasing + + # mixed levels, hits the TypeError + i = MultiIndex( + levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965', + 'nl0000289783', 'lu0197800237', + 'gb00b03mlx29']], + labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], + names=['household_id', 'asset_id']) + + assert not i.is_monotonic_decreasing + assert not i._is_strictly_monotonic_decreasing + + # empty + i = MultiIndex.from_arrays([[], []]) + assert i.is_monotonic_decreasing + assert Index(i.values).is_monotonic_decreasing + assert i._is_strictly_monotonic_decreasing + assert Index(i.values)._is_strictly_monotonic_decreasing + + def test_is_strictly_monotonic_increasing(self): + idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']], + labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) + assert idx.is_monotonic_increasing + assert not idx._is_strictly_monotonic_increasing + + def test_is_strictly_monotonic_decreasing(self): + idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']], + labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) + assert idx.is_monotonic_decreasing + assert not idx._is_strictly_monotonic_decreasing diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py new file mode 100644 index 0000000000000..fdf8fde822741 --- /dev/null +++ b/pandas/tests/indexes/multi/test_names.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +import pandas.util.testing as tm + +from pandas.tests.indexes.common 
import Base + + +class TestNames(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_copy_names(self): + # Check that adding a "names" parameter to the copy is honored + # GH14302 + multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2']) + multi_idx1 = multi_idx.copy() + + assert multi_idx.equals(multi_idx1) + assert multi_idx.names == ['MyName1', 'MyName2'] + assert multi_idx1.names == ['MyName1', 'MyName2'] + + multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2']) + + assert multi_idx.equals(multi_idx2) + assert multi_idx.names == ['MyName1', 'MyName2'] + assert multi_idx2.names == ['NewName1', 'NewName2'] + + multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2']) + + assert multi_idx.equals(multi_idx3) + assert multi_idx.names == ['MyName1', 'MyName2'] + assert multi_idx3.names == ['NewName1', 'NewName2'] + + def test_names(self): + + # names are assigned in setup + names = self.index_names + level_names = [level.name for level in self.index.levels] + assert names == level_names + + # setting bad names on existing + index = self.index + tm.assert_raises_regex(ValueError, "^Length of names", + setattr, index, "names", + list(index.names) + ["third"]) + tm.assert_raises_regex(ValueError, "^Length of names", + setattr, index, "names", []) + + # initializing with bad names (should always be equivalent) + major_axis, minor_axis = self.index.levels + major_labels, minor_labels = self.index.labels + 
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex, + levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=['first']) + tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex, + levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=['first', 'second', 'third']) + + # names are assigned + index.names = ["a", "b"] + ind_names = list(index.names) + level_names = [level.name for level in index.levels] + assert ind_names == level_names + + def check_level_names(self, index, names): + assert [level.name for level in index.levels] == list(names) + + def test_changing_names(self): + + # names should be applied to levels + level_names = [level.name for level in self.index.levels] + self.check_level_names(self.index, self.index.names) + + view = self.index.view() + copy = self.index.copy() + shallow_copy = self.index._shallow_copy() + + # changing names should change level names on object + new_names = [name + "a" for name in self.index.names] + self.index.names = new_names + self.check_level_names(self.index, new_names) + + # but not on copies + self.check_level_names(view, level_names) + self.check_level_names(copy, level_names) + self.check_level_names(shallow_copy, level_names) + + # and copies shouldn't change original + shallow_copy.names = [name + "c" for name in shallow_copy.names] + self.check_level_names(self.index, new_names) + + def test_take_preserve_name(self): + taken = self.index.take([3, 0, 1]) + assert taken.names == self.index.names + + def test_index_name_retained(self): + # GH9857 + result = pd.DataFrame({'x': [1, 2, 6], + 'y': [2, 2, 8], + 'z': [-5, 0, 5]}) + result = result.set_index('z') + result.loc[10] = [9, 10] + df_expected = pd.DataFrame({'x': [1, 2, 6, 9], + 'y': [2, 2, 8, 10], + 'z': [-5, 0, 5, 10]}) + df_expected = df_expected.set_index('z') + tm.assert_frame_equal(result, df_expected) + + def test_tuples_with_name_string(self): + # GH 15110 and GH 14848 + + li = 
[(0, 0, 1), (0, 1, 0), (1, 0, 0)] + with pytest.raises(ValueError): + pd.Index(li, name='abc') + with pytest.raises(ValueError): + pd.Index(li, name='a') diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py new file mode 100644 index 0000000000000..a97e6315f4cb2 --- /dev/null +++ b/pandas/tests/indexes/multi/test_reindex.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestReIndex(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_reindex(self): + result, indexer = self.index.reindex(list(self.index[:4])) + assert isinstance(result, MultiIndex) + self.check_level_names(result, self.index[:4].names) + + result, indexer = self.index.reindex(list(self.index)) + assert isinstance(result, MultiIndex) + assert indexer is None + self.check_level_names(result, self.index.names) + + def test_reindex_level(self): + idx = Index(['one']) + + target, indexer = self.index.reindex(idx, level='second') + target2, indexer2 = idx.reindex(self.index, level='second') + + exp_index = self.index.join(idx, level='second', how='right') + exp_index2 = self.index.join(idx, level='second', how='left') + + assert target.equals(exp_index) + exp_indexer = np.array([0, 2, 4]) + tm.assert_numpy_array_equal(indexer, exp_indexer, 
check_dtype=False) + + assert target2.equals(exp_index2) + exp_indexer2 = np.array([0, -1, 0, -1, 0, -1]) + tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False) + + tm.assert_raises_regex(TypeError, "Fill method not supported", + self.index.reindex, self.index, + method='pad', level='second') + + tm.assert_raises_regex(TypeError, "Fill method not supported", + idx.reindex, idx, method='bfill', + level='first') + + def test_reindex_preserves_names_when_target_is_list_or_ndarray(self): + # GH6552 + idx = self.index.copy() + target = idx.copy() + idx.names = target.names = [None, None] + + other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]]) + + # list & ndarray cases + assert idx.reindex([])[0].names == [None, None] + assert idx.reindex(np.array([]))[0].names == [None, None] + assert idx.reindex(target.tolist())[0].names == [None, None] + assert idx.reindex(target.values)[0].names == [None, None] + assert idx.reindex(other_dtype.tolist())[0].names == [None, None] + assert idx.reindex(other_dtype.values)[0].names == [None, None] + + idx.names = ['foo', 'bar'] + assert idx.reindex([])[0].names == ['foo', 'bar'] + assert idx.reindex(np.array([]))[0].names == ['foo', 'bar'] + assert idx.reindex(target.tolist())[0].names == ['foo', 'bar'] + assert idx.reindex(target.values)[0].names == ['foo', 'bar'] + assert idx.reindex(other_dtype.tolist())[0].names == ['foo', 'bar'] + assert idx.reindex(other_dtype.values)[0].names == ['foo', 'bar'] + + def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self): + # GH7774 + idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']], + names=['foo', 'bar']) + assert idx.reindex([], level=0)[0].names == ['foo', 'bar'] + assert idx.reindex([], level=1)[0].names == ['foo', 'bar'] + + def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self): + # GH7774 + idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']]) + assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64 + assert 
idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_ diff --git a/pandas/tests/indexes/multi/test_set.py b/pandas/tests/indexes/multi/test_set.py new file mode 100644 index 0000000000000..64b2e6f733a73 --- /dev/null +++ b/pandas/tests/indexes/multi/test_set.py @@ -0,0 +1,328 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, CategoricalIndex, MultiIndex) +from pandas.compat import range + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestSet(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_set_name_methods(self): + # so long as these are synonyms, we don't need to test set_names + assert self.index.rename == self.index.set_names + new_names = [name + "SUFFIX" for name in self.index_names] + ind = self.index.set_names(new_names) + assert self.index.names == self.index_names + assert ind.names == new_names + with tm.assert_raises_regex(ValueError, "^Length"): + ind.set_names(new_names + new_names) + new_names2 = [name + "SUFFIX2" for name in new_names] + res = ind.set_names(new_names2, inplace=True) + assert res is None + assert ind.names == new_names2 + + # set names for specific level (# GH7792) + ind = self.index.set_names(new_names[0], level=0) + assert self.index.names == self.index_names + assert ind.names == [new_names[0], self.index_names[1]] + + res = ind.set_names(new_names2[0], level=0, 
inplace=True) + assert res is None + assert ind.names == [new_names2[0], self.index_names[1]] + + # set names for multiple levels + ind = self.index.set_names(new_names, level=[0, 1]) + assert self.index.names == self.index_names + assert ind.names == new_names + + res = ind.set_names(new_names2, level=[0, 1], inplace=True) + assert res is None + assert ind.names == new_names2 + + def test_set_levels_labels_directly(self): + # setting levels/labels directly raises AttributeError + + levels = self.index.levels + new_levels = [[lev + 'a' for lev in level] for level in levels] + + labels = self.index.labels + major_labels, minor_labels = labels + major_labels = [(x + 1) % 3 for x in major_labels] + minor_labels = [(x + 1) % 1 for x in minor_labels] + new_labels = [major_labels, minor_labels] + + with pytest.raises(AttributeError): + self.index.levels = new_levels + + with pytest.raises(AttributeError): + self.index.labels = new_labels + + def test_set_levels(self): + # side note - you probably wouldn't want to use levels and labels + # directly like this - but it is possible. 
+ levels = self.index.levels + new_levels = [[lev + 'a' for lev in level] for level in levels] + + def assert_matching(actual, expected, check_dtype=False): + # avoid specifying internal representation + # as much as possible + assert len(actual) == len(expected) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp) + tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) + + # level changing [w/o mutation] + ind2 = self.index.set_levels(new_levels) + assert_matching(ind2.levels, new_levels) + assert_matching(self.index.levels, levels) + + # level changing [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_levels(new_levels, inplace=True) + assert inplace_return is None + assert_matching(ind2.levels, new_levels) + + # level changing specific level [w/o mutation] + ind2 = self.index.set_levels(new_levels[0], level=0) + assert_matching(ind2.levels, [new_levels[0], levels[1]]) + assert_matching(self.index.levels, levels) + + ind2 = self.index.set_levels(new_levels[1], level=1) + assert_matching(ind2.levels, [levels[0], new_levels[1]]) + assert_matching(self.index.levels, levels) + + # level changing multiple levels [w/o mutation] + ind2 = self.index.set_levels(new_levels, level=[0, 1]) + assert_matching(ind2.levels, new_levels) + assert_matching(self.index.levels, levels) + + # level changing specific level [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True) + assert inplace_return is None + assert_matching(ind2.levels, [new_levels[0], levels[1]]) + assert_matching(self.index.levels, levels) + + ind2 = self.index.copy() + inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True) + assert inplace_return is None + assert_matching(ind2.levels, [levels[0], new_levels[1]]) + assert_matching(self.index.levels, levels) + + # level changing multiple levels [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_levels(new_levels, 
level=[0, 1], + inplace=True) + assert inplace_return is None + assert_matching(ind2.levels, new_levels) + assert_matching(self.index.levels, levels) + + # illegal level changing should not change levels + # GH 13754 + original_index = self.index.copy() + for inplace in [True, False]: + with tm.assert_raises_regex(ValueError, "^On"): + self.index.set_levels(['c'], level=0, inplace=inplace) + assert_matching(self.index.levels, original_index.levels, + check_dtype=True) + + with tm.assert_raises_regex(ValueError, "^On"): + self.index.set_labels([0, 1, 2, 3, 4, 5], level=0, + inplace=inplace) + assert_matching(self.index.labels, original_index.labels, + check_dtype=True) + + with tm.assert_raises_regex(TypeError, "^Levels"): + self.index.set_levels('c', level=0, inplace=inplace) + assert_matching(self.index.levels, original_index.levels, + check_dtype=True) + + with tm.assert_raises_regex(TypeError, "^Labels"): + self.index.set_labels(1, level=0, inplace=inplace) + assert_matching(self.index.labels, original_index.labels, + check_dtype=True) + + def test_set_labels(self): + # side note - you probably wouldn't want to use levels and labels + # directly like this - but it is possible. 
+ labels = self.index.labels + major_labels, minor_labels = labels + major_labels = [(x + 1) % 3 for x in major_labels] + minor_labels = [(x + 1) % 1 for x in minor_labels] + new_labels = [major_labels, minor_labels] + + def assert_matching(actual, expected): + # avoid specifying internal representation + # as much as possible + assert len(actual) == len(expected) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp, dtype=np.int8) + tm.assert_numpy_array_equal(act, exp) + + # label changing [w/o mutation] + ind2 = self.index.set_labels(new_labels) + assert_matching(ind2.labels, new_labels) + assert_matching(self.index.labels, labels) + + # label changing [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_labels(new_labels, inplace=True) + assert inplace_return is None + assert_matching(ind2.labels, new_labels) + + # label changing specific level [w/o mutation] + ind2 = self.index.set_labels(new_labels[0], level=0) + assert_matching(ind2.labels, [new_labels[0], labels[1]]) + assert_matching(self.index.labels, labels) + + ind2 = self.index.set_labels(new_labels[1], level=1) + assert_matching(ind2.labels, [labels[0], new_labels[1]]) + assert_matching(self.index.labels, labels) + + # label changing multiple levels [w/o mutation] + ind2 = self.index.set_labels(new_labels, level=[0, 1]) + assert_matching(ind2.labels, new_labels) + assert_matching(self.index.labels, labels) + + # label changing specific level [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True) + assert inplace_return is None + assert_matching(ind2.labels, [new_labels[0], labels[1]]) + assert_matching(self.index.labels, labels) + + ind2 = self.index.copy() + inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True) + assert inplace_return is None + assert_matching(ind2.labels, [labels[0], new_labels[1]]) + assert_matching(self.index.labels, labels) + + # label changing multiple 
levels [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_labels(new_labels, level=[0, 1], + inplace=True) + assert inplace_return is None + assert_matching(ind2.labels, new_labels) + assert_matching(self.index.labels, labels) + + # label changing for levels of different magnitude of categories + ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)]) + new_labels = range(129, -1, -1) + expected = pd.MultiIndex.from_tuples( + [(0, i) for i in new_labels]) + + # [w/o mutation] + result = ind.set_labels(labels=new_labels, level=1) + assert result.equals(expected) + + # [w/ mutation] + result = ind.copy() + result.set_labels(labels=new_labels, level=1, inplace=True) + assert result.equals(expected) + + def test_set_levels_labels_names_bad_input(self): + levels, labels = self.index.levels, self.index.labels + names = self.index.names + + with tm.assert_raises_regex(ValueError, 'Length of levels'): + self.index.set_levels([levels[0]]) + + with tm.assert_raises_regex(ValueError, 'Length of labels'): + self.index.set_labels([labels[0]]) + + with tm.assert_raises_regex(ValueError, 'Length of names'): + self.index.set_names([names[0]]) + + # shouldn't scalar data error, instead should demand list-like + with tm.assert_raises_regex(TypeError, 'list of lists-like'): + self.index.set_levels(levels[0]) + + # shouldn't scalar data error, instead should demand list-like + with tm.assert_raises_regex(TypeError, 'list of lists-like'): + self.index.set_labels(labels[0]) + + # shouldn't scalar data error, instead should demand list-like + with tm.assert_raises_regex(TypeError, 'list-like'): + self.index.set_names(names[0]) + + # should have equal lengths + with tm.assert_raises_regex(TypeError, 'list of lists-like'): + self.index.set_levels(levels[0], level=[0, 1]) + + with tm.assert_raises_regex(TypeError, 'list-like'): + self.index.set_levels(levels, level=0) + + # should have equal lengths + with tm.assert_raises_regex(TypeError, 'list of lists-like'): + 
self.index.set_labels(labels[0], level=[0, 1]) + + with tm.assert_raises_regex(TypeError, 'list-like'): + self.index.set_labels(labels, level=0) + + # should have equal lengths + with tm.assert_raises_regex(ValueError, 'Length of names'): + self.index.set_names(names[0], level=[0, 1]) + + with tm.assert_raises_regex(TypeError, 'string'): + self.index.set_names(names, level=0) + + def test_set_levels_categorical(self): + # GH13854 + index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) + for ordered in [False, True]: + cidx = CategoricalIndex(list("bac"), ordered=ordered) + result = index.set_levels(cidx, 0) + expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], + labels=index.labels) + tm.assert_index_equal(result, expected) + + result_lvl = result.get_level_values(0) + expected_lvl = CategoricalIndex(list("bacb"), + categories=cidx.categories, + ordered=cidx.ordered) + tm.assert_index_equal(result_lvl, expected_lvl) + + def test_set_value_keeps_names(self): + # motivating example from #3742 + lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe'] + lev2 = ['1', '2', '3'] * 2 + idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number']) + df = pd.DataFrame( + np.random.randn(6, 4), + columns=['one', 'two', 'three', 'four'], + index=idx) + df = df.sort_index() + assert df._is_copy is None + assert df.index.names == ('Name', 'Number') + df.at[('grethe', '4'), 'one'] = 99.34 + assert df._is_copy is None + assert df.index.names == ('Name', 'Number') diff --git a/pandas/tests/indexes/multi/test_slice.py b/pandas/tests/indexes/multi/test_slice.py new file mode 100644 index 0000000000000..b05bc7c6e1347 --- /dev/null +++ b/pandas/tests/indexes/multi/test_slice.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- + +from datetime import timedelta + +import numpy as np + +from pandas import (Index, MultiIndex) +from pandas.compat import lrange + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestSlice(Base): + 
_holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_slice_keep_name(self): + x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')], + names=['x', 'y']) + assert x[1:].names == x.names + + def test_slice_locs(self): + df = tm.makeTimeDataFrame() + stacked = df.stack() + idx = stacked.index + + slob = slice(*idx.slice_locs(df.index[5], df.index[15])) + sliced = stacked[slob] + expected = df[5:16].stack() + tm.assert_almost_equal(sliced.values, expected.values) + + slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30), + df.index[15] - timedelta(seconds=30))) + sliced = stacked[slob] + expected = df[6:15].stack() + tm.assert_almost_equal(sliced.values, expected.values) + + def test_slice_locs_with_type_mismatch(self): + df = tm.makeTimeDataFrame() + stacked = df.stack() + idx = stacked.index + tm.assert_raises_regex(TypeError, '^Level type mismatch', + idx.slice_locs, (1, 3)) + tm.assert_raises_regex(TypeError, '^Level type mismatch', + idx.slice_locs, + df.index[5] + timedelta( + seconds=30), (5, 2)) + df = tm.makeCustomDataframe(5, 5) + stacked = df.stack() + idx = stacked.index + with tm.assert_raises_regex(TypeError, '^Level type mismatch'): + idx.slice_locs(timedelta(seconds=30)) + # TODO: Try creating a UnicodeDecodeError in exception message + with tm.assert_raises_regex(TypeError, '^Level type mismatch'): + idx.slice_locs(df.index[1], (16, "a")) + + def test_slice_locs_not_sorted(self): + index = 
MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( + lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( + [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) + + tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than " + "MultiIndex lexsort depth", + index.slice_locs, (1, 0, 1), (2, 1, 0)) + + # works + sorted_index, _ = index.sortlevel(0) + # should there be a test case here??? + sorted_index.slice_locs((1, 0, 1), (2, 1, 0)) + + def test_slice_locs_partial(self): + sorted_idx, _ = self.index.sortlevel(0) + + result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one')) + assert result == (1, 5) + + result = sorted_idx.slice_locs(None, ('qux', 'one')) + assert result == (0, 5) + + result = sorted_idx.slice_locs(('foo', 'two'), None) + assert result == (1, len(sorted_idx)) + + result = sorted_idx.slice_locs('bar', 'baz') + assert result == (2, 4) + + def test_slice_locs_not_contained(self): + # some searchsorted action + + index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]], + labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3], + [0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0) + + result = index.slice_locs((1, 0), (5, 2)) + assert result == (3, 6) + + result = index.slice_locs(1, 5) + assert result == (3, 6) + + result = index.slice_locs((2, 2), (5, 2)) + assert result == (3, 6) + + result = index.slice_locs(2, 5) + assert result == (3, 6) + + result = index.slice_locs((1, 0), (6, 3)) + assert result == (3, 8) + + result = index.slice_locs(-1, 10) + assert result == (0, len(index)) diff --git a/pandas/tests/indexes/multi/test_sort_level.py b/pandas/tests/indexes/multi/test_sort_level.py new file mode 100644 index 0000000000000..776be46083d3f --- /dev/null +++ b/pandas/tests/indexes/multi/test_sort_level.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +from pandas import (Index, MultiIndex) + +from pandas.tests.indexes.common import Base + + +class TestSortLevel(Base): + _holder = MultiIndex + _compat_props = ['shape', 
'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_sortlevel(self): + import random + + tuples = list(self.index) + random.shuffle(tuples) + + index = MultiIndex.from_tuples(tuples) + + sorted_idx, _ = index.sortlevel(0) + expected = MultiIndex.from_tuples(sorted(tuples)) + assert sorted_idx.equals(expected) + + sorted_idx, _ = index.sortlevel(0, ascending=False) + assert sorted_idx.equals(expected[::-1]) + + sorted_idx, _ = index.sortlevel(1) + by1 = sorted(tuples, key=lambda x: (x[1], x[0])) + expected = MultiIndex.from_tuples(by1) + assert sorted_idx.equals(expected) + + sorted_idx, _ = index.sortlevel(1, ascending=False) + assert sorted_idx.equals(expected[::-1]) + + def test_sortlevel_not_sort_remaining(self): + mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) + sorted_idx, _ = mi.sortlevel('A', sort_remaining=False) + assert sorted_idx.equals(mi) + + def test_sortlevel_deterministic(self): + tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'), + ('foo', 'one'), ('baz', 'two'), ('qux', 'one')] + + index = MultiIndex.from_tuples(tuples) + + sorted_idx, _ = index.sortlevel(0) + expected = MultiIndex.from_tuples(sorted(tuples)) + assert sorted_idx.equals(expected) + + sorted_idx, _ = index.sortlevel(0, ascending=False) + assert sorted_idx.equals(expected[::-1]) + + sorted_idx, _ = index.sortlevel(1) + by1 = sorted(tuples, key=lambda x: (x[1], x[0])) + expected = MultiIndex.from_tuples(by1) + assert sorted_idx.equals(expected) + + sorted_idx, _ = 
index.sortlevel(1, ascending=False) + assert sorted_idx.equals(expected[::-1]) diff --git a/pandas/tests/indexes/multi/test_tuples.py b/pandas/tests/indexes/multi/test_tuples.py new file mode 100644 index 0000000000000..a12381a260637 --- /dev/null +++ b/pandas/tests/indexes/multi/test_tuples.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +from pandas import (Index, MultiIndex) + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestTuples(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_from_tuples(self): + tm.assert_raises_regex(TypeError, 'Cannot infer number of levels ' + 'from empty list', + MultiIndex.from_tuples, []) + + expected = MultiIndex(levels=[[1, 3], [2, 4]], + labels=[[0, 1], [0, 1]], + names=['a', 'b']) + + # input tuples + result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b']) + tm.assert_index_equal(result, expected) + + def test_from_tuples_iterator(self): + # GH 18434 + # input iterator for tuples + expected = MultiIndex(levels=[[1, 3], [2, 4]], + labels=[[0, 1], [0, 1]], + names=['a', 'b']) + + result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b']) + tm.assert_index_equal(result, expected) + + # input non-iterables + with tm.assert_raises_regex( + TypeError, 'Input must be a list / sequence of tuple-likes.'): + MultiIndex.from_tuples(0) + + def test_from_tuples_empty(self): + # GH 16777 + result = 
MultiIndex.from_tuples([], names=['a', 'b']) + expected = MultiIndex.from_arrays(arrays=[[], []], + names=['a', 'b']) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/multi/test_unicode.py b/pandas/tests/indexes/multi/test_unicode.py new file mode 100644 index 0000000000000..83ce407a5b1ca --- /dev/null +++ b/pandas/tests/indexes/multi/test_unicode.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex, compat) +from pandas.compat import PY3, range, u + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestUnicode(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + def test_repr_roundtrip(self): + + mi = MultiIndex.from_product([list('ab'), range(3)], + names=['first', 'second']) + str(mi) + + if PY3: + tm.assert_index_equal(eval(repr(mi)), mi, exact=True) + else: + result = eval(repr(mi)) + # string coerces to unicode + tm.assert_index_equal(result, mi, exact=False) + assert mi.get_level_values('first').inferred_type == 'string' + assert result.get_level_values('first').inferred_type == 'unicode' + + mi_u = MultiIndex.from_product( + [list(u'ab'), range(3)], names=['first', 'second']) + result = eval(repr(mi_u)) + tm.assert_index_equal(result, mi_u, exact=True) + + # formatting + if PY3: + str(mi) + else: + compat.text_type(mi) + + # long format + mi = MultiIndex.from_product([list('abcdefg'), 
range(10)], + names=['first', 'second']) + + if PY3: + tm.assert_index_equal(eval(repr(mi)), mi, exact=True) + else: + result = eval(repr(mi)) + # string coerces to unicode + tm.assert_index_equal(result, mi, exact=False) + assert mi.get_level_values('first').inferred_type == 'string' + assert result.get_level_values('first').inferred_type == 'unicode' + + result = eval(repr(mi_u)) + tm.assert_index_equal(result, mi_u, exact=True) + + def test_repr_with_unicode_data(self): + with pd.core.config.option_context("display.encoding", 'UTF-8'): + d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + index = pd.DataFrame(d).set_index(["a", "b"]).index + assert "\\u" not in repr(index) # we don't want unicode-escaped + + def test_unicode_string_with_unicode(self): + d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + idx = pd.DataFrame(d).set_index(["a", "b"]).index + + if PY3: + str(idx) + else: + compat.text_type(idx) + + def test_bytestring_with_unicode(self): + d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + idx = pd.DataFrame(d).set_index(["a", "b"]).index + + if PY3: + bytes(idx) + else: + str(idx) diff --git a/pandas/tests/indexes/multi/test_unique.py b/pandas/tests/indexes/multi/test_unique.py new file mode 100644 index 0000000000000..f38b5a1f2d547 --- /dev/null +++ b/pandas/tests/indexes/multi/test_unique.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +import pytest + +import numpy as np + +import pandas as pd + +from pandas import (Index, MultiIndex) + +import pandas.util.testing as tm + +from pandas.tests.indexes.common import Base + + +class TestUnique(Base): + _holder = MultiIndex + _compat_props = ['shape', 'ndim', 'size', 'itemsize'] + + def setup_method(self, method): + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + self.index_names = ['first', 'second'] + self.indices = 
dict(index=MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels + ], names=self.index_names, + verify_integrity=False)) + self.setup_indices() + + def create_index(self): + return self.index + + @pytest.mark.parametrize('names', [None, ['first', 'second']]) + def test_unique(self, names): + mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], + names=names) + + res = mi.unique() + exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) + tm.assert_index_equal(res, exp) + + mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')], + names=names) + res = mi.unique() + exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')], + names=mi.names) + tm.assert_index_equal(res, exp) + + mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')], + names=names) + res = mi.unique() + exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names) + tm.assert_index_equal(res, exp) + + # GH #20568 - empty MI + mi = pd.MultiIndex.from_arrays([[], []], names=names) + res = mi.unique() + tm.assert_index_equal(mi, res) + + @pytest.mark.parametrize('level', [0, 'first', 1, 'second']) + def test_unique_level(self, level): + # GH #17896 - with level= argument + result = self.index.unique(level=level) + expected = self.index.get_level_values(level).unique() + tm.assert_index_equal(result, expected) + + # With already unique level + mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], + names=['first', 'second']) + result = mi.unique(level=level) + expected = mi.get_level_values(level) + tm.assert_index_equal(result, expected) + + # With empty MI + mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second']) + result = mi.unique(level=level) + expected = mi.get_level_values(level) + + def test_unique_datetimelike(self): + idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', + '2015-01-01', 'NaT', 'NaT']) + idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', + '2015-01-02', 'NaT', 
'2015-01-01'], + tz='Asia/Tokyo') + result = pd.MultiIndex.from_arrays([idx1, idx2]).unique() + + eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) + eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02', + 'NaT', '2015-01-01'], + tz='Asia/Tokyo') + exp = pd.MultiIndex.from_arrays([eidx1, eidx2]) + tm.assert_index_equal(result, exp) diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py deleted file mode 100644 index 984f37042d600..0000000000000 --- a/pandas/tests/indexes/test_multi.py +++ /dev/null @@ -1,3247 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import warnings - -from datetime import timedelta -from itertools import product - -import pytest - -import numpy as np - -import pandas as pd - -from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex, - compat, date_range, period_range) -from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY -from pandas.errors import PerformanceWarning, UnsortedIndexError -from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.indexes.base import InvalidIndexError -from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike -from pandas._libs.tslib import Timestamp - -import pandas.util.testing as tm - -from pandas.util.testing import assert_almost_equal, assert_copy - -from .common import Base - - -class TestMultiIndex(Base): - _holder = MultiIndex - _compat_props = ['shape', 'ndim', 'size', 'itemsize'] - - def setup_method(self, method): - major_axis = Index(['foo', 'bar', 'baz', 'qux']) - minor_axis = Index(['one', 'two']) - - major_labels = np.array([0, 0, 1, 2, 3, 3]) - minor_labels = np.array([0, 1, 0, 1, 0, 1]) - self.index_names = ['first', 'second'] - self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels - ], names=self.index_names, - verify_integrity=False)) - self.setup_indices() - - def create_index(self): - return self.index - - def 
test_boolean_context_compat2(self): - - # boolean context compat - # GH7897 - i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)]) - common = i1.intersection(i2) - - def f(): - if common: - pass - - tm.assert_raises_regex(ValueError, 'The truth value of a', f) - - def test_labels_dtypes(self): - - # GH 8456 - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - assert i.labels[0].dtype == 'int8' - assert i.labels[1].dtype == 'int8' - - i = MultiIndex.from_product([['a'], range(40)]) - assert i.labels[1].dtype == 'int8' - i = MultiIndex.from_product([['a'], range(400)]) - assert i.labels[1].dtype == 'int16' - i = MultiIndex.from_product([['a'], range(40000)]) - assert i.labels[1].dtype == 'int32' - - i = pd.MultiIndex.from_product([['a'], range(1000)]) - assert (i.labels[0] >= 0).all() - assert (i.labels[1] >= 0).all() - - def test_where(self): - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - - def f(): - i.where(True) - - pytest.raises(NotImplementedError, f) - - def test_where_array_like(self): - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - klasses = [list, tuple, np.array, pd.Series] - cond = [False, True] - - for klass in klasses: - f = lambda: i.where(klass(cond)) - pytest.raises(NotImplementedError, f) - - def test_repeat(self): - reps = 2 - numbers = [1, 2, 3] - names = np.array(['foo', 'bar']) - - m = MultiIndex.from_product([ - numbers, names], names=names) - expected = MultiIndex.from_product([ - numbers, names.repeat(reps)], names=names) - tm.assert_index_equal(m.repeat(reps), expected) - - with tm.assert_produces_warning(FutureWarning): - result = m.repeat(n=reps) - tm.assert_index_equal(result, expected) - - def test_numpy_repeat(self): - reps = 2 - numbers = [1, 2, 3] - names = np.array(['foo', 'bar']) - - m = MultiIndex.from_product([ - numbers, names], names=names) - expected = MultiIndex.from_product([ - numbers, names.repeat(reps)], names=names) - tm.assert_index_equal(np.repeat(m, reps), 
expected) - - msg = "the 'axis' parameter is not supported" - tm.assert_raises_regex( - ValueError, msg, np.repeat, m, reps, axis=1) - - def test_set_name_methods(self): - # so long as these are synonyms, we don't need to test set_names - assert self.index.rename == self.index.set_names - new_names = [name + "SUFFIX" for name in self.index_names] - ind = self.index.set_names(new_names) - assert self.index.names == self.index_names - assert ind.names == new_names - with tm.assert_raises_regex(ValueError, "^Length"): - ind.set_names(new_names + new_names) - new_names2 = [name + "SUFFIX2" for name in new_names] - res = ind.set_names(new_names2, inplace=True) - assert res is None - assert ind.names == new_names2 - - # set names for specific level (# GH7792) - ind = self.index.set_names(new_names[0], level=0) - assert self.index.names == self.index_names - assert ind.names == [new_names[0], self.index_names[1]] - - res = ind.set_names(new_names2[0], level=0, inplace=True) - assert res is None - assert ind.names == [new_names2[0], self.index_names[1]] - - # set names for multiple levels - ind = self.index.set_names(new_names, level=[0, 1]) - assert self.index.names == self.index_names - assert ind.names == new_names - - res = ind.set_names(new_names2, level=[0, 1], inplace=True) - assert res is None - assert ind.names == new_names2 - - def test_set_levels_labels_directly(self): - # setting levels/labels directly raises AttributeError - - levels = self.index.levels - new_levels = [[lev + 'a' for lev in level] for level in levels] - - labels = self.index.labels - major_labels, minor_labels = labels - major_labels = [(x + 1) % 3 for x in major_labels] - minor_labels = [(x + 1) % 1 for x in minor_labels] - new_labels = [major_labels, minor_labels] - - with pytest.raises(AttributeError): - self.index.levels = new_levels - - with pytest.raises(AttributeError): - self.index.labels = new_labels - - def test_set_levels(self): - # side note - you probably wouldn't want to use 
levels and labels - # directly like this - but it is possible. - levels = self.index.levels - new_levels = [[lev + 'a' for lev in level] for level in levels] - - def assert_matching(actual, expected, check_dtype=False): - # avoid specifying internal representation - # as much as possible - assert len(actual) == len(expected) - for act, exp in zip(actual, expected): - act = np.asarray(act) - exp = np.asarray(exp) - tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) - - # level changing [w/o mutation] - ind2 = self.index.set_levels(new_levels) - assert_matching(ind2.levels, new_levels) - assert_matching(self.index.levels, levels) - - # level changing [w/ mutation] - ind2 = self.index.copy() - inplace_return = ind2.set_levels(new_levels, inplace=True) - assert inplace_return is None - assert_matching(ind2.levels, new_levels) - - # level changing specific level [w/o mutation] - ind2 = self.index.set_levels(new_levels[0], level=0) - assert_matching(ind2.levels, [new_levels[0], levels[1]]) - assert_matching(self.index.levels, levels) - - ind2 = self.index.set_levels(new_levels[1], level=1) - assert_matching(ind2.levels, [levels[0], new_levels[1]]) - assert_matching(self.index.levels, levels) - - # level changing multiple levels [w/o mutation] - ind2 = self.index.set_levels(new_levels, level=[0, 1]) - assert_matching(ind2.levels, new_levels) - assert_matching(self.index.levels, levels) - - # level changing specific level [w/ mutation] - ind2 = self.index.copy() - inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True) - assert inplace_return is None - assert_matching(ind2.levels, [new_levels[0], levels[1]]) - assert_matching(self.index.levels, levels) - - ind2 = self.index.copy() - inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True) - assert inplace_return is None - assert_matching(ind2.levels, [levels[0], new_levels[1]]) - assert_matching(self.index.levels, levels) - - # level changing multiple levels [w/ mutation] - ind2 = 
self.index.copy() - inplace_return = ind2.set_levels(new_levels, level=[0, 1], - inplace=True) - assert inplace_return is None - assert_matching(ind2.levels, new_levels) - assert_matching(self.index.levels, levels) - - # illegal level changing should not change levels - # GH 13754 - original_index = self.index.copy() - for inplace in [True, False]: - with tm.assert_raises_regex(ValueError, "^On"): - self.index.set_levels(['c'], level=0, inplace=inplace) - assert_matching(self.index.levels, original_index.levels, - check_dtype=True) - - with tm.assert_raises_regex(ValueError, "^On"): - self.index.set_labels([0, 1, 2, 3, 4, 5], level=0, - inplace=inplace) - assert_matching(self.index.labels, original_index.labels, - check_dtype=True) - - with tm.assert_raises_regex(TypeError, "^Levels"): - self.index.set_levels('c', level=0, inplace=inplace) - assert_matching(self.index.levels, original_index.levels, - check_dtype=True) - - with tm.assert_raises_regex(TypeError, "^Labels"): - self.index.set_labels(1, level=0, inplace=inplace) - assert_matching(self.index.labels, original_index.labels, - check_dtype=True) - - def test_set_labels(self): - # side note - you probably wouldn't want to use levels and labels - # directly like this - but it is possible. 
- labels = self.index.labels - major_labels, minor_labels = labels - major_labels = [(x + 1) % 3 for x in major_labels] - minor_labels = [(x + 1) % 1 for x in minor_labels] - new_labels = [major_labels, minor_labels] - - def assert_matching(actual, expected): - # avoid specifying internal representation - # as much as possible - assert len(actual) == len(expected) - for act, exp in zip(actual, expected): - act = np.asarray(act) - exp = np.asarray(exp, dtype=np.int8) - tm.assert_numpy_array_equal(act, exp) - - # label changing [w/o mutation] - ind2 = self.index.set_labels(new_labels) - assert_matching(ind2.labels, new_labels) - assert_matching(self.index.labels, labels) - - # label changing [w/ mutation] - ind2 = self.index.copy() - inplace_return = ind2.set_labels(new_labels, inplace=True) - assert inplace_return is None - assert_matching(ind2.labels, new_labels) - - # label changing specific level [w/o mutation] - ind2 = self.index.set_labels(new_labels[0], level=0) - assert_matching(ind2.labels, [new_labels[0], labels[1]]) - assert_matching(self.index.labels, labels) - - ind2 = self.index.set_labels(new_labels[1], level=1) - assert_matching(ind2.labels, [labels[0], new_labels[1]]) - assert_matching(self.index.labels, labels) - - # label changing multiple levels [w/o mutation] - ind2 = self.index.set_labels(new_labels, level=[0, 1]) - assert_matching(ind2.labels, new_labels) - assert_matching(self.index.labels, labels) - - # label changing specific level [w/ mutation] - ind2 = self.index.copy() - inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True) - assert inplace_return is None - assert_matching(ind2.labels, [new_labels[0], labels[1]]) - assert_matching(self.index.labels, labels) - - ind2 = self.index.copy() - inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True) - assert inplace_return is None - assert_matching(ind2.labels, [labels[0], new_labels[1]]) - assert_matching(self.index.labels, labels) - - # label changing multiple 
levels [w/ mutation] - ind2 = self.index.copy() - inplace_return = ind2.set_labels(new_labels, level=[0, 1], - inplace=True) - assert inplace_return is None - assert_matching(ind2.labels, new_labels) - assert_matching(self.index.labels, labels) - - # label changing for levels of different magnitude of categories - ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)]) - new_labels = range(129, -1, -1) - expected = pd.MultiIndex.from_tuples( - [(0, i) for i in new_labels]) - - # [w/o mutation] - result = ind.set_labels(labels=new_labels, level=1) - assert result.equals(expected) - - # [w/ mutation] - result = ind.copy() - result.set_labels(labels=new_labels, level=1, inplace=True) - assert result.equals(expected) - - def test_set_levels_labels_names_bad_input(self): - levels, labels = self.index.levels, self.index.labels - names = self.index.names - - with tm.assert_raises_regex(ValueError, 'Length of levels'): - self.index.set_levels([levels[0]]) - - with tm.assert_raises_regex(ValueError, 'Length of labels'): - self.index.set_labels([labels[0]]) - - with tm.assert_raises_regex(ValueError, 'Length of names'): - self.index.set_names([names[0]]) - - # shouldn't scalar data error, instead should demand list-like - with tm.assert_raises_regex(TypeError, 'list of lists-like'): - self.index.set_levels(levels[0]) - - # shouldn't scalar data error, instead should demand list-like - with tm.assert_raises_regex(TypeError, 'list of lists-like'): - self.index.set_labels(labels[0]) - - # shouldn't scalar data error, instead should demand list-like - with tm.assert_raises_regex(TypeError, 'list-like'): - self.index.set_names(names[0]) - - # should have equal lengths - with tm.assert_raises_regex(TypeError, 'list of lists-like'): - self.index.set_levels(levels[0], level=[0, 1]) - - with tm.assert_raises_regex(TypeError, 'list-like'): - self.index.set_levels(levels, level=0) - - # should have equal lengths - with tm.assert_raises_regex(TypeError, 'list of lists-like'): - 
self.index.set_labels(labels[0], level=[0, 1]) - - with tm.assert_raises_regex(TypeError, 'list-like'): - self.index.set_labels(labels, level=0) - - # should have equal lengths - with tm.assert_raises_regex(ValueError, 'Length of names'): - self.index.set_names(names[0], level=[0, 1]) - - with tm.assert_raises_regex(TypeError, 'string'): - self.index.set_names(names, level=0) - - def test_set_levels_categorical(self): - # GH13854 - index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) - for ordered in [False, True]: - cidx = CategoricalIndex(list("bac"), ordered=ordered) - result = index.set_levels(cidx, 0) - expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], - labels=index.labels) - tm.assert_index_equal(result, expected) - - result_lvl = result.get_level_values(0) - expected_lvl = CategoricalIndex(list("bacb"), - categories=cidx.categories, - ordered=cidx.ordered) - tm.assert_index_equal(result_lvl, expected_lvl) - - def test_metadata_immutable(self): - levels, labels = self.index.levels, self.index.labels - # shouldn't be able to set at either the top level or base level - mutable_regex = re.compile('does not support mutable operations') - with tm.assert_raises_regex(TypeError, mutable_regex): - levels[0] = levels[0] - with tm.assert_raises_regex(TypeError, mutable_regex): - levels[0][0] = levels[0][0] - # ditto for labels - with tm.assert_raises_regex(TypeError, mutable_regex): - labels[0] = labels[0] - with tm.assert_raises_regex(TypeError, mutable_regex): - labels[0][0] = labels[0][0] - # and for names - names = self.index.names - with tm.assert_raises_regex(TypeError, mutable_regex): - names[0] = names[0] - - def test_inplace_mutation_resets_values(self): - levels = [['a', 'b', 'c'], [4]] - levels2 = [[1, 2, 3], ['a']] - labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] - - mi1 = MultiIndex(levels=levels, labels=labels) - mi2 = MultiIndex(levels=levels2, labels=labels) - vals = mi1.values.copy() - vals2 = mi2.values.copy() - - assert mi1._tuples is 
not None - - # Make sure level setting works - new_vals = mi1.set_levels(levels2).values - tm.assert_almost_equal(vals2, new_vals) - - # Non-inplace doesn't kill _tuples [implementation detail] - tm.assert_almost_equal(mi1._tuples, vals) - - # ...and values is still same too - tm.assert_almost_equal(mi1.values, vals) - - # Inplace should kill _tuples - mi1.set_levels(levels2, inplace=True) - tm.assert_almost_equal(mi1.values, vals2) - - # Make sure label setting works too - labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] - exp_values = np.empty((6,), dtype=object) - exp_values[:] = [(long(1), 'a')] * 6 - - # Must be 1d array of tuples - assert exp_values.shape == (6,) - new_values = mi2.set_labels(labels2).values - - # Not inplace shouldn't change - tm.assert_almost_equal(mi2._tuples, vals2) - - # Should have correct values - tm.assert_almost_equal(exp_values, new_values) - - # ...and again setting inplace should kill _tuples, etc - mi2.set_labels(labels2, inplace=True) - tm.assert_almost_equal(mi2.values, new_values) - - def test_copy_in_constructor(self): - levels = np.array(["a", "b", "c"]) - labels = np.array([1, 1, 2, 0, 0, 1, 1]) - val = labels[0] - mi = MultiIndex(levels=[levels, levels], labels=[labels, labels], - copy=True) - assert mi.labels[0][0] == val - labels[0] = 15 - assert mi.labels[0][0] == val - val = levels[0] - levels[0] = "PANDA" - assert mi.levels[0][0] == val - - def test_set_value_keeps_names(self): - # motivating example from #3742 - lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe'] - lev2 = ['1', '2', '3'] * 2 - idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number']) - df = pd.DataFrame( - np.random.randn(6, 4), - columns=['one', 'two', 'three', 'four'], - index=idx) - df = df.sort_index() - assert df._is_copy is None - assert df.index.names == ('Name', 'Number') - df.at[('grethe', '4'), 'one'] = 99.34 - assert df._is_copy is None - assert df.index.names == ('Name', 'Number') - - def test_copy_names(self): - 
# Check that adding a "names" parameter to the copy is honored - # GH14302 - multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2']) - multi_idx1 = multi_idx.copy() - - assert multi_idx.equals(multi_idx1) - assert multi_idx.names == ['MyName1', 'MyName2'] - assert multi_idx1.names == ['MyName1', 'MyName2'] - - multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2']) - - assert multi_idx.equals(multi_idx2) - assert multi_idx.names == ['MyName1', 'MyName2'] - assert multi_idx2.names == ['NewName1', 'NewName2'] - - multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2']) - - assert multi_idx.equals(multi_idx3) - assert multi_idx.names == ['MyName1', 'MyName2'] - assert multi_idx3.names == ['NewName1', 'NewName2'] - - def test_names(self): - - # names are assigned in setup - names = self.index_names - level_names = [level.name for level in self.index.levels] - assert names == level_names - - # setting bad names on existing - index = self.index - tm.assert_raises_regex(ValueError, "^Length of names", - setattr, index, "names", - list(index.names) + ["third"]) - tm.assert_raises_regex(ValueError, "^Length of names", - setattr, index, "names", []) - - # initializing with bad names (should always be equivalent) - major_axis, minor_axis = self.index.levels - major_labels, minor_labels = self.index.labels - tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex, - levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels], - names=['first']) - tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex, - levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels], - names=['first', 'second', 'third']) - - # names are assigned - index.names = ["a", "b"] - ind_names = list(index.names) - level_names = [level.name for level in index.levels] - assert ind_names == level_names - - def test_astype(self): - expected = self.index.copy() - actual = self.index.astype('O') - assert_copy(actual.levels, expected.levels) - 
assert_copy(actual.labels, expected.labels) - self.check_level_names(actual, expected.names) - - with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): - self.index.astype(np.dtype(int)) - - @pytest.mark.parametrize('ordered', [True, False]) - def test_astype_category(self, ordered): - # GH 18630 - msg = '> 1 ndim Categorical are not supported at this time' - with tm.assert_raises_regex(NotImplementedError, msg): - self.index.astype(CategoricalDtype(ordered=ordered)) - - if ordered is False: - # dtype='category' defaults to ordered=False, so only test once - with tm.assert_raises_regex(NotImplementedError, msg): - self.index.astype('category') - - def test_constructor_single_level(self): - result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], - labels=[[0, 1, 2, 3]], names=['first']) - assert isinstance(result, MultiIndex) - expected = Index(['foo', 'bar', 'baz', 'qux'], name='first') - tm.assert_index_equal(result.levels[0], expected) - assert result.names == ['first'] - - def test_constructor_no_levels(self): - tm.assert_raises_regex(ValueError, "non-zero number " - "of levels/labels", - MultiIndex, levels=[], labels=[]) - both_re = re.compile('Must pass both levels and labels') - with tm.assert_raises_regex(TypeError, both_re): - MultiIndex(levels=[]) - with tm.assert_raises_regex(TypeError, both_re): - MultiIndex(labels=[]) - - def test_constructor_mismatched_label_levels(self): - labels = [np.array([1]), np.array([2]), np.array([3])] - levels = ["a"] - tm.assert_raises_regex(ValueError, "Length of levels and labels " - "must be the same", MultiIndex, - levels=levels, labels=labels) - length_error = re.compile('>= length of level') - label_error = re.compile(r'Unequal label lengths: \[4, 2\]') - - # important to check that it's looking at the right thing. 
- with tm.assert_raises_regex(ValueError, length_error): - MultiIndex(levels=[['a'], ['b']], - labels=[[0, 1, 2, 3], [0, 3, 4, 1]]) - - with tm.assert_raises_regex(ValueError, label_error): - MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]]) - - # external API - with tm.assert_raises_regex(ValueError, length_error): - self.index.copy().set_levels([['a'], ['b']]) - - with tm.assert_raises_regex(ValueError, label_error): - self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]]) - - @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], - [1, 'a', 1]]) - def test_duplicate_level_names(self, names): - # GH18872 - pytest.raises(ValueError, pd.MultiIndex.from_product, - [[0, 1]] * 3, names=names) - - # With .rename() - mi = pd.MultiIndex.from_product([[0, 1]] * 3) - tm.assert_raises_regex(ValueError, "Duplicated level name:", - mi.rename, names) - - # With .rename(., level=) - mi.rename(names[0], level=1, inplace=True) - tm.assert_raises_regex(ValueError, "Duplicated level name:", - mi.rename, names[:2], level=[0, 2]) - - def assert_multiindex_copied(self, copy, original): - # Levels should be (at least, shallow copied) - tm.assert_copy(copy.levels, original.levels) - tm.assert_almost_equal(copy.labels, original.labels) - - # Labels doesn't matter which way copied - tm.assert_almost_equal(copy.labels, original.labels) - assert copy.labels is not original.labels - - # Names doesn't matter which way copied - assert copy.names == original.names - assert copy.names is not original.names - - # Sort order should be copied - assert copy.sortorder == original.sortorder - - def test_copy(self): - i_copy = self.index.copy() - - self.assert_multiindex_copied(i_copy, self.index) - - def test_shallow_copy(self): - i_copy = self.index._shallow_copy() - - self.assert_multiindex_copied(i_copy, self.index) - - def test_view(self): - i_view = self.index.view() - - self.assert_multiindex_copied(i_view, self.index) - - def check_level_names(self, index, names): - assert 
[level.name for level in index.levels] == list(names) - - def test_changing_names(self): - - # names should be applied to levels - level_names = [level.name for level in self.index.levels] - self.check_level_names(self.index, self.index.names) - - view = self.index.view() - copy = self.index.copy() - shallow_copy = self.index._shallow_copy() - - # changing names should change level names on object - new_names = [name + "a" for name in self.index.names] - self.index.names = new_names - self.check_level_names(self.index, new_names) - - # but not on copies - self.check_level_names(view, level_names) - self.check_level_names(copy, level_names) - self.check_level_names(shallow_copy, level_names) - - # and copies shouldn't change original - shallow_copy.names = [name + "c" for name in shallow_copy.names] - self.check_level_names(self.index, new_names) - - def test_get_level_number_integer(self): - self.index.names = [1, 0] - assert self.index._get_level_number(1) == 0 - assert self.index._get_level_number(0) == 1 - pytest.raises(IndexError, self.index._get_level_number, 2) - tm.assert_raises_regex(KeyError, 'Level fourth not found', - self.index._get_level_number, 'fourth') - - def test_from_arrays(self): - arrays = [] - for lev, lab in zip(self.index.levels, self.index.labels): - arrays.append(np.asarray(lev).take(lab)) - - # list of arrays as input - result = MultiIndex.from_arrays(arrays, names=self.index.names) - tm.assert_index_equal(result, self.index) - - # infer correctly - result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], - ['a', 'b']]) - assert result.levels[0].equals(Index([Timestamp('20130101')])) - assert result.levels[1].equals(Index(['a', 'b'])) - - def test_from_arrays_iterator(self): - # GH 18434 - arrays = [] - for lev, lab in zip(self.index.levels, self.index.labels): - arrays.append(np.asarray(lev).take(lab)) - - # iterator as input - result = MultiIndex.from_arrays(iter(arrays), names=self.index.names) - tm.assert_index_equal(result, 
self.index) - - # invalid iterator input - with tm.assert_raises_regex( - TypeError, "Input must be a list / sequence of array-likes."): - MultiIndex.from_arrays(0) - - def test_from_arrays_index_series_datetimetz(self): - idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, - tz='US/Eastern') - idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3, - tz='Asia/Tokyo') - result = pd.MultiIndex.from_arrays([idx1, idx2]) - tm.assert_index_equal(result.get_level_values(0), idx1) - tm.assert_index_equal(result.get_level_values(1), idx2) - - result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) - tm.assert_index_equal(result2.get_level_values(0), idx1) - tm.assert_index_equal(result2.get_level_values(1), idx2) - - tm.assert_index_equal(result, result2) - - def test_from_arrays_index_series_timedelta(self): - idx1 = pd.timedelta_range('1 days', freq='D', periods=3) - idx2 = pd.timedelta_range('2 hours', freq='H', periods=3) - result = pd.MultiIndex.from_arrays([idx1, idx2]) - tm.assert_index_equal(result.get_level_values(0), idx1) - tm.assert_index_equal(result.get_level_values(1), idx2) - - result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) - tm.assert_index_equal(result2.get_level_values(0), idx1) - tm.assert_index_equal(result2.get_level_values(1), idx2) - - tm.assert_index_equal(result, result2) - - def test_from_arrays_index_series_period(self): - idx1 = pd.period_range('2011-01-01', freq='D', periods=3) - idx2 = pd.period_range('2015-01-01', freq='H', periods=3) - result = pd.MultiIndex.from_arrays([idx1, idx2]) - tm.assert_index_equal(result.get_level_values(0), idx1) - tm.assert_index_equal(result.get_level_values(1), idx2) - - result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) - tm.assert_index_equal(result2.get_level_values(0), idx1) - tm.assert_index_equal(result2.get_level_values(1), idx2) - - tm.assert_index_equal(result, result2) - - def 
test_from_arrays_index_datetimelike_mixed(self): - idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, - tz='US/Eastern') - idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3) - idx3 = pd.timedelta_range('1 days', freq='D', periods=3) - idx4 = pd.period_range('2011-01-01', freq='D', periods=3) - - result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) - tm.assert_index_equal(result.get_level_values(0), idx1) - tm.assert_index_equal(result.get_level_values(1), idx2) - tm.assert_index_equal(result.get_level_values(2), idx3) - tm.assert_index_equal(result.get_level_values(3), idx4) - - result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), - pd.Series(idx2), - pd.Series(idx3), - pd.Series(idx4)]) - tm.assert_index_equal(result2.get_level_values(0), idx1) - tm.assert_index_equal(result2.get_level_values(1), idx2) - tm.assert_index_equal(result2.get_level_values(2), idx3) - tm.assert_index_equal(result2.get_level_values(3), idx4) - - tm.assert_index_equal(result, result2) - - def test_from_arrays_index_series_categorical(self): - # GH13743 - idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), - ordered=False) - idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), - ordered=True) - - result = pd.MultiIndex.from_arrays([idx1, idx2]) - tm.assert_index_equal(result.get_level_values(0), idx1) - tm.assert_index_equal(result.get_level_values(1), idx2) - - result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) - tm.assert_index_equal(result2.get_level_values(0), idx1) - tm.assert_index_equal(result2.get_level_values(1), idx2) - - result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values]) - tm.assert_index_equal(result3.get_level_values(0), idx1) - tm.assert_index_equal(result3.get_level_values(1), idx2) - - def test_from_arrays_empty(self): - # 0 levels - with tm.assert_raises_regex( - ValueError, "Must pass non-zero number of levels/labels"): - MultiIndex.from_arrays(arrays=[]) - - # 1 level - result = 
MultiIndex.from_arrays(arrays=[[]], names=['A']) - assert isinstance(result, MultiIndex) - expected = Index([], name='A') - tm.assert_index_equal(result.levels[0], expected) - - # N levels - for N in [2, 3]: - arrays = [[]] * N - names = list('ABC')[:N] - result = MultiIndex.from_arrays(arrays=arrays, names=names) - expected = MultiIndex(levels=[[]] * N, labels=[[]] * N, - names=names) - tm.assert_index_equal(result, expected) - - def test_from_arrays_invalid_input(self): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] - for i in invalid_inputs: - pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i) - - def test_from_arrays_different_lengths(self): - # see gh-13599 - idx1 = [1, 2, 3] - idx2 = ['a', 'b'] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - idx1 = [] - idx2 = ['a', 'b'] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - idx1 = [1, 2, 3] - idx2 = [] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - def test_from_product(self): - - first = ['foo', 'bar', 'buz'] - second = ['a', 'b', 'c'] - names = ['first', 'second'] - result = MultiIndex.from_product([first, second], names=names) - - tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), - ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), - ('buz', 'c')] - expected = MultiIndex.from_tuples(tuples, names=names) - - tm.assert_index_equal(result, expected) - - def test_from_product_iterator(self): - # GH 18434 - first = ['foo', 'bar', 'buz'] - second = ['a', 'b', 'c'] - names = ['first', 'second'] - tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), - ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), - ('buz', 'c')] - expected = MultiIndex.from_tuples(tuples, names=names) - - # iterator as input - result = 
MultiIndex.from_product(iter([first, second]), names=names) - tm.assert_index_equal(result, expected) - - # Invalid non-iterable input - with tm.assert_raises_regex( - TypeError, "Input must be a list / sequence of iterables."): - MultiIndex.from_product(0) - - def test_from_product_empty(self): - # 0 levels - with tm.assert_raises_regex( - ValueError, "Must pass non-zero number of levels/labels"): - MultiIndex.from_product([]) - - # 1 level - result = MultiIndex.from_product([[]], names=['A']) - expected = pd.Index([], name='A') - tm.assert_index_equal(result.levels[0], expected) - - # 2 levels - l1 = [[], ['foo', 'bar', 'baz'], []] - l2 = [[], [], ['a', 'b', 'c']] - names = ['A', 'B'] - for first, second in zip(l1, l2): - result = MultiIndex.from_product([first, second], names=names) - expected = MultiIndex(levels=[first, second], - labels=[[], []], names=names) - tm.assert_index_equal(result, expected) - - # GH12258 - names = ['A', 'B', 'C'] - for N in range(4): - lvl2 = lrange(N) - result = MultiIndex.from_product([[], lvl2, []], names=names) - expected = MultiIndex(levels=[[], lvl2, []], - labels=[[], [], []], names=names) - tm.assert_index_equal(result, expected) - - def test_from_product_invalid_input(self): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] - for i in invalid_inputs: - pytest.raises(TypeError, MultiIndex.from_product, iterables=i) - - def test_from_product_datetimeindex(self): - dt_index = date_range('2000-01-01', periods=2) - mi = pd.MultiIndex.from_product([[1, 2], dt_index]) - etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp( - '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp( - '2000-01-01')), (2, pd.Timestamp('2000-01-02'))]) - tm.assert_numpy_array_equal(mi.values, etalon) - - def test_from_product_index_series_categorical(self): - # GH13743 - first = ['foo', 'bar'] - for ordered in [False, True]: - idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), - 
ordered=ordered) - expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), - categories=list("bac"), - ordered=ordered) - - for arr in [idx, pd.Series(idx), idx.values]: - result = pd.MultiIndex.from_product([first, arr]) - tm.assert_index_equal(result.get_level_values(1), expected) - - def test_values_boxed(self): - tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT), - (3, pd.Timestamp('2000-01-03')), - (1, pd.Timestamp('2000-01-04')), - (2, pd.Timestamp('2000-01-02')), - (3, pd.Timestamp('2000-01-03'))] - result = pd.MultiIndex.from_tuples(tuples) - expected = construct_1d_object_array_from_listlike(tuples) - tm.assert_numpy_array_equal(result.values, expected) - # Check that code branches for boxed values produce identical results - tm.assert_numpy_array_equal(result.values[:4], result[:4].values) - - def test_values_multiindex_datetimeindex(self): - # Test to ensure we hit the boxing / nobox part of MI.values - ints = np.arange(10 ** 18, 10 ** 18 + 5) - naive = pd.DatetimeIndex(ints) - aware = pd.DatetimeIndex(ints, tz='US/Central') - - idx = pd.MultiIndex.from_arrays([naive, aware]) - result = idx.values - - outer = pd.DatetimeIndex([x[0] for x in result]) - tm.assert_index_equal(outer, naive) - - inner = pd.DatetimeIndex([x[1] for x in result]) - tm.assert_index_equal(inner, aware) - - # n_lev > n_lab - result = idx[:2].values - - outer = pd.DatetimeIndex([x[0] for x in result]) - tm.assert_index_equal(outer, naive[:2]) - - inner = pd.DatetimeIndex([x[1] for x in result]) - tm.assert_index_equal(inner, aware[:2]) - - def test_values_multiindex_periodindex(self): - # Test to ensure we hit the boxing / nobox part of MI.values - ints = np.arange(2007, 2012) - pidx = pd.PeriodIndex(ints, freq='D') - - idx = pd.MultiIndex.from_arrays([ints, pidx]) - result = idx.values - - outer = pd.Int64Index([x[0] for x in result]) - tm.assert_index_equal(outer, pd.Int64Index(ints)) - - inner = pd.PeriodIndex([x[1] for x in result]) - tm.assert_index_equal(inner, 
pidx) - - # n_lev > n_lab - result = idx[:2].values - - outer = pd.Int64Index([x[0] for x in result]) - tm.assert_index_equal(outer, pd.Int64Index(ints[:2])) - - inner = pd.PeriodIndex([x[1] for x in result]) - tm.assert_index_equal(inner, pidx[:2]) - - def test_append(self): - result = self.index[:3].append(self.index[3:]) - assert result.equals(self.index) - - foos = [self.index[:1], self.index[1:3], self.index[3:]] - result = foos[0].append(foos[1:]) - assert result.equals(self.index) - - # empty - result = self.index.append([]) - assert result.equals(self.index) - - def test_append_mixed_dtypes(self): - # GH 13660 - dti = date_range('2011-01-01', freq='M', periods=3, ) - dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') - pi = period_range('2011-01', freq='M', periods=3) - - mi = MultiIndex.from_arrays([[1, 2, 3], - [1.1, np.nan, 3.3], - ['a', 'b', 'c'], - dti, dti_tz, pi]) - assert mi.nlevels == 6 - - res = mi.append(mi) - exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], - [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], - ['a', 'b', 'c', 'a', 'b', 'c'], - dti.append(dti), - dti_tz.append(dti_tz), - pi.append(pi)]) - tm.assert_index_equal(res, exp) - - other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], - ['x', 'y', 'z'], ['x', 'y', 'z'], - ['x', 'y', 'z'], ['x', 'y', 'z']]) - - res = mi.append(other) - exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], - [1.1, np.nan, 3.3, 'x', 'y', 'z'], - ['a', 'b', 'c', 'x', 'y', 'z'], - dti.append(pd.Index(['x', 'y', 'z'])), - dti_tz.append(pd.Index(['x', 'y', 'z'])), - pi.append(pd.Index(['x', 'y', 'z']))]) - tm.assert_index_equal(res, exp) - - def test_get_level_values(self): - result = self.index.get_level_values(0) - expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'], - name='first') - tm.assert_index_equal(result, expected) - assert result.name == 'first' - - result = self.index.get_level_values('first') - expected = self.index.get_level_values(0) - 
tm.assert_index_equal(result, expected) - - # GH 10460 - index = MultiIndex( - levels=[CategoricalIndex(['A', 'B']), - CategoricalIndex([1, 2, 3])], - labels=[np.array([0, 0, 0, 1, 1, 1]), - np.array([0, 1, 2, 0, 1, 2])]) - - exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B']) - tm.assert_index_equal(index.get_level_values(0), exp) - exp = CategoricalIndex([1, 2, 3, 1, 2, 3]) - tm.assert_index_equal(index.get_level_values(1), exp) - - def test_get_level_values_int_with_na(self): - # GH 17924 - arrays = [['a', 'b', 'b'], [1, np.nan, 2]] - index = pd.MultiIndex.from_arrays(arrays) - result = index.get_level_values(1) - expected = Index([1, np.nan, 2]) - tm.assert_index_equal(result, expected) - - arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]] - index = pd.MultiIndex.from_arrays(arrays) - result = index.get_level_values(1) - expected = Index([np.nan, np.nan, 2]) - tm.assert_index_equal(result, expected) - - def test_get_level_values_na(self): - arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]] - index = pd.MultiIndex.from_arrays(arrays) - result = index.get_level_values(0) - expected = pd.Index([np.nan, np.nan, np.nan]) - tm.assert_index_equal(result, expected) - - result = index.get_level_values(1) - expected = pd.Index(['a', np.nan, 1]) - tm.assert_index_equal(result, expected) - - arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])] - index = pd.MultiIndex.from_arrays(arrays) - result = index.get_level_values(1) - expected = pd.DatetimeIndex([0, 1, pd.NaT]) - tm.assert_index_equal(result, expected) - - arrays = [[], []] - index = pd.MultiIndex.from_arrays(arrays) - result = index.get_level_values(0) - expected = pd.Index([], dtype=object) - tm.assert_index_equal(result, expected) - - def test_get_level_values_all_na(self): - # GH 17924 when level entirely consists of nan - arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]] - index = pd.MultiIndex.from_arrays(arrays) - result = index.get_level_values(0) - expected = pd.Index([np.nan, np.nan, 
np.nan], dtype=np.float64) - tm.assert_index_equal(result, expected) - - result = index.get_level_values(1) - expected = pd.Index(['a', np.nan, 1], dtype=object) - tm.assert_index_equal(result, expected) - - def test_reorder_levels(self): - # this blows up - tm.assert_raises_regex(IndexError, '^Too many levels', - self.index.reorder_levels, [2, 1, 0]) - - def test_nlevels(self): - assert self.index.nlevels == 2 - - def test_iter(self): - result = list(self.index) - expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), - ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] - assert result == expected - - def test_legacy_pickle(self): - if PY3: - pytest.skip("testing for legacy pickles not " - "support on py3") - - path = tm.get_data_path('multiindex_v1.pickle') - obj = pd.read_pickle(path) - - obj2 = MultiIndex.from_tuples(obj.values) - assert obj.equals(obj2) - - res = obj.get_indexer(obj) - exp = np.arange(len(obj), dtype=np.intp) - assert_almost_equal(res, exp) - - res = obj.get_indexer(obj2[::-1]) - exp = obj.get_indexer(obj[::-1]) - exp2 = obj2.get_indexer(obj2[::-1]) - assert_almost_equal(res, exp) - assert_almost_equal(exp, exp2) - - def test_legacy_v2_unpickle(self): - - # 0.7.3 -> 0.8.0 format manage - path = tm.get_data_path('mindex_073.pickle') - obj = pd.read_pickle(path) - - obj2 = MultiIndex.from_tuples(obj.values) - assert obj.equals(obj2) - - res = obj.get_indexer(obj) - exp = np.arange(len(obj), dtype=np.intp) - assert_almost_equal(res, exp) - - res = obj.get_indexer(obj2[::-1]) - exp = obj.get_indexer(obj[::-1]) - exp2 = obj2.get_indexer(obj2[::-1]) - assert_almost_equal(res, exp) - assert_almost_equal(exp, exp2) - - def test_roundtrip_pickle_with_tz(self): - - # GH 8367 - # round-trip of timezone - index = MultiIndex.from_product( - [[1, 2], ['a', 'b'], date_range('20130101', periods=3, - tz='US/Eastern') - ], names=['one', 'two', 'three']) - unpickled = tm.round_trip_pickle(index) - assert index.equal_levels(unpickled) - - def 
test_from_tuples_index_values(self): - result = MultiIndex.from_tuples(self.index) - assert (result.values == self.index.values).all() - - def test_contains(self): - assert ('foo', 'two') in self.index - assert ('bar', 'two') not in self.index - assert None not in self.index - - def test_contains_top_level(self): - midx = MultiIndex.from_product([['A', 'B'], [1, 2]]) - assert 'A' in midx - assert 'A' not in midx._engine - - def test_contains_with_nat(self): - # MI with a NaT - mi = MultiIndex(levels=[['C'], - pd.date_range('2012-01-01', periods=5)], - labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], - names=[None, 'B']) - assert ('C', pd.Timestamp('2012-01-01')) in mi - for val in mi.values: - assert val in mi - - def test_is_all_dates(self): - assert not self.index.is_all_dates - - def test_is_numeric(self): - # MultiIndex is never numeric - assert not self.index.is_numeric() - - def test_getitem(self): - # scalar - assert self.index[2] == ('bar', 'one') - - # slice - result = self.index[2:5] - expected = self.index[[2, 3, 4]] - assert result.equals(expected) - - # boolean - result = self.index[[True, False, True, False, True, True]] - result2 = self.index[np.array([True, False, True, False, True, True])] - expected = self.index[[0, 2, 4, 5]] - assert result.equals(expected) - assert result2.equals(expected) - - def test_getitem_group_select(self): - sorted_idx, _ = self.index.sortlevel(0) - assert sorted_idx.get_loc('baz') == slice(3, 4) - assert sorted_idx.get_loc('foo') == slice(0, 2) - - def test_get_loc(self): - assert self.index.get_loc(('foo', 'two')) == 1 - assert self.index.get_loc(('baz', 'two')) == 3 - pytest.raises(KeyError, self.index.get_loc, ('bar', 'two')) - pytest.raises(KeyError, self.index.get_loc, 'quux') - - pytest.raises(NotImplementedError, self.index.get_loc, 'foo', - method='nearest') - - # 3 levels - index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( - lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( 
- [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) - pytest.raises(KeyError, index.get_loc, (1, 1)) - assert index.get_loc((2, 0)) == slice(3, 5) - - def test_get_loc_duplicates(self): - index = Index([2, 2, 2, 2]) - result = index.get_loc(2) - expected = slice(0, 4) - assert result == expected - # pytest.raises(Exception, index.get_loc, 2) - - index = Index(['c', 'a', 'a', 'b', 'b']) - rs = index.get_loc('c') - xp = 0 - assert rs == xp - - def test_get_value_duplicates(self): - index = MultiIndex(levels=[['D', 'B', 'C'], - [0, 26, 27, 37, 57, 67, 75, 82]], - labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], - [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], - names=['tag', 'day']) - - assert index.get_loc('D') == slice(0, 3) - with pytest.raises(KeyError): - index._engine.get_value(np.array([]), 'D') - - def test_get_loc_level(self): - index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( - lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( - [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) - - loc, new_index = index.get_loc_level((0, 1)) - expected = slice(1, 2) - exp_index = index[expected].droplevel(0).droplevel(0) - assert loc == expected - assert new_index.equals(exp_index) - - loc, new_index = index.get_loc_level((0, 1, 0)) - expected = 1 - assert loc == expected - assert new_index is None - - pytest.raises(KeyError, index.get_loc_level, (2, 2)) - - index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array( - [0, 0, 0, 0]), np.array([0, 1, 2, 3])]) - result, new_index = index.get_loc_level((2000, slice(None, None))) - expected = slice(None, None) - assert result == expected - assert new_index.equals(index.droplevel(0)) - - @pytest.mark.parametrize('level', [0, 1]) - @pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None]) - def test_get_loc_nan(self, level, null_val): - # GH 18485 : NaN in MultiIndex - levels = [['a', 'b'], ['c', 'd']] - key = ['b', 'd'] - levels[level] = np.array([0, null_val], 
dtype=type(null_val)) - key[level] = null_val - idx = MultiIndex.from_product(levels) - assert idx.get_loc(tuple(key)) == 3 - - def test_get_loc_missing_nan(self): - # GH 8569 - idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]]) - assert isinstance(idx.get_loc(1), slice) - pytest.raises(KeyError, idx.get_loc, 3) - pytest.raises(KeyError, idx.get_loc, np.nan) - pytest.raises(KeyError, idx.get_loc, [np.nan]) - - @pytest.mark.parametrize('dtype1', [int, float, bool, str]) - @pytest.mark.parametrize('dtype2', [int, float, bool, str]) - def test_get_loc_multiple_dtypes(self, dtype1, dtype2): - # GH 18520 - levels = [np.array([0, 1]).astype(dtype1), - np.array([0, 1]).astype(dtype2)] - idx = pd.MultiIndex.from_product(levels) - assert idx.get_loc(idx[2]) == 2 - - @pytest.mark.parametrize('level', [0, 1]) - @pytest.mark.parametrize('dtypes', [[int, float], [float, int]]) - def test_get_loc_implicit_cast(self, level, dtypes): - # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa - levels = [['a', 'b'], ['c', 'd']] - key = ['b', 'd'] - lev_dtype, key_dtype = dtypes - levels[level] = np.array([0, 1], dtype=lev_dtype) - key[level] = key_dtype(1) - idx = MultiIndex.from_product(levels) - assert idx.get_loc(tuple(key)) == 3 - - def test_get_loc_cast_bool(self): - # GH 19086 : int is casted to bool, but not vice-versa - levels = [[False, True], np.arange(2, dtype='int64')] - idx = MultiIndex.from_product(levels) - - assert idx.get_loc((0, 1)) == 1 - assert idx.get_loc((1, 0)) == 2 - - pytest.raises(KeyError, idx.get_loc, (False, True)) - pytest.raises(KeyError, idx.get_loc, (True, False)) - - def test_slice_locs(self): - df = tm.makeTimeDataFrame() - stacked = df.stack() - idx = stacked.index - - slob = slice(*idx.slice_locs(df.index[5], df.index[15])) - sliced = stacked[slob] - expected = df[5:16].stack() - tm.assert_almost_equal(sliced.values, expected.values) - - slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30), - df.index[15] - 
timedelta(seconds=30))) - sliced = stacked[slob] - expected = df[6:15].stack() - tm.assert_almost_equal(sliced.values, expected.values) - - def test_slice_locs_with_type_mismatch(self): - df = tm.makeTimeDataFrame() - stacked = df.stack() - idx = stacked.index - tm.assert_raises_regex(TypeError, '^Level type mismatch', - idx.slice_locs, (1, 3)) - tm.assert_raises_regex(TypeError, '^Level type mismatch', - idx.slice_locs, - df.index[5] + timedelta( - seconds=30), (5, 2)) - df = tm.makeCustomDataframe(5, 5) - stacked = df.stack() - idx = stacked.index - with tm.assert_raises_regex(TypeError, '^Level type mismatch'): - idx.slice_locs(timedelta(seconds=30)) - # TODO: Try creating a UnicodeDecodeError in exception message - with tm.assert_raises_regex(TypeError, '^Level type mismatch'): - idx.slice_locs(df.index[1], (16, "a")) - - def test_slice_locs_not_sorted(self): - index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( - lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( - [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) - - tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than " - "MultiIndex lexsort depth", - index.slice_locs, (1, 0, 1), (2, 1, 0)) - - # works - sorted_index, _ = index.sortlevel(0) - # should there be a test case here??? 
- sorted_index.slice_locs((1, 0, 1), (2, 1, 0)) - - def test_slice_locs_partial(self): - sorted_idx, _ = self.index.sortlevel(0) - - result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one')) - assert result == (1, 5) - - result = sorted_idx.slice_locs(None, ('qux', 'one')) - assert result == (0, 5) - - result = sorted_idx.slice_locs(('foo', 'two'), None) - assert result == (1, len(sorted_idx)) - - result = sorted_idx.slice_locs('bar', 'baz') - assert result == (2, 4) - - def test_slice_locs_not_contained(self): - # some searchsorted action - - index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]], - labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3], - [0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0) - - result = index.slice_locs((1, 0), (5, 2)) - assert result == (3, 6) - - result = index.slice_locs(1, 5) - assert result == (3, 6) - - result = index.slice_locs((2, 2), (5, 2)) - assert result == (3, 6) - - result = index.slice_locs(2, 5) - assert result == (3, 6) - - result = index.slice_locs((1, 0), (6, 3)) - assert result == (3, 8) - - result = index.slice_locs(-1, 10) - assert result == (0, len(index)) - - def test_consistency(self): - # need to construct an overflow - major_axis = lrange(70000) - minor_axis = lrange(10) - - major_labels = np.arange(70000) - minor_labels = np.repeat(lrange(10), 7000) - - # the fact that is works means it's consistent - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - - # inconsistent - major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3]) - minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1]) - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - - assert not index.is_unique - - def test_truncate(self): - major_axis = Index(lrange(4)) - minor_axis = Index(lrange(2)) - - major_labels = np.array([0, 0, 1, 2, 3, 3]) - minor_labels = np.array([0, 1, 0, 1, 0, 1]) - - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - - result = 
index.truncate(before=1) - assert 'foo' not in result.levels[0] - assert 1 in result.levels[0] - - result = index.truncate(after=1) - assert 2 not in result.levels[0] - assert 1 in result.levels[0] - - result = index.truncate(before=1, after=2) - assert len(result.levels[0]) == 2 - - # after < before - pytest.raises(ValueError, index.truncate, 3, 1) - - def test_get_indexer(self): - major_axis = Index(lrange(4)) - minor_axis = Index(lrange(2)) - - major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp) - minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp) - - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - idx1 = index[:5] - idx2 = index[[1, 3, 5]] - - r1 = idx1.get_indexer(idx2) - assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp)) - - r1 = idx2.get_indexer(idx1, method='pad') - e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp) - assert_almost_equal(r1, e1) - - r2 = idx2.get_indexer(idx1[::-1], method='pad') - assert_almost_equal(r2, e1[::-1]) - - rffill1 = idx2.get_indexer(idx1, method='ffill') - assert_almost_equal(r1, rffill1) - - r1 = idx2.get_indexer(idx1, method='backfill') - e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp) - assert_almost_equal(r1, e1) - - r2 = idx2.get_indexer(idx1[::-1], method='backfill') - assert_almost_equal(r2, e1[::-1]) - - rbfill1 = idx2.get_indexer(idx1, method='bfill') - assert_almost_equal(r1, rbfill1) - - # pass non-MultiIndex - r1 = idx1.get_indexer(idx2.values) - rexp1 = idx1.get_indexer(idx2) - assert_almost_equal(r1, rexp1) - - r1 = idx1.get_indexer([1, 2, 3]) - assert (r1 == [-1, -1, -1]).all() - - # create index with duplicates - idx1 = Index(lrange(10) + lrange(10)) - idx2 = Index(lrange(20)) - - msg = "Reindexing only valid with uniquely valued Index objects" - with tm.assert_raises_regex(InvalidIndexError, msg): - idx1.get_indexer(idx2) - - def test_get_indexer_nearest(self): - midx = MultiIndex.from_tuples([('a', 1), ('b', 2)]) - with 
pytest.raises(NotImplementedError): - midx.get_indexer(['a'], method='nearest') - with pytest.raises(NotImplementedError): - midx.get_indexer(['a'], method='pad', tolerance=2) - - def test_hash_collisions(self): - # non-smoke test that we don't get hash collisions - - index = MultiIndex.from_product([np.arange(1000), np.arange(1000)], - names=['one', 'two']) - result = index.get_indexer(index.values) - tm.assert_numpy_array_equal(result, np.arange( - len(index), dtype='intp')) - - for i in [0, 1, len(index) - 2, len(index) - 1]: - result = index.get_loc(index[i]) - assert result == i - - def test_format(self): - self.index.format() - self.index[:0].format() - - def test_format_integer_names(self): - index = MultiIndex(levels=[[0, 1], [0, 1]], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]) - index.format(names=True) - - def test_format_sparse_display(self): - index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]], - labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1], - [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]]) - - result = index.format() - assert result[3] == '1 0 0 0' - - def test_format_sparse_config(self): - warn_filters = warnings.filters - warnings.filterwarnings('ignore', category=FutureWarning, - module=".*format") - # GH1538 - pd.set_option('display.multi_sparse', False) - - result = self.index.format() - assert result[1] == 'foo two' - - tm.reset_display_options() - - warnings.filters = warn_filters - - def test_to_frame(self): - tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')] - - index = MultiIndex.from_tuples(tuples) - result = index.to_frame(index=False) - expected = DataFrame(tuples) - tm.assert_frame_equal(result, expected) - - result = index.to_frame() - expected.index = index - tm.assert_frame_equal(result, expected) - - tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')] - index = MultiIndex.from_tuples(tuples, names=['first', 'second']) - result = index.to_frame(index=False) - expected = DataFrame(tuples) - expected.columns = 
['first', 'second'] - tm.assert_frame_equal(result, expected) - - result = index.to_frame() - expected.index = index - tm.assert_frame_equal(result, expected) - - index = MultiIndex.from_product([range(5), - pd.date_range('20130101', periods=3)]) - result = index.to_frame(index=False) - expected = DataFrame( - {0: np.repeat(np.arange(5, dtype='int64'), 3), - 1: np.tile(pd.date_range('20130101', periods=3), 5)}) - tm.assert_frame_equal(result, expected) - - index = MultiIndex.from_product([range(5), - pd.date_range('20130101', periods=3)]) - result = index.to_frame() - expected.index = index - tm.assert_frame_equal(result, expected) - - def test_to_hierarchical(self): - index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), ( - 2, 'two')]) - result = index.to_hierarchical(3) - expected = MultiIndex(levels=[[1, 2], ['one', 'two']], - labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], - [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) - tm.assert_index_equal(result, expected) - assert result.names == index.names - - # K > 1 - result = index.to_hierarchical(3, 2) - expected = MultiIndex(levels=[[1, 2], ['one', 'two']], - labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]]) - tm.assert_index_equal(result, expected) - assert result.names == index.names - - # non-sorted - index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'), - (2, 'a'), (2, 'b')], - names=['N1', 'N2']) - - result = index.to_hierarchical(2) - expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), - (1, 'b'), - (2, 'a'), (2, 'a'), - (2, 'b'), (2, 'b')], - names=['N1', 'N2']) - tm.assert_index_equal(result, expected) - assert result.names == index.names - - def test_bounds(self): - self.index._bounds - - def test_equals_multi(self): - assert self.index.equals(self.index) - assert not self.index.equals(self.index.values) - assert self.index.equals(Index(self.index.values)) - - assert self.index.equal_levels(self.index) - assert not self.index.equals(self.index[:-1]) - 
assert not self.index.equals(self.index[-1]) - - # different number of levels - index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( - lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( - [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) - - index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1]) - assert not index.equals(index2) - assert not index.equal_levels(index2) - - # levels are different - major_axis = Index(lrange(4)) - minor_axis = Index(lrange(2)) - - major_labels = np.array([0, 0, 1, 2, 2, 3]) - minor_labels = np.array([0, 1, 0, 0, 1, 0]) - - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - assert not self.index.equals(index) - assert not self.index.equal_levels(index) - - # some of the labels are different - major_axis = Index(['foo', 'bar', 'baz', 'qux']) - minor_axis = Index(['one', 'two']) - - major_labels = np.array([0, 0, 2, 2, 3, 3]) - minor_labels = np.array([0, 1, 0, 1, 0, 1]) - - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - assert not self.index.equals(index) - - def test_equals_missing_values(self): - # make sure take is not using -1 - i = pd.MultiIndex.from_tuples([(0, pd.NaT), - (0, pd.Timestamp('20130101'))]) - result = i[0:1].equals(i[0]) - assert not result - result = i[1:2].equals(i[1]) - assert not result - - def test_identical(self): - mi = self.index.copy() - mi2 = self.index.copy() - assert mi.identical(mi2) - - mi = mi.set_names(['new1', 'new2']) - assert mi.equals(mi2) - assert not mi.identical(mi2) - - mi2 = mi2.set_names(['new1', 'new2']) - assert mi.identical(mi2) - - mi3 = Index(mi.tolist(), names=mi.names) - mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False) - assert mi.identical(mi3) - assert not mi.identical(mi4) - assert mi.equals(mi4) - - def test_is_(self): - mi = MultiIndex.from_tuples(lzip(range(10), range(10))) - assert mi.is_(mi) - assert 
mi.is_(mi.view()) - assert mi.is_(mi.view().view().view().view()) - mi2 = mi.view() - # names are metadata, they don't change id - mi2.names = ["A", "B"] - assert mi2.is_(mi) - assert mi.is_(mi2) - - assert mi.is_(mi.set_names(["C", "D"])) - mi2 = mi.view() - mi2.set_names(["E", "F"], inplace=True) - assert mi.is_(mi2) - # levels are inherent properties, they change identity - mi3 = mi2.set_levels([lrange(10), lrange(10)]) - assert not mi3.is_(mi2) - # shouldn't change - assert mi2.is_(mi) - mi4 = mi3.view() - - # GH 17464 - Remove duplicate MultiIndex levels - mi4.set_levels([lrange(10), lrange(10)], inplace=True) - assert not mi4.is_(mi3) - mi5 = mi.view() - mi5.set_levels(mi5.levels, inplace=True) - assert not mi5.is_(mi) - - def test_union(self): - piece1 = self.index[:5][::-1] - piece2 = self.index[3:] - - the_union = piece1 | piece2 - - tups = sorted(self.index.values) - expected = MultiIndex.from_tuples(tups) - - assert the_union.equals(expected) - - # corner case, pass self or empty thing: - the_union = self.index.union(self.index) - assert the_union is self.index - - the_union = self.index.union(self.index[:0]) - assert the_union is self.index - - # won't work in python 3 - # tuples = self.index.values - # result = self.index[:4] | tuples[4:] - # assert result.equals(tuples) - - # not valid for python 3 - # def test_union_with_regular_index(self): - # other = Index(['A', 'B', 'C']) - - # result = other.union(self.index) - # assert ('foo', 'one') in result - # assert 'B' in result - - # result2 = self.index.union(other) - # assert result.equals(result2) - - def test_intersection(self): - piece1 = self.index[:5][::-1] - piece2 = self.index[3:] - - the_int = piece1 & piece2 - tups = sorted(self.index[3:5].values) - expected = MultiIndex.from_tuples(tups) - assert the_int.equals(expected) - - # corner case, pass self - the_int = self.index.intersection(self.index) - assert the_int is self.index - - # empty intersection: disjoint - empty = self.index[:2] & 
self.index[2:] - expected = self.index[:0] - assert empty.equals(expected) - - # can't do in python 3 - # tuples = self.index.values - # result = self.index & tuples - # assert result.equals(tuples) - - def test_sub(self): - - first = self.index - - # - now raises (previously was set op difference) - with pytest.raises(TypeError): - first - self.index[-3:] - with pytest.raises(TypeError): - self.index[-3:] - first - with pytest.raises(TypeError): - self.index[-3:] - first.tolist() - with pytest.raises(TypeError): - first.tolist() - self.index[-3:] - - def test_difference(self): - - first = self.index - result = first.difference(self.index[-3:]) - expected = MultiIndex.from_tuples(sorted(self.index[:-3].values), - sortorder=0, - names=self.index.names) - - assert isinstance(result, MultiIndex) - assert result.equals(expected) - assert result.names == self.index.names - - # empty difference: reflexive - result = self.index.difference(self.index) - expected = self.index[:0] - assert result.equals(expected) - assert result.names == self.index.names - - # empty difference: superset - result = self.index[-3:].difference(self.index) - expected = self.index[:0] - assert result.equals(expected) - assert result.names == self.index.names - - # empty difference: degenerate - result = self.index[:0].difference(self.index) - expected = self.index[:0] - assert result.equals(expected) - assert result.names == self.index.names - - # names not the same - chunklet = self.index[-3:] - chunklet.names = ['foo', 'baz'] - result = first.difference(chunklet) - assert result.names == (None, None) - - # empty, but non-equal - result = self.index.difference(self.index.sortlevel(1)[0]) - assert len(result) == 0 - - # raise Exception called with non-MultiIndex - result = first.difference(first.values) - assert result.equals(first[:0]) - - # name from empty array - result = first.difference([]) - assert first.equals(result) - assert first.names == result.names - - # name from non-empty array - 
result = first.difference([('foo', 'one')]) - expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ( - 'foo', 'two'), ('qux', 'one'), ('qux', 'two')]) - expected.names = first.names - assert first.names == result.names - tm.assert_raises_regex(TypeError, "other must be a MultiIndex " - "or a list of tuples", - first.difference, [1, 2, 3, 4, 5]) - - def test_from_tuples(self): - tm.assert_raises_regex(TypeError, 'Cannot infer number of levels ' - 'from empty list', - MultiIndex.from_tuples, []) - - expected = MultiIndex(levels=[[1, 3], [2, 4]], - labels=[[0, 1], [0, 1]], - names=['a', 'b']) - - # input tuples - result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b']) - tm.assert_index_equal(result, expected) - - def test_from_tuples_iterator(self): - # GH 18434 - # input iterator for tuples - expected = MultiIndex(levels=[[1, 3], [2, 4]], - labels=[[0, 1], [0, 1]], - names=['a', 'b']) - - result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b']) - tm.assert_index_equal(result, expected) - - # input non-iterables - with tm.assert_raises_regex( - TypeError, 'Input must be a list / sequence of tuple-likes.'): - MultiIndex.from_tuples(0) - - def test_from_tuples_empty(self): - # GH 16777 - result = MultiIndex.from_tuples([], names=['a', 'b']) - expected = MultiIndex.from_arrays(arrays=[[], []], - names=['a', 'b']) - tm.assert_index_equal(result, expected) - - def test_argsort(self): - result = self.index.argsort() - expected = self.index.values.argsort() - tm.assert_numpy_array_equal(result, expected) - - def test_sortlevel(self): - import random - - tuples = list(self.index) - random.shuffle(tuples) - - index = MultiIndex.from_tuples(tuples) - - sorted_idx, _ = index.sortlevel(0) - expected = MultiIndex.from_tuples(sorted(tuples)) - assert sorted_idx.equals(expected) - - sorted_idx, _ = index.sortlevel(0, ascending=False) - assert sorted_idx.equals(expected[::-1]) - - sorted_idx, _ = index.sortlevel(1) - by1 = sorted(tuples, 
key=lambda x: (x[1], x[0])) - expected = MultiIndex.from_tuples(by1) - assert sorted_idx.equals(expected) - - sorted_idx, _ = index.sortlevel(1, ascending=False) - assert sorted_idx.equals(expected[::-1]) - - def test_sortlevel_not_sort_remaining(self): - mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) - sorted_idx, _ = mi.sortlevel('A', sort_remaining=False) - assert sorted_idx.equals(mi) - - def test_sortlevel_deterministic(self): - tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'), - ('foo', 'one'), ('baz', 'two'), ('qux', 'one')] - - index = MultiIndex.from_tuples(tuples) - - sorted_idx, _ = index.sortlevel(0) - expected = MultiIndex.from_tuples(sorted(tuples)) - assert sorted_idx.equals(expected) - - sorted_idx, _ = index.sortlevel(0, ascending=False) - assert sorted_idx.equals(expected[::-1]) - - sorted_idx, _ = index.sortlevel(1) - by1 = sorted(tuples, key=lambda x: (x[1], x[0])) - expected = MultiIndex.from_tuples(by1) - assert sorted_idx.equals(expected) - - sorted_idx, _ = index.sortlevel(1, ascending=False) - assert sorted_idx.equals(expected[::-1]) - - def test_dims(self): - pass - - def test_drop(self): - dropped = self.index.drop([('foo', 'two'), ('qux', 'one')]) - - index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')]) - dropped2 = self.index.drop(index) - - expected = self.index[[0, 2, 3, 5]] - tm.assert_index_equal(dropped, expected) - tm.assert_index_equal(dropped2, expected) - - dropped = self.index.drop(['bar']) - expected = self.index[[0, 1, 3, 4, 5]] - tm.assert_index_equal(dropped, expected) - - dropped = self.index.drop('foo') - expected = self.index[[2, 3, 4, 5]] - tm.assert_index_equal(dropped, expected) - - index = MultiIndex.from_tuples([('bar', 'two')]) - pytest.raises(KeyError, self.index.drop, [('bar', 'two')]) - pytest.raises(KeyError, self.index.drop, index) - pytest.raises(KeyError, self.index.drop, ['foo', 'two']) - - # partially correct argument - mixed_index = 
MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')]) - pytest.raises(KeyError, self.index.drop, mixed_index) - - # error='ignore' - dropped = self.index.drop(index, errors='ignore') - expected = self.index[[0, 1, 2, 3, 4, 5]] - tm.assert_index_equal(dropped, expected) - - dropped = self.index.drop(mixed_index, errors='ignore') - expected = self.index[[0, 1, 2, 3, 5]] - tm.assert_index_equal(dropped, expected) - - dropped = self.index.drop(['foo', 'two'], errors='ignore') - expected = self.index[[2, 3, 4, 5]] - tm.assert_index_equal(dropped, expected) - - # mixed partial / full drop - dropped = self.index.drop(['foo', ('qux', 'one')]) - expected = self.index[[2, 3, 5]] - tm.assert_index_equal(dropped, expected) - - # mixed partial / full drop / error='ignore' - mixed_index = ['foo', ('qux', 'one'), 'two'] - pytest.raises(KeyError, self.index.drop, mixed_index) - dropped = self.index.drop(mixed_index, errors='ignore') - expected = self.index[[2, 3, 5]] - tm.assert_index_equal(dropped, expected) - - def test_droplevel_with_names(self): - index = self.index[self.index.get_loc('foo')] - dropped = index.droplevel(0) - assert dropped.name == 'second' - - index = MultiIndex( - levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], - labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( - [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])], - names=['one', 'two', 'three']) - dropped = index.droplevel(0) - assert dropped.names == ('two', 'three') - - dropped = index.droplevel('two') - expected = index.droplevel(1) - assert dropped.equals(expected) - - def test_droplevel_multiple(self): - index = MultiIndex( - levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], - labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( - [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])], - names=['one', 'two', 'three']) - - dropped = index[:2].droplevel(['three', 'one']) - expected = index[:2].droplevel(2).droplevel(0) - assert 
dropped.equals(expected) - - def test_drop_not_lexsorted(self): - # GH 12078 - - # define the lexsorted version of the multi-index - tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')] - lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c']) - assert lexsorted_mi.is_lexsorted() - - # and the not-lexsorted version - df = pd.DataFrame(columns=['a', 'b', 'c', 'd'], - data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]]) - df = df.pivot_table(index='a', columns=['b', 'c'], values='d') - df = df.reset_index() - not_lexsorted_mi = df.columns - assert not not_lexsorted_mi.is_lexsorted() - - # compare the results - tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi) - with tm.assert_produces_warning(PerformanceWarning): - tm.assert_index_equal(lexsorted_mi.drop('a'), - not_lexsorted_mi.drop('a')) - - def test_insert(self): - # key contained in all levels - new_index = self.index.insert(0, ('bar', 'two')) - assert new_index.equal_levels(self.index) - assert new_index[0] == ('bar', 'two') - - # key not contained in all levels - new_index = self.index.insert(0, ('abc', 'three')) - - exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first') - tm.assert_index_equal(new_index.levels[0], exp0) - - exp1 = Index(list(self.index.levels[1]) + ['three'], name='second') - tm.assert_index_equal(new_index.levels[1], exp1) - assert new_index[0] == ('abc', 'three') - - # key wrong length - msg = "Item must have length equal to number of levels" - with tm.assert_raises_regex(ValueError, msg): - self.index.insert(0, ('foo2',)) - - left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], - columns=['1st', '2nd', '3rd']) - left.set_index(['1st', '2nd'], inplace=True) - ts = left['3rd'].copy(deep=True) - - left.loc[('b', 'x'), '3rd'] = 2 - left.loc[('b', 'a'), '3rd'] = -1 - left.loc[('b', 'b'), '3rd'] = 3 - left.loc[('a', 'x'), '3rd'] = 4 - left.loc[('a', 'w'), '3rd'] = 5 - left.loc[('a', 'a'), '3rd'] = 6 - - ts.loc[('b', 'x')] = 2 - ts.loc['b', 'a'] = -1 - ts.loc[('b', 'b')] = 3 - 
ts.loc['a', 'x'] = 4 - ts.loc[('a', 'w')] = 5 - ts.loc['a', 'a'] = 6 - - right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], - ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], - ['a', 'w', 5], ['a', 'a', 6]], - columns=['1st', '2nd', '3rd']) - right.set_index(['1st', '2nd'], inplace=True) - # FIXME data types changes to float because - # of intermediate nan insertion; - tm.assert_frame_equal(left, right, check_dtype=False) - tm.assert_series_equal(ts, right['3rd']) - - # GH9250 - idx = [('test1', i) for i in range(5)] + \ - [('test2', i) for i in range(6)] + \ - [('test', 17), ('test', 18)] - - left = pd.Series(np.linspace(0, 10, 11), - pd.MultiIndex.from_tuples(idx[:-2])) - - left.loc[('test', 17)] = 11 - left.loc[('test', 18)] = 12 - - right = pd.Series(np.linspace(0, 12, 13), - pd.MultiIndex.from_tuples(idx)) - - tm.assert_series_equal(left, right) - - def test_take_preserve_name(self): - taken = self.index.take([3, 0, 1]) - assert taken.names == self.index.names - - def test_take_fill_value(self): - # GH 12631 - vals = [['A', 'B'], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] - idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) - - result = idx.take(np.array([1, 0, -1])) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - ('B', pd.Timestamp('2011-01-02'))] - expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - (np.nan, pd.NaT)] - expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - ('B', pd.Timestamp('2011-01-02'))] - expected = 
pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - - def take_invalid_kwargs(self): - vals = [['A', 'B'], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] - idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) - indices = [1, 2] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex(TypeError, msg, idx.take, - indices, foo=2) - - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, mode='clip') - - @pytest.mark.parametrize('other', - [Index(['three', 'one', 'two']), - Index(['one']), - Index(['one', 'three'])]) - def test_join_level(self, other, join_type): - join_index, lidx, ridx = other.join(self.index, how=join_type, - level='second', - return_indexers=True) - - exp_level = other.join(self.index.levels[1], how=join_type) - assert join_index.levels[0].equals(self.index.levels[0]) - assert join_index.levels[1].equals(exp_level) - - # pare down levels - mask = np.array( - [x[1] in exp_level for x in self.index], dtype=bool) - exp_values = self.index.values[mask] - tm.assert_numpy_array_equal(join_index.values, exp_values) - - if join_type in ('outer', 'inner'): - join_index2, ridx2, lidx2 = \ - self.index.join(other, how=join_type, level='second', - return_indexers=True) - - assert join_index.equals(join_index2) - tm.assert_numpy_array_equal(lidx, lidx2) - tm.assert_numpy_array_equal(ridx, ridx2) - 
tm.assert_numpy_array_equal(join_index2.values, exp_values) - - def test_join_level_corner_case(self): - # some corner cases - idx = Index(['three', 'one', 'two']) - result = idx.join(self.index, level='second') - assert isinstance(result, MultiIndex) - - tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous", - self.index.join, self.index, level=1) - - def test_join_self(self, join_type): - res = self.index - joined = res.join(res, how=join_type) - assert res is joined - - def test_join_multi(self): - # GH 10665 - midx = pd.MultiIndex.from_product( - [np.arange(4), np.arange(4)], names=['a', 'b']) - idx = pd.Index([1, 2, 5], name='b') - - # inner - jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True) - exp_idx = pd.MultiIndex.from_product( - [np.arange(4), [1, 2]], names=['a', 'b']) - exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp) - exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp) - tm.assert_index_equal(jidx, exp_idx) - tm.assert_numpy_array_equal(lidx, exp_lidx) - tm.assert_numpy_array_equal(ridx, exp_ridx) - # flip - jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True) - tm.assert_index_equal(jidx, exp_idx) - tm.assert_numpy_array_equal(lidx, exp_lidx) - tm.assert_numpy_array_equal(ridx, exp_ridx) - - # keep MultiIndex - jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True) - exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, - 1, -1], dtype=np.intp) - tm.assert_index_equal(jidx, midx) - assert lidx is None - tm.assert_numpy_array_equal(ridx, exp_ridx) - # flip - jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True) - tm.assert_index_equal(jidx, midx) - assert lidx is None - tm.assert_numpy_array_equal(ridx, exp_ridx) - - def test_reindex(self): - result, indexer = self.index.reindex(list(self.index[:4])) - assert isinstance(result, MultiIndex) - self.check_level_names(result, self.index[:4].names) - - result, indexer = 
self.index.reindex(list(self.index)) - assert isinstance(result, MultiIndex) - assert indexer is None - self.check_level_names(result, self.index.names) - - def test_reindex_level(self): - idx = Index(['one']) - - target, indexer = self.index.reindex(idx, level='second') - target2, indexer2 = idx.reindex(self.index, level='second') - - exp_index = self.index.join(idx, level='second', how='right') - exp_index2 = self.index.join(idx, level='second', how='left') - - assert target.equals(exp_index) - exp_indexer = np.array([0, 2, 4]) - tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False) - - assert target2.equals(exp_index2) - exp_indexer2 = np.array([0, -1, 0, -1, 0, -1]) - tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False) - - tm.assert_raises_regex(TypeError, "Fill method not supported", - self.index.reindex, self.index, - method='pad', level='second') - - tm.assert_raises_regex(TypeError, "Fill method not supported", - idx.reindex, idx, method='bfill', - level='first') - - def test_duplicates(self): - assert not self.index.has_duplicates - assert self.index.append(self.index).has_duplicates - - index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[ - [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) - assert index.has_duplicates - - # GH 9075 - t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), - (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), - (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135), - (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145), - (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158), - (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122), - (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160), - (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180), - (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143), - (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128), - (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 
129), - (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111), - (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114), - (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121), - (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126), - (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155), - (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), - (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] - - index = pd.MultiIndex.from_tuples(t) - assert not index.has_duplicates - - # handle int64 overflow if possible - def check(nlevels, with_nulls): - labels = np.tile(np.arange(500), 2) - level = np.arange(500) - - if with_nulls: # inject some null values - labels[500] = -1 # common nan value - labels = [labels.copy() for i in range(nlevels)] - for i in range(nlevels): - labels[i][500 + i - nlevels // 2] = -1 - - labels += [np.array([-1, 1]).repeat(500)] - else: - labels = [labels] * nlevels + [np.arange(2).repeat(500)] - - levels = [level] * nlevels + [[0, 1]] - - # no dups - index = MultiIndex(levels=levels, labels=labels) - assert not index.has_duplicates - - # with a dup - if with_nulls: - f = lambda a: np.insert(a, 1000, a[0]) - labels = list(map(f, labels)) - index = MultiIndex(levels=levels, labels=labels) - else: - values = index.values.tolist() - index = MultiIndex.from_tuples(values + [values[0]]) - - assert index.has_duplicates - - # no overflow - check(4, False) - check(4, True) - - # overflow possible - check(8, False) - check(8, True) - - # GH 9125 - n, k = 200, 5000 - levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] - labels = [np.random.choice(n, k * n) for lev in levels] - mi = MultiIndex(levels=levels, labels=labels) - - for keep in ['first', 'last', False]: - left = mi.duplicated(keep=keep) - right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep) - tm.assert_numpy_array_equal(left, right) - - # GH5873 - for a in [101, 102]: - mi = MultiIndex.from_arrays([[101, a], [3.5, 
np.nan]]) - assert not mi.has_duplicates - assert mi.get_duplicates() == [] - tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( - 2, dtype='bool')) - - for n in range(1, 6): # 1st level shape - for m in range(1, 5): # 2nd level shape - # all possible unique combinations, including nan - lab = product(range(-1, n), range(-1, m)) - mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]], - labels=np.random.permutation(list(lab)).T) - assert len(mi) == (n + 1) * (m + 1) - assert not mi.has_duplicates - assert mi.get_duplicates() == [] - tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( - len(mi), dtype='bool')) - - def test_duplicate_meta_data(self): - # GH 10115 - index = MultiIndex( - levels=[[0, 1], [0, 1, 2]], - labels=[[0, 0, 0, 0, 1, 1, 1], - [0, 1, 2, 0, 0, 1, 2]]) - - for idx in [index, - index.set_names([None, None]), - index.set_names([None, 'Num']), - index.set_names(['Upper', 'Num']), ]: - assert idx.has_duplicates - assert idx.drop_duplicates().names == idx.names - - def test_get_unique_index(self): - idx = self.index[[0, 1, 0, 1, 1, 0, 0]] - expected = self.index._shallow_copy(idx[[0, 1]]) - - for dropna in [False, True]: - result = idx._get_unique_index(dropna=dropna) - assert result.unique - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize('names', [None, ['first', 'second']]) - def test_unique(self, names): - mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], - names=names) - - res = mi.unique() - exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) - tm.assert_index_equal(res, exp) - - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')], - names=names) - res = mi.unique() - exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')], - names=mi.names) - tm.assert_index_equal(res, exp) - - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')], - names=names) - res = mi.unique() - exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names) - tm.assert_index_equal(res, exp) - 
- # GH #20568 - empty MI - mi = pd.MultiIndex.from_arrays([[], []], names=names) - res = mi.unique() - tm.assert_index_equal(mi, res) - - @pytest.mark.parametrize('level', [0, 'first', 1, 'second']) - def test_unique_level(self, level): - # GH #17896 - with level= argument - result = self.index.unique(level=level) - expected = self.index.get_level_values(level).unique() - tm.assert_index_equal(result, expected) - - # With already unique level - mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], - names=['first', 'second']) - result = mi.unique(level=level) - expected = mi.get_level_values(level) - tm.assert_index_equal(result, expected) - - # With empty MI - mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second']) - result = mi.unique(level=level) - expected = mi.get_level_values(level) - - def test_unique_datetimelike(self): - idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', - '2015-01-01', 'NaT', 'NaT']) - idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', - '2015-01-02', 'NaT', '2015-01-01'], - tz='Asia/Tokyo') - result = pd.MultiIndex.from_arrays([idx1, idx2]).unique() - - eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) - eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02', - 'NaT', '2015-01-01'], - tz='Asia/Tokyo') - exp = pd.MultiIndex.from_arrays([eidx1, eidx2]) - tm.assert_index_equal(result, exp) - - def test_tolist(self): - result = self.index.tolist() - exp = list(self.index.values) - assert result == exp - - def test_repr_with_unicode_data(self): - with pd.core.config.option_context("display.encoding", 'UTF-8'): - d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} - index = pd.DataFrame(d).set_index(["a", "b"]).index - assert "\\u" not in repr(index) # we don't want unicode-escaped - - def test_repr_roundtrip(self): - - mi = MultiIndex.from_product([list('ab'), range(3)], - names=['first', 'second']) - str(mi) - - if PY3: - tm.assert_index_equal(eval(repr(mi)), mi, 
exact=True) - else: - result = eval(repr(mi)) - # string coerces to unicode - tm.assert_index_equal(result, mi, exact=False) - assert mi.get_level_values('first').inferred_type == 'string' - assert result.get_level_values('first').inferred_type == 'unicode' - - mi_u = MultiIndex.from_product( - [list(u'ab'), range(3)], names=['first', 'second']) - result = eval(repr(mi_u)) - tm.assert_index_equal(result, mi_u, exact=True) - - # formatting - if PY3: - str(mi) - else: - compat.text_type(mi) - - # long format - mi = MultiIndex.from_product([list('abcdefg'), range(10)], - names=['first', 'second']) - - if PY3: - tm.assert_index_equal(eval(repr(mi)), mi, exact=True) - else: - result = eval(repr(mi)) - # string coerces to unicode - tm.assert_index_equal(result, mi, exact=False) - assert mi.get_level_values('first').inferred_type == 'string' - assert result.get_level_values('first').inferred_type == 'unicode' - - result = eval(repr(mi_u)) - tm.assert_index_equal(result, mi_u, exact=True) - - def test_str(self): - # tested elsewhere - pass - - def test_unicode_string_with_unicode(self): - d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} - idx = pd.DataFrame(d).set_index(["a", "b"]).index - - if PY3: - str(idx) - else: - compat.text_type(idx) - - def test_bytestring_with_unicode(self): - d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} - idx = pd.DataFrame(d).set_index(["a", "b"]).index - - if PY3: - bytes(idx) - else: - str(idx) - - def test_slice_keep_name(self): - x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')], - names=['x', 'y']) - assert x[1:].names == x.names - - def test_isna_behavior(self): - # should not segfault GH5123 - # NOTE: if MI representation changes, may make sense to allow - # isna(MI) - with pytest.raises(NotImplementedError): - pd.isna(self.index) - - def test_level_setting_resets_attributes(self): - ind = pd.MultiIndex.from_arrays([ - ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3] - ]) - assert ind.is_monotonic - 
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True) - # if this fails, probably didn't reset the cache correctly. - assert not ind.is_monotonic - - def test_is_monotonic_increasing(self): - i = MultiIndex.from_product([np.arange(10), - np.arange(10)], names=['one', 'two']) - assert i.is_monotonic - assert i._is_strictly_monotonic_increasing - assert Index(i.values).is_monotonic - assert i._is_strictly_monotonic_increasing - - i = MultiIndex.from_product([np.arange(10, 0, -1), - np.arange(10)], names=['one', 'two']) - assert not i.is_monotonic - assert not i._is_strictly_monotonic_increasing - assert not Index(i.values).is_monotonic - assert not Index(i.values)._is_strictly_monotonic_increasing - - i = MultiIndex.from_product([np.arange(10), - np.arange(10, 0, -1)], - names=['one', 'two']) - assert not i.is_monotonic - assert not i._is_strictly_monotonic_increasing - assert not Index(i.values).is_monotonic - assert not Index(i.values)._is_strictly_monotonic_increasing - - i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']]) - assert not i.is_monotonic - assert not i._is_strictly_monotonic_increasing - assert not Index(i.values).is_monotonic - assert not Index(i.values)._is_strictly_monotonic_increasing - - # string ordering - i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], - ['one', 'two', 'three']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - assert not i.is_monotonic - assert not Index(i.values).is_monotonic - assert not i._is_strictly_monotonic_increasing - assert not Index(i.values)._is_strictly_monotonic_increasing - - i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'], - ['mom', 'next', 'zenith']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - assert i.is_monotonic - assert Index(i.values).is_monotonic - assert i._is_strictly_monotonic_increasing - assert Index(i.values)._is_strictly_monotonic_increasing - - # 
mixed levels, hits the TypeError - i = MultiIndex( - levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237', - 'nl0000289783', - 'nl0000289965', 'nl0000301109']], - labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], - names=['household_id', 'asset_id']) - - assert not i.is_monotonic - assert not i._is_strictly_monotonic_increasing - - # empty - i = MultiIndex.from_arrays([[], []]) - assert i.is_monotonic - assert Index(i.values).is_monotonic - assert i._is_strictly_monotonic_increasing - assert Index(i.values)._is_strictly_monotonic_increasing - - def test_is_monotonic_decreasing(self): - i = MultiIndex.from_product([np.arange(9, -1, -1), - np.arange(9, -1, -1)], - names=['one', 'two']) - assert i.is_monotonic_decreasing - assert i._is_strictly_monotonic_decreasing - assert Index(i.values).is_monotonic_decreasing - assert i._is_strictly_monotonic_decreasing - - i = MultiIndex.from_product([np.arange(10), - np.arange(10, 0, -1)], - names=['one', 'two']) - assert not i.is_monotonic_decreasing - assert not i._is_strictly_monotonic_decreasing - assert not Index(i.values).is_monotonic_decreasing - assert not Index(i.values)._is_strictly_monotonic_decreasing - - i = MultiIndex.from_product([np.arange(10, 0, -1), - np.arange(10)], names=['one', 'two']) - assert not i.is_monotonic_decreasing - assert not i._is_strictly_monotonic_decreasing - assert not Index(i.values).is_monotonic_decreasing - assert not Index(i.values)._is_strictly_monotonic_decreasing - - i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']]) - assert not i.is_monotonic_decreasing - assert not i._is_strictly_monotonic_decreasing - assert not Index(i.values).is_monotonic_decreasing - assert not Index(i.values)._is_strictly_monotonic_decreasing - - # string ordering - i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'], - ['three', 'two', 'one']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - assert not i.is_monotonic_decreasing 
- assert not Index(i.values).is_monotonic_decreasing - assert not i._is_strictly_monotonic_decreasing - assert not Index(i.values)._is_strictly_monotonic_decreasing - - i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'], - ['zenith', 'next', 'mom']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - assert i.is_monotonic_decreasing - assert Index(i.values).is_monotonic_decreasing - assert i._is_strictly_monotonic_decreasing - assert Index(i.values)._is_strictly_monotonic_decreasing - - # mixed levels, hits the TypeError - i = MultiIndex( - levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965', - 'nl0000289783', 'lu0197800237', - 'gb00b03mlx29']], - labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], - names=['household_id', 'asset_id']) - - assert not i.is_monotonic_decreasing - assert not i._is_strictly_monotonic_decreasing - - # empty - i = MultiIndex.from_arrays([[], []]) - assert i.is_monotonic_decreasing - assert Index(i.values).is_monotonic_decreasing - assert i._is_strictly_monotonic_decreasing - assert Index(i.values)._is_strictly_monotonic_decreasing - - def test_is_strictly_monotonic_increasing(self): - idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']], - labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) - assert idx.is_monotonic_increasing - assert not idx._is_strictly_monotonic_increasing - - def test_is_strictly_monotonic_decreasing(self): - idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']], - labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) - assert idx.is_monotonic_decreasing - assert not idx._is_strictly_monotonic_decreasing - - def test_reconstruct_sort(self): - - # starts off lexsorted & monotonic - mi = MultiIndex.from_arrays([ - ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3] - ]) - assert mi.is_lexsorted() - assert mi.is_monotonic - - recons = mi._sort_levels_monotonic() - assert recons.is_lexsorted() - assert recons.is_monotonic - assert mi is recons - - assert mi.equals(recons) - assert 
Index(mi.values).equals(Index(recons.values)) - - # cannot convert to lexsorted - mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'), - ('x', 'b'), ('y', 'a'), ('z', 'b')], - names=['one', 'two']) - assert not mi.is_lexsorted() - assert not mi.is_monotonic - - recons = mi._sort_levels_monotonic() - assert not recons.is_lexsorted() - assert not recons.is_monotonic - - assert mi.equals(recons) - assert Index(mi.values).equals(Index(recons.values)) - - # cannot convert to lexsorted - mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]], - labels=[[0, 1, 0, 2], [2, 0, 0, 1]], - names=['col1', 'col2']) - assert not mi.is_lexsorted() - assert not mi.is_monotonic - - recons = mi._sort_levels_monotonic() - assert not recons.is_lexsorted() - assert not recons.is_monotonic - - assert mi.equals(recons) - assert Index(mi.values).equals(Index(recons.values)) - - def test_reconstruct_remove_unused(self): - # xref to GH 2770 - df = DataFrame([['deleteMe', 1, 9], - ['keepMe', 2, 9], - ['keepMeToo', 3, 9]], - columns=['first', 'second', 'third']) - df2 = df.set_index(['first', 'second'], drop=False) - df2 = df2[df2['first'] != 'deleteMe'] - - # removed levels are there - expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'], - [1, 2, 3]], - labels=[[1, 2], [1, 2]], - names=['first', 'second']) - result = df2.index - tm.assert_index_equal(result, expected) - - expected = MultiIndex(levels=[['keepMe', 'keepMeToo'], - [2, 3]], - labels=[[0, 1], [0, 1]], - names=['first', 'second']) - result = df2.index.remove_unused_levels() - tm.assert_index_equal(result, expected) - - # idempotent - result2 = result.remove_unused_levels() - tm.assert_index_equal(result2, expected) - assert result2.is_(result) - - @pytest.mark.parametrize('level0', [['a', 'd', 'b'], - ['a', 'd', 'b', 'unused']]) - @pytest.mark.parametrize('level1', [['w', 'x', 'y', 'z'], - ['w', 'x', 'y', 'z', 'unused']]) - def test_remove_unused_nan(self, level0, level1): - # GH 18417 - mi = 
pd.MultiIndex(levels=[level0, level1], - labels=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]]) - - result = mi.remove_unused_levels() - tm.assert_index_equal(result, mi) - for level in 0, 1: - assert('unused' not in result.levels[level]) - - @pytest.mark.parametrize('first_type,second_type', [ - ('int64', 'int64'), - ('datetime64[D]', 'str')]) - def test_remove_unused_levels_large(self, first_type, second_type): - # GH16556 - - # because tests should be deterministic (and this test in particular - # checks that levels are removed, which is not the case for every - # random input): - rng = np.random.RandomState(4) # seed is arbitrary value that works - - size = 1 << 16 - df = DataFrame(dict( - first=rng.randint(0, 1 << 13, size).astype(first_type), - second=rng.randint(0, 1 << 10, size).astype(second_type), - third=rng.rand(size))) - df = df.groupby(['first', 'second']).sum() - df = df[df.third < 0.1] - - result = df.index.remove_unused_levels() - assert len(result.levels[0]) < len(df.index.levels[0]) - assert len(result.levels[1]) < len(df.index.levels[1]) - assert result.equals(df.index) - - expected = df.reset_index().set_index(['first', 'second']).index - tm.assert_index_equal(result, expected) - - def test_isin(self): - values = [('foo', 2), ('bar', 3), ('quux', 4)] - - idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange( - 4)]) - result = idx.isin(values) - expected = np.array([False, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - # empty, return dtype bool - idx = MultiIndex.from_arrays([[], []]) - result = idx.isin(values) - assert len(result) == 0 - assert result.dtype == np.bool_ - - @pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy") - def test_isin_nan_not_pypy(self): - idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]]) - tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]), - np.array([False, False])) - tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]), - np.array([False, 
False])) - - @pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy") - def test_isin_nan_pypy(self): - idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]]) - tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]), - np.array([False, True])) - tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]), - np.array([False, True])) - - def test_isin_level_kwarg(self): - idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange( - 4)]) - - vals_0 = ['foo', 'bar', 'quux'] - vals_1 = [2, 3, 10] - - expected = np.array([False, False, True, True]) - tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0)) - tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2)) - - tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1)) - tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1)) - - pytest.raises(IndexError, idx.isin, vals_0, level=5) - pytest.raises(IndexError, idx.isin, vals_0, level=-5) - - pytest.raises(KeyError, idx.isin, vals_0, level=1.0) - pytest.raises(KeyError, idx.isin, vals_1, level=-1.0) - pytest.raises(KeyError, idx.isin, vals_1, level='A') - - idx.names = ['A', 'B'] - tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A')) - tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B')) - - pytest.raises(KeyError, idx.isin, vals_1, level='C') - - def test_reindex_preserves_names_when_target_is_list_or_ndarray(self): - # GH6552 - idx = self.index.copy() - target = idx.copy() - idx.names = target.names = [None, None] - - other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]]) - - # list & ndarray cases - assert idx.reindex([])[0].names == [None, None] - assert idx.reindex(np.array([]))[0].names == [None, None] - assert idx.reindex(target.tolist())[0].names == [None, None] - assert idx.reindex(target.values)[0].names == [None, None] - assert idx.reindex(other_dtype.tolist())[0].names == [None, None] - assert idx.reindex(other_dtype.values)[0].names == [None, None] - - 
idx.names = ['foo', 'bar'] - assert idx.reindex([])[0].names == ['foo', 'bar'] - assert idx.reindex(np.array([]))[0].names == ['foo', 'bar'] - assert idx.reindex(target.tolist())[0].names == ['foo', 'bar'] - assert idx.reindex(target.values)[0].names == ['foo', 'bar'] - assert idx.reindex(other_dtype.tolist())[0].names == ['foo', 'bar'] - assert idx.reindex(other_dtype.values)[0].names == ['foo', 'bar'] - - def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self): - # GH7774 - idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']], - names=['foo', 'bar']) - assert idx.reindex([], level=0)[0].names == ['foo', 'bar'] - assert idx.reindex([], level=1)[0].names == ['foo', 'bar'] - - def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self): - # GH7774 - idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']]) - assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64 - assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_ - - def test_groupby(self): - groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2])) - labels = self.index.get_values().tolist() - exp = {1: labels[:3], 2: labels[3:]} - tm.assert_dict_equal(groups, exp) - - # GH5620 - groups = self.index.groupby(self.index) - exp = {key: [key] for key in self.index} - tm.assert_dict_equal(groups, exp) - - def test_index_name_retained(self): - # GH9857 - result = pd.DataFrame({'x': [1, 2, 6], - 'y': [2, 2, 8], - 'z': [-5, 0, 5]}) - result = result.set_index('z') - result.loc[10] = [9, 10] - df_expected = pd.DataFrame({'x': [1, 2, 6, 9], - 'y': [2, 2, 8, 10], - 'z': [-5, 0, 5, 10]}) - df_expected = df_expected.set_index('z') - tm.assert_frame_equal(result, df_expected) - - def test_equals_operator(self): - # GH9785 - assert (self.index == self.index).all() - - def test_large_multiindex_error(self): - # GH12527 - df_below_1000000 = pd.DataFrame( - 1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), - columns=['dest']) - with pytest.raises(KeyError): - 
df_below_1000000.loc[(-1, 0), 'dest'] - with pytest.raises(KeyError): - df_below_1000000.loc[(3, 0), 'dest'] - df_above_1000000 = pd.DataFrame( - 1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), - columns=['dest']) - with pytest.raises(KeyError): - df_above_1000000.loc[(-1, 0), 'dest'] - with pytest.raises(KeyError): - df_above_1000000.loc[(3, 0), 'dest'] - - def test_partial_string_timestamp_multiindex(self): - # GH10331 - dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H') - abc = ['a', 'b', 'c'] - ix = pd.MultiIndex.from_product([dr, abc]) - df = pd.DataFrame({'c1': range(0, 15)}, index=ix) - idx = pd.IndexSlice - - # c1 - # 2016-01-01 00:00:00 a 0 - # b 1 - # c 2 - # 2016-01-01 12:00:00 a 3 - # b 4 - # c 5 - # 2016-01-02 00:00:00 a 6 - # b 7 - # c 8 - # 2016-01-02 12:00:00 a 9 - # b 10 - # c 11 - # 2016-01-03 00:00:00 a 12 - # b 13 - # c 14 - - # partial string matching on a single index - for df_swap in (df.swaplevel(), - df.swaplevel(0), - df.swaplevel(0, 1)): - df_swap = df_swap.sort_index() - just_a = df_swap.loc['a'] - result = just_a.loc['2016-01-01'] - expected = df.loc[idx[:, 'a'], :].iloc[0:2] - expected.index = expected.index.droplevel(1) - tm.assert_frame_equal(result, expected) - - # indexing with IndexSlice - result = df.loc[idx['2016-01-01':'2016-02-01', :], :] - expected = df - tm.assert_frame_equal(result, expected) - - # match on secondary index - result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :] - expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]] - tm.assert_frame_equal(result, expected) - - # Even though this syntax works on a single index, this is somewhat - # ambiguous and we don't want to extend this behavior forward to work - # in multi-indexes. This would amount to selecting a scalar from a - # column. 
- with pytest.raises(KeyError): - df['2016-01-01'] - - # partial string match on year only - result = df.loc['2016'] - expected = df - tm.assert_frame_equal(result, expected) - - # partial string match on date - result = df.loc['2016-01-01'] - expected = df.iloc[0:6] - tm.assert_frame_equal(result, expected) - - # partial string match on date and hour, from middle - result = df.loc['2016-01-02 12'] - expected = df.iloc[9:12] - tm.assert_frame_equal(result, expected) - - # partial string match on secondary index - result = df_swap.loc[idx[:, '2016-01-02'], :] - expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]] - tm.assert_frame_equal(result, expected) - - # tuple selector with partial string match on date - result = df.loc[('2016-01-01', 'a'), :] - expected = df.iloc[[0, 3]] - tm.assert_frame_equal(result, expected) - - # Slicing date on first level should break (of course) - with pytest.raises(KeyError): - df_swap.loc['2016-01-01'] - - # GH12685 (partial string with daily resolution or below) - dr = date_range('2013-01-01', periods=100, freq='D') - ix = MultiIndex.from_product([dr, ['a', 'b']]) - df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix) - - result = df.loc[idx['2013-03':'2013-03', :], :] - expected = df.iloc[118:180] - tm.assert_frame_equal(result, expected) - - def test_rangeindex_fallback_coercion_bug(self): - # GH 12893 - foo = pd.DataFrame(np.arange(100).reshape((10, 10))) - bar = pd.DataFrame(np.arange(100).reshape((10, 10))) - df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1) - df.index.names = ['fizz', 'buzz'] - - str(df) - expected = pd.DataFrame({'bar': np.arange(100), - 'foo': np.arange(100)}, - index=pd.MultiIndex.from_product( - [range(10), range(10)], - names=['fizz', 'buzz'])) - tm.assert_frame_equal(df, expected, check_like=True) - - result = df.index.get_level_values('fizz') - expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10) - tm.assert_index_equal(result, expected) - - result = 
df.index.get_level_values('buzz') - expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz') - tm.assert_index_equal(result, expected) - - def test_dropna(self): - # GH 6194 - idx = pd.MultiIndex.from_arrays([[1, np.nan, 3, np.nan, 5], - [1, 2, np.nan, np.nan, 5], - ['a', 'b', 'c', np.nan, 'e']]) - - exp = pd.MultiIndex.from_arrays([[1, 5], - [1, 5], - ['a', 'e']]) - tm.assert_index_equal(idx.dropna(), exp) - tm.assert_index_equal(idx.dropna(how='any'), exp) - - exp = pd.MultiIndex.from_arrays([[1, np.nan, 3, 5], - [1, 2, np.nan, 5], - ['a', 'b', 'c', 'e']]) - tm.assert_index_equal(idx.dropna(how='all'), exp) - - msg = "invalid how option: xxx" - with tm.assert_raises_regex(ValueError, msg): - idx.dropna(how='xxx') - - def test_unsortedindex(self): - # GH 11897 - mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'), - ('x', 'b'), ('y', 'a'), ('z', 'b')], - names=['one', 'two']) - df = pd.DataFrame([[i, 10 * i] for i in lrange(6)], index=mi, - columns=['one', 'two']) - - # GH 16734: not sorted, but no real slicing - result = df.loc(axis=0)['z', 'a'] - expected = df.iloc[0] - tm.assert_series_equal(result, expected) - - with pytest.raises(UnsortedIndexError): - df.loc(axis=0)['z', slice('a')] - df.sort_index(inplace=True) - assert len(df.loc(axis=0)['z', :]) == 2 - - with pytest.raises(KeyError): - df.loc(axis=0)['q', :] - - def test_unsortedindex_doc_examples(self): - # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex # noqa - dfm = DataFrame({'jim': [0, 0, 1, 1], - 'joe': ['x', 'x', 'z', 'y'], - 'jolie': np.random.rand(4)}) - - dfm = dfm.set_index(['jim', 'joe']) - with tm.assert_produces_warning(PerformanceWarning): - dfm.loc[(1, 'z')] - - with pytest.raises(UnsortedIndexError): - dfm.loc[(0, 'y'):(1, 'z')] - - assert not dfm.index.is_lexsorted() - assert dfm.index.lexsort_depth == 1 - - # sort it - dfm = dfm.sort_index() - dfm.loc[(1, 'z')] - dfm.loc[(0, 'y'):(1, 'z')] - - assert dfm.index.is_lexsorted() - 
assert dfm.index.lexsort_depth == 2 - - def test_tuples_with_name_string(self): - # GH 15110 and GH 14848 - - li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] - with pytest.raises(ValueError): - pd.Index(li, name='abc') - with pytest.raises(ValueError): - pd.Index(li, name='a') - - def test_nan_stays_float(self): - - # GH 7031 - idx0 = pd.MultiIndex(levels=[["A", "B"], []], - labels=[[1, 0], [-1, -1]], - names=[0, 1]) - idx1 = pd.MultiIndex(levels=[["C"], ["D"]], - labels=[[0], [0]], - names=[0, 1]) - idxm = idx0.join(idx1, how='outer') - assert pd.isna(idx0.get_level_values(1)).all() - # the following failed in 0.14.1 - assert pd.isna(idxm.get_level_values(1)[:-1]).all() - - df0 = pd.DataFrame([[1, 2]], index=idx0) - df1 = pd.DataFrame([[3, 4]], index=idx1) - dfm = df0 - df1 - assert pd.isna(df0.index.get_level_values(1)).all() - # the following failed in 0.14.1 - assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() - - def test_million_record_attribute_error(self): - # GH 18165 - r = list(range(1000000)) - df = pd.DataFrame({'a': r, 'b': r}, - index=pd.MultiIndex.from_tuples([(x, x) for x in r])) - - with tm.assert_raises_regex(AttributeError, - "'Series' object has no attribute 'foo'"): - df['a'].foo() - - def test_duplicate_multiindex_labels(self): - # GH 17464 - # Make sure that a MultiIndex with duplicate levels throws a ValueError - with pytest.raises(ValueError): - ind = pd.MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) - - # And that using set_levels with duplicate levels fails - ind = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], - [1, 2, 1, 2, 3]]) - with pytest.raises(ValueError): - ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], - inplace=True)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ ] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [ ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #18644 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20669
2018-04-12T14:50:13Z
2018-06-06T02:50:44Z
null
2018-06-06T02:50:44Z
DOC: Updated the docstring of pandas.Series.str.get
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 23c891ec4fcd0..bbb7ec7e77a59 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1651,17 +1651,53 @@ def str_translate(arr, table, deletechars=None): def str_get(arr, i): """ + Extract element from each component at specified position. + Extract element from lists, tuples, or strings in each element in the Series/Index. Parameters ---------- i : int - Integer index (location) + Position of element to extract. Returns ------- items : Series/Index of objects + + Examples + -------- + >>> s = pd.Series(["String", + (1, 2, 3), + ["a", "b", "c"], + 123, -456, + {1:"Hello", "2":"World"}]) + >>> s + 0 String + 1 (1, 2, 3) + 2 [a, b, c] + 3 123 + 4 -456 + 5 {1: 'Hello', '2': 'World'} + dtype: object + + >>> s.str.get(1) + 0 t + 1 2 + 2 b + 3 NaN + 4 NaN + 5 Hello + dtype: object + + >>> s.str.get(-1) + 0 g + 1 3 + 2 c + 3 NaN + 4 NaN + 5 NaN + dtype: object """ f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan return _na_map(f, arr)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ###################### Docstring (pandas.Series.str.get) ###################### ################################################################################ Extract the element from each component at the specified position. Extract element from lists, tuples, or strings in each element in the Series/Index. Parameters ---------- i : int Position of the element to extract. Returns ------- items : Series/Index of objects Examples -------- >>> s = pd.Series(["String", (1, 2, 3), ["a", "b", "c"], 123]) >>> s 0 String 1 (1, 2, 3) 2 [a, b, c] 3 123 dtype: object >>> s.str.get(1) 0 t 1 2 2 b 3 NaN dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20667
2018-04-12T13:12:08Z
2018-04-24T10:38:43Z
2018-04-24T10:38:43Z
2018-04-28T14:39:43Z
Fix test__get_dtype test expecting little-endian
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 2960a12b133d2..edabc177535fc 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -570,8 +570,8 @@ def test_is_offsetlike(): (pd.CategoricalIndex(['a', 'b']), CategoricalDtype(['a', 'b'])), (CategoricalDtype(), CategoricalDtype()), (CategoricalDtype(['a', 'b']), CategoricalDtype()), - (pd.DatetimeIndex([1, 2]), np.dtype('<M8[ns]')), - (pd.DatetimeIndex([1, 2]).dtype, np.dtype('<M8[ns]')), + (pd.DatetimeIndex([1, 2]), np.dtype('=M8[ns]')), + (pd.DatetimeIndex([1, 2]).dtype, np.dtype('=M8[ns]')), ('<M8[ns]', np.dtype('<M8[ns]')), ('datetime64[ns, Europe/London]', DatetimeTZDtype('ns', 'Europe/London')), (pd.SparseSeries([1, 2], dtype='int32'), np.dtype('int32')),
Similar to #14832, use = (native) instead of < (little-endian) - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20662
2018-04-12T07:35:07Z
2018-04-14T13:22:47Z
2018-04-14T13:22:47Z
2018-04-14T13:49:08Z
Np Any/All Transformation Bug
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index daa7f937cca9d..94b4f6f3ab72f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1130,6 +1130,7 @@ Groupby/Resample/Rolling - Fixed a performance regression for ``GroupBy.nth`` and ``GroupBy.last`` with some object columns (:issue:`19283`) - Bug in :func:`DataFrameGroupBy.cumsum` and :func:`DataFrameGroupBy.cumprod` when ``skipna`` was passed (:issue:`19806`) - Bug in :func:`Dataframe.resample` that dropped timezone information (:issue:`13238`) +- Bug in :func:`DataFrame.groupby` where transformations using ``np.all`` and ``np.any`` were raising a ``ValueError`` (:issue:`20653`) Sparse ^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index 8907e9144b60e..0d55fa8b97aae 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -188,6 +188,8 @@ class SelectionMixin(object): builtins.sum: 'sum', builtins.max: 'max', builtins.min: 'min', + np.all: 'all', + np.any: 'any', np.sum: 'sum', np.mean: 'mean', np.prod: 'prod', diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 23326d1b105fe..390b99d0fab1c 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -723,3 +723,15 @@ def get_result(grp_obj): exp = DataFrame({'vals': exp_vals * 2}) result = get_result(grp) tm.assert_frame_equal(result, exp) + + @pytest.mark.parametrize("func", [np.any, np.all]) + def test_any_all_np_func(self, func): + # GH 20653 + df = pd.DataFrame([['foo', True], + [np.nan, True], + ['foo', True]], columns=['key', 'val']) + + exp = pd.Series([True, np.nan, True], name='val') + + res = df.groupby('key')['val'].transform(func) + tm.assert_series_equal(res, exp)
- [X] closes #20653 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry I believe this can be added to the whitelist of Cython functions as a result of #19722. I did similar changes with other agg funcs and will review separately to see if we should be adding more to this dict
https://api.github.com/repos/pandas-dev/pandas/pulls/20655
2018-04-11T07:46:28Z
2018-04-14T13:28:43Z
2018-04-14T13:28:43Z
2018-04-14T15:05:51Z
BUG: Add uint64 support to IntervalTree
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a6c92bf9faf9b..239c9b9fbe1c9 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1065,6 +1065,7 @@ Indexing - Bug in ``Index`` subclasses constructors that ignore unexpected keyword arguments (:issue:`19348`) - Bug in :meth:`Index.difference` when taking difference of an ``Index`` with itself (:issue:`20040`) - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` in presence of entire rows of NaNs in the middle of values (:issue:`20499`). +- Bug in :class:`IntervalIndex` where some indexing operations were not supported for overlapping or non-monotonic ``uint64`` data (:issue:`20636`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index b22e694c9fcca..9ed76242a95c3 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -5,7 +5,7 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in """ from numpy cimport ( - int64_t, int32_t, float64_t, float32_t, + int64_t, int32_t, float64_t, float32_t, uint64_t, ndarray, PyArray_ArgSort, NPY_QUICKSORT, PyArray_Take) import numpy as np @@ -24,6 +24,7 @@ ctypedef fused scalar_t: float32_t int64_t int32_t + uint64_t #---------------------------------------------------------------------- @@ -205,7 +206,7 @@ cdef sort_values_and_indices(all_values, all_indices, subset): {{py: nodes = [] -for dtype in ['float32', 'float64', 'int32', 'int64']: +for dtype in ['float32', 'float64', 'int32', 'int64', 'uint64']: for closed, cmp_left, cmp_right in [ ('left', '<=', '<'), ('right', '<', '<='), diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 343131125f640..056d3e1087a2e 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -12,36 +12,37 @@ def 
closed(request): return request.param +@pytest.fixture( + scope='class', params=['int32', 'int64', 'float32', 'float64', 'uint64']) +def dtype(request): + return request.param + + +@pytest.fixture(scope='class') +def tree(dtype): + left = np.arange(5, dtype=dtype) + return IntervalTree(left, left + 2) + + class TestIntervalTree(object): - def setup_method(self, method): - def gentree(dtype): - left = np.arange(5, dtype=dtype) - right = left + 2 - return IntervalTree(left, right) - - self.tree = gentree('int64') - self.trees = {dtype: gentree(dtype) - for dtype in ['int32', 'int64', 'float32', 'float64']} - - def test_get_loc(self): - for dtype, tree in self.trees.items(): - tm.assert_numpy_array_equal(tree.get_loc(1), - np.array([0], dtype='int64')) - tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)), - np.array([0, 1], dtype='int64')) - with pytest.raises(KeyError): - tree.get_loc(-1) - - def test_get_indexer(self): - for dtype, tree in self.trees.items(): - tm.assert_numpy_array_equal( - tree.get_indexer(np.array([1.0, 5.5, 6.5])), - np.array([0, 4, -1], dtype='int64')) - with pytest.raises(KeyError): - tree.get_indexer(np.array([3.0])) - - def test_get_indexer_non_unique(self): - indexer, missing = self.tree.get_indexer_non_unique( + + def test_get_loc(self, tree): + tm.assert_numpy_array_equal(tree.get_loc(1), + np.array([0], dtype='int64')) + tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)), + np.array([0, 1], dtype='int64')) + with pytest.raises(KeyError): + tree.get_loc(-1) + + def test_get_indexer(self, tree): + tm.assert_numpy_array_equal( + tree.get_indexer(np.array([1.0, 5.5, 6.5])), + np.array([0, 4, -1], dtype='int64')) + with pytest.raises(KeyError): + tree.get_indexer(np.array([3.0])) + + def test_get_indexer_non_unique(self, tree): + indexer, missing = tree.get_indexer_non_unique( np.array([1.0, 2.0, 6.5])) tm.assert_numpy_array_equal(indexer[:1], np.array([0], dtype='int64')) @@ -51,8 +52,9 @@ def test_get_indexer_non_unique(self): 
np.array([-1], dtype='int64')) tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64')) - def test_duplicates(self): - tree = IntervalTree([0, 0, 0], [1, 1, 1]) + def test_duplicates(self, dtype): + left = np.array([0, 0, 0], dtype=dtype) + tree = IntervalTree(left, left + 1) tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)), np.array([0, 1, 2], dtype='int64'))
- [X] xref https://github.com/pandas-dev/pandas/issues/20636#issuecomment-380276144 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Note that this doesn't fully close the linked issue; this just resolves the `uint64` case, which was relatively straightforward. The datetimelike case still needs to be resolved, but seems distinct enough for a different PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/20651
2018-04-11T01:05:57Z
2018-04-11T10:05:48Z
2018-04-11T10:05:48Z
2018-09-24T17:24:36Z
Fixing melt() when col_level>0 in a multi-index column
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index ce99d2f8c9a63..8ab65dfa44d88 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -24,6 +24,10 @@ other='DataFrame.melt')) def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): + if col_level is not None: # allow list or other? + frame = frame.copy() + frame.columns = frame.columns.get_level_values(col_level) + # TODO: what about the existing index? if id_vars is not None: if not is_list_like(id_vars): @@ -47,13 +51,9 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None, else: value_vars = list(value_vars) frame = frame.loc[:, id_vars + value_vars] - else: + elif col_level is None: # avoid making copy if possible frame = frame.copy() - if col_level is not None: # allow list or other? - # frame is a copy - frame.columns = frame.columns.get_level_values(col_level) - if var_name is None: if isinstance(frame.columns, ABCMultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)):
Example Input DataFrame - ``` import pandas as pd x = pd.DataFrame([[0, 1], [2, 3]], columns=[list('AB'), list('CD')]) """ A B C D 0 0 1 1 2 3 """ x.melt(col_level=1, id_vars=['C'], value_vars=['D']) ``` Current Output - ``` KeyError: 'C' ``` Desired Output - ``` C variable value 0 0 D 1 1 2 D 3 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20646
2018-04-10T05:17:33Z
2018-09-25T15:44:17Z
null
2018-09-25T15:44:17Z
MAINT: Remove console encode
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index e0f53f671017a..a101113da23ba 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -81,12 +81,7 @@ def _join_unicode(lines, sep=''): # It will always return unicode text which can handled by other # parts of the package without breakage. # -# 2) If you need to send something to the console, use console_encode(). -# -# console_encode() should (hopefully) choose the right encoding for you -# based on the encoding set in option "display.encoding" -# -# 3) if you need to write something out to file, use +# 2) if you need to write something out to file, use # pprint_thing_encoded(encoding). # # If no encoding is specified, it defaults to utf-8. Since encoding pure diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index ec34e7656e01f..c9c46d4a991ec 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -202,19 +202,3 @@ def test_enable_data_resource_formatter(self): assert formatters[mimetype].enabled # smoke test that it works self.display_formatter.format(cf) - - -# TODO: fix this broken test - -# def test_console_encode(): -# """ -# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend) -# common.console_encode should encode things as utf-8. -# """ -# if compat.PY3: -# pytest.skip - -# with tm.stdin_encoding(encoding=None): -# result = printing.console_encode(u"\u05d0") -# expected = u"\u05d0".encode('utf-8') -# assert (result == expected)
Closes #20616 Nice to finally get back to some `pandas` work!
https://api.github.com/repos/pandas-dev/pandas/pulls/20645
2018-04-10T04:39:02Z
2018-04-10T05:54:14Z
2018-04-10T05:54:13Z
2018-04-10T05:54:17Z
added check if categoricalDtype for issue #19278
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 714cd09a27294..671da44a02866 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -19,6 +19,7 @@ import pandas.core.ops as ops import pandas._libs.index as libindex from pandas.util._decorators import Appender +from pandas.api.types import is_categorical_dtype from pandas.core.sparse.array import ( make_sparse, SparseArray, @@ -129,6 +130,14 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block', 'be False.') else: + if is_categorical_dtype(data): + if dtype is not None: + data = data.astype(dtype) + if index is None: + index = data.index.view() + else: + data = data.reindex(index, copy=False) + length = len(index) if data == fill_value or (isna(data) and isna(fill_value)):
Checklist for other PRs: - [x] closes #19278 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20644
2018-04-10T03:35:57Z
2018-10-11T01:51:53Z
null
2018-10-11T01:51:53Z
Save the name of undefined variables in expressions.
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index ca0c4db4947c4..d5e1b35403469 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -34,6 +34,8 @@ class UndefinedVariableError(NameError): """NameError subclass for local variables.""" def __init__(self, name, is_local): + self.name = name + self.is_local = is_local if is_local: msg = 'local variable {0!r} is not defined' else:
This can be useful information for downstream error-handling code.
https://api.github.com/repos/pandas-dev/pandas/pulls/20637
2018-04-08T17:43:49Z
2018-11-23T03:29:33Z
null
2018-11-23T03:29:33Z
update the pandas.Series.str.repeat docstring
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index ed1111ed3558a..cc5b9ad3071ac 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -678,17 +678,45 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): def str_repeat(arr, repeats): """ - Duplicate each string in the Series/Index by indicated number - of times. + Duplicate each string repeated by indicated number of times. + + Duplicate each string in the Series/Index by indicated number of times. + A passed value of zero or negative integer will return an empty string. Parameters ---------- - repeats : int or array - Same value for all (int) or different value per (array) + repeats : int or array-like + Same value for all (int) or different value per (array). Returns ------- - repeated : Series/Index of objects + Series or Index + Series or Index of repeated string objects specified by + input parameter repeats. + + Examples + -------- + >>> s = pd.Series(['a', 'b', 'c', 'd', 'e']) + + Using same value for all: + + >>> s.str.repeat(repeats=4) + 0 aaaa + 1 bbbb + 2 cccc + 3 dddd + 4 eeee + dtype: object + + Using different value per element: + + >>> s.str.repeat(repeats=[-2, -1, 0, 1, 2]) + 0 + 1 + 2 + 3 d + 4 ee + dtype: object """ if is_scalar(repeats):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant ``` ################################################################################ ##################### Docstring (pandas.Series.str.repeat) ##################### ################################################################################ Duplicate each string repeated by indicated number of times. Parameters ---------- repeats : int or array Same value for all (int) or different value per (array). Returns ------- repeated : Series/Index of objects Same type as the original object Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd', 'e']) Using same value for all: >>> s.str.repeat(4) 0 aaaa 1 bbbb 2 cccc 3 dddd 4 eeee dtype: object Using different value per element: >>> s.str.repeat([3, 2, 5, 1, 4]) 0 aaa 1 bb 2 ccccc 3 d 4 eeee dtype: object Passing zero or negative integer will return an empty string >>> s.str.repeat([0, 0, -2, -1, 0]) 0 1 2 3 4 dtype: object Notes -------- A passed value of zero or negative integer will return an empty string. See also -------- numpy.ndarray.repeat: Repeat elements of an array. 
################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20634
2018-04-08T10:36:23Z
2018-09-18T12:41:48Z
null
2018-09-18T12:41:48Z
DOC: clean io.rst codeblocks for PEP8
diff --git a/doc/source/io.rst b/doc/source/io.rst index fd998d32cfbfb..c5b7eff292722 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -20,9 +20,9 @@ plt.close('all') import pandas.util.testing as tm - pd.options.display.max_rows=15 - clipdf = pd.DataFrame({'A':[1,2,3],'B':[4,5,6],'C':['p','q','r']}, - index=['x','y','z']) + pd.options.display.max_rows = 15 + clipdf = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': ['p', 'q', 'r']}, + index=['x', 'y', 'z']) =============================== IO Tools (Text, CSV, HDF5, ...) @@ -256,7 +256,7 @@ parse_dates : boolean or list of ints or names or list of lists or dict, default column. - If ``[[1, 3]]`` -> combine columns 1 and 3 and parse as a single date column. - - If ``{'foo' : [1, 3]}`` -> parse columns 1, 3 as date and call result 'foo'. + - If ``{'foo': [1, 3]}`` -> parse columns 1, 3 as date and call result 'foo'. A fast-path exists for iso8601-formatted dates. infer_datetime_format : boolean, default ``False`` If ``True`` and parse_dates is enabled for a column, attempt to infer the @@ -391,7 +391,7 @@ of :func:`~pandas.read_csv`: .. ipython:: python data = "col_1\n1\n2\n'A'\n4.22" - df = pd.read_csv(StringIO(data), converters={'col_1':str}) + df = pd.read_csv(StringIO(data), converters={'col_1': str}) df df['col_1'].apply(type).value_counts() @@ -789,7 +789,7 @@ The simplest case is to just pass in ``parse_dates=True``: .. ipython:: python :suppress: - f = open('foo.csv','w') + f = open('foo.csv', 'w') f.write('date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5') f.close() @@ -863,7 +863,7 @@ data columns: date_spec = {'nominal': [1, 2], 'actual': [1, 3]} df = pd.read_csv('tmp.csv', header=None, parse_dates=date_spec, - index_col=0) #index is the nominal column + index_col=0) # index is the nominal column df .. note:: @@ -1336,7 +1336,7 @@ column specifications to the `read_fwf` function along with the file name: .. 
ipython:: python - #Column specifications are a list of half-intervals + # Column specifications are a list of half-intervals colspecs = [(0, 6), (8, 20), (21, 33), (34, 43)] df = pd.read_fwf('bar.csv', colspecs=colspecs, header=None, index_col=0) df @@ -1347,7 +1347,7 @@ column widths for contiguous columns: .. ipython:: python - #Widths are a list of integers + # Widths are a list of integers widths = [6, 14, 13, 10] df = pd.read_fwf('bar.csv', widths=widths, header=None) df @@ -1956,7 +1956,7 @@ Specify dtypes for conversion: .. ipython:: python - pd.read_json('test.json', dtype={'A' : 'float32', 'bools' : 'int8'}).dtypes + pd.read_json('test.json', dtype={'A': 'float32', 'bools': 'int8'}).dtypes Preserve string indices: @@ -2111,8 +2111,8 @@ For line-delimited json files, pandas can also return an iterator which reads in .. ipython:: python jsonl = ''' - {"a":1,"b":2} - {"a":3,"b":4} + {"a": 1, "b": 2} + {"a": 3, "b": 4} ''' df = pd.read_json(jsonl, lines=True) df @@ -2784,14 +2784,14 @@ Using None to get all sheets: .. code-block:: python # Returns a dictionary of DataFrames - read_excel('path_to_file.xls',sheet_name=None) + read_excel('path_to_file.xls', sheet_name=None) Using a list to get multiple sheets: .. code-block:: python # Returns the 1st and 4th sheet, as a dictionary of DataFrames. - read_excel('path_to_file.xls',sheet_name=['Sheet1',3]) + read_excel('path_to_file.xls', sheet_name=['Sheet1', 3]) ``read_excel`` can read more than one sheet, by setting ``sheet_name`` to either a list of sheet names, a list of sheet positions, or ``None`` to read all sheets. @@ -2812,10 +2812,10 @@ For example, to read in a ``MultiIndex`` index without names: .. 
ipython:: python - df = pd.DataFrame({'a':[1,2,3,4], 'b':[5,6,7,8]}, - index=pd.MultiIndex.from_product([['a','b'],['c','d']])) + df = pd.DataFrame({'a':[1, 2, 3, 4], 'b':[5, 6, 7, 8]}, + index=pd.MultiIndex.from_product([['a', 'b'],['c', 'd']])) df.to_excel('path_to_file.xlsx') - df = pd.read_excel('path_to_file.xlsx', index_col=[0,1]) + df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1]) df If the index has level names, they will parsed as well, using the same @@ -2834,10 +2834,9 @@ should be passed to ``index_col`` and ``header``: .. ipython:: python - df.columns = pd.MultiIndex.from_product([['a'],['b', 'd']], names=['c1', 'c2']) + df.columns = pd.MultiIndex.from_product([['a'], ['b', 'd']], names=['c1', 'c2']) df.to_excel('path_to_file.xlsx') - df = pd.read_excel('path_to_file.xlsx', - index_col=[0,1], header=[0,1]) + df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1], header=[0, 1]) df .. ipython:: python @@ -2868,7 +2867,7 @@ indices to be parsed. read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) -Element order is ignored, so ``usecols=[0,1]`` is the same as ``[1,0]``. +Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. Parsing Dates +++++++++++++ @@ -3095,7 +3094,7 @@ applications (CTRL-V on many operating systems). Here we illustrate writing a .. ipython:: python - df = pd.DataFrame(randn(5,3)) + df = pd.DataFrame(randn(5, 3)) df df.to_clipboard() pd.read_clipboard() @@ -3231,30 +3230,30 @@ both on the writing (serialization), and reading (deserialization). .. ipython:: python - df = pd.DataFrame(np.random.rand(5,2),columns=list('AB')) + df = pd.DataFrame(np.random.rand(5, 2), columns=list('AB')) df.to_msgpack('foo.msg') pd.read_msgpack('foo.msg') - s = pd.Series(np.random.rand(5),index=pd.date_range('20130101',periods=5)) + s = pd.Series(np.random.rand(5), index=pd.date_range('20130101', periods=5)) You can pass a list of objects and you will receive them back on deserialization. .. 
ipython:: python - pd.to_msgpack('foo.msg', df, 'foo', np.array([1,2,3]), s) + pd.to_msgpack('foo.msg', df, 'foo', np.array([1, 2, 3]), s) pd.read_msgpack('foo.msg') You can pass ``iterator=True`` to iterate over the unpacked results: .. ipython:: python - for o in pd.read_msgpack('foo.msg',iterator=True): + for o in pd.read_msgpack('foo.msg', iterator=True): print(o) You can pass ``append=True`` to the writer to append to an existing pack: .. ipython:: python - df.to_msgpack('foo.msg',append=True) + df.to_msgpack('foo.msg', append=True) pd.read_msgpack('foo.msg') Unlike other io methods, ``to_msgpack`` is available on both a per-object basis, @@ -3264,7 +3263,8 @@ pandas objects. .. ipython:: python - pd.to_msgpack('foo2.msg', { 'dict' : [ { 'df' : df }, { 'string' : 'foo' }, { 'scalar' : 1. }, { 's' : s } ] }) + pd.to_msgpack('foo2.msg', {'dict': [{ 'df': df }, {'string': 'foo'}, + {'scalar': 1.}, {'s': s}]}) pd.read_msgpack('foo2.msg') .. ipython:: python @@ -3392,8 +3392,8 @@ similar to how ``read_csv`` and ``to_csv`` work. .. ipython:: python df_tl = pd.DataFrame(dict(A=list(range(5)), B=list(range(5)))) - df_tl.to_hdf('store_tl.h5','table',append=True) - pd.read_hdf('store_tl.h5', 'table', where = ['index>2']) + df_tl.to_hdf('store_tl.h5','table', append=True) + pd.read_hdf('store_tl.h5', 'table', where=['index>2']) .. ipython:: python :suppress: @@ -3411,17 +3411,17 @@ HDFStore will by default not drop rows that are all missing. This behavior can b .. 
ipython:: python - df_with_missing = pd.DataFrame({'col1':[0, np.nan, 2], - 'col2':[1, np.nan, np.nan]}) + df_with_missing = pd.DataFrame({'col1': [0, np.nan, 2], + 'col2': [1, np.nan, np.nan]}) df_with_missing df_with_missing.to_hdf('file.h5', 'df_with_missing', - format = 'table', mode='w') + format='table', mode='w') pd.read_hdf('file.h5', 'df_with_missing') df_with_missing.to_hdf('file.h5', 'df_with_missing', - format = 'table', mode='w', dropna=True) + format='table', mode='w', dropna=True) pd.read_hdf('file.h5', 'df_with_missing') @@ -3434,21 +3434,21 @@ This is also true for the major axis of a ``Panel``: .. ipython:: python - matrix = [[[np.nan, np.nan, np.nan],[1,np.nan,np.nan]], - [[np.nan, np.nan, np.nan], [np.nan,5,6]], - [[np.nan, np.nan, np.nan],[np.nan,3,np.nan]]] + matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]], + [[np.nan, np.nan, np.nan], [np.nan, 5, 6]], + [[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]] - panel_with_major_axis_all_missing = pd.Panel(matrix, - items=['Item1', 'Item2','Item3'], - major_axis=[1,2], + panel_with_major_axis_all_missing=pd.Panel(matrix, + items=['Item1', 'Item2', 'Item3'], + major_axis=[1, 2], minor_axis=['A', 'B', 'C']) panel_with_major_axis_all_missing panel_with_major_axis_all_missing.to_hdf('file.h5', 'panel', - dropna = True, - format='table', - mode='w') + dropna=True, + format='table', + mode='w') reloaded = pd.read_hdf('file.h5', 'panel') reloaded @@ -3596,13 +3596,13 @@ defaults to `nan`. .. 
ipython:: python - df_mixed = pd.DataFrame({ 'A' : randn(8), - 'B' : randn(8), - 'C' : np.array(randn(8),dtype='float32'), - 'string' :'string', - 'int' : 1, - 'bool' : True, - 'datetime64' : pd.Timestamp('20010102')}, + df_mixed = pd.DataFrame({'A': randn(8), + 'B': randn(8), + 'C': np.array(randn(8), dtype='float32'), + 'string':'string', + 'int': 1, + 'bool': True, + 'datetime64': pd.Timestamp('20010102')}, index=list(range(8))) df_mixed.loc[df_mixed.index[3:5], ['A', 'B', 'string', 'datetime64']] = np.nan @@ -3631,7 +3631,7 @@ storing/selecting from homogeneous index ``DataFrames``. columns=['A', 'B', 'C']) df_mi - store.append('df_mi',df_mi) + store.append('df_mi', df_mi) store.select('df_mi') # the levels are automatically included as data columns @@ -3679,15 +3679,15 @@ These rules are similar to how boolean expressions are used in pandas for indexi The following are valid expressions: -- ``'index>=date'`` -- ``"columns=['A', 'D']"`` +- ``'index >= date'`` +- ``"columns = ['A', 'D']"`` - ``"columns in ['A', 'D']"`` -- ``'columns=A'`` -- ``'columns==A'`` -- ``"~(columns=['A','B'])"`` -- ``'index>df.index[3] & string="bar"'`` -- ``'(index>df.index[3] & index<=df.index[6]) | string="bar"'`` -- ``"ts>=Timestamp('2012-02-01')"`` +- ``'columns = A'`` +- ``'columns == A'`` +- ``"~(columns = ['A', 'B'])"`` +- ``'index > df.index[3] & string = "bar"'`` +- ``'(index > df.index[3] & index <= df.index[6]) | string = "bar"'`` +- ``"ts >= Timestamp('2012-02-01')"`` - ``"major_axis>=20130101"`` The ``indexers`` are on the left-hand side of the sub-expression: @@ -3699,7 +3699,7 @@ The right-hand side of the sub-expression (after a comparison operator) can be: - functions that will be evaluated, e.g. ``Timestamp('2012-02-01')`` - strings, e.g. ``"bar"`` - date-like, e.g. ``20130101``, or ``"20130101"`` -- lists, e.g. ``"['A','B']"`` +- lists, e.g. ``"['A', 'B']"`` - variables that are defined in the local names space, e.g. ``date`` .. 
note:: @@ -3737,26 +3737,27 @@ Here are some examples: .. ipython:: python - dfq = pd.DataFrame(randn(10,4),columns=list('ABCD'),index=pd.date_range('20130101',periods=10)) - store.append('dfq',dfq,format='table',data_columns=True) + dfq = pd.DataFrame(randn(10, 4), columns=list('ABCD'), + index=pd.date_range('20130101', periods=10)) + store.append('dfq', dfq, format='table', data_columns=True) Use boolean expressions, with in-line function evaluation. .. ipython:: python - store.select('dfq',"index>pd.Timestamp('20130104') & columns=['A', 'B']") + store.select('dfq', "index>pd.Timestamp('20130104') & columns=['A', 'B']") Use and inline column reference .. ipython:: python - store.select('dfq',where="A>0 or C>0") + store.select('dfq', where="A>0 or C>0") Works with a Panel as well. .. ipython:: python - store.append('wp',wp) + store.append('wp', wp) store store.select('wp', "major_axis>pd.Timestamp('20000102') & minor_axis=['A', 'B']") @@ -3777,7 +3778,7 @@ space. These are in terms of the total number of rows in a table. wp.to_frame() # limiting the search - store.select('wp',"major_axis>20000102 & minor_axis=['A','B']", + store.select('wp', "major_axis>20000102 & minor_axis=['A', 'B']", start=0, stop=10) .. note:: @@ -3801,11 +3802,11 @@ specified in the format: ``<float>(<unit>)``, where float may be signed (and fra .. 
ipython:: python from datetime import timedelta - dftd = pd.DataFrame(dict(A = pd.Timestamp('20130101'), B = [ pd.Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ])) - dftd['C'] = dftd['A']-dftd['B'] + dftd = pd.DataFrame(dict(A = pd.Timestamp('20130101'), B = [ pd.Timestamp('20130101') + timedelta(days=i, seconds=10) for i in range(10) ])) + dftd['C'] = dftd['A'] - dftd['B'] dftd - store.append('dftd',dftd,data_columns=True) - store.select('dftd',"C<'-3.5D'") + store.append('dftd', dftd, data_columns=True) + store.select('dftd', "C<'-3.5D'") Indexing ++++++++ @@ -3837,10 +3838,10 @@ Oftentimes when appending large amounts of data to a store, it is useful to turn .. ipython:: python - df_1 = pd.DataFrame(randn(10,2),columns=list('AB')) - df_2 = pd.DataFrame(randn(10,2),columns=list('AB')) + df_1 = pd.DataFrame(randn(10, 2), columns=list('AB')) + df_2 = pd.DataFrame(randn(10, 2), columns=list('AB')) - st = pd.HDFStore('appends.h5',mode='w') + st = pd.HDFStore('appends.h5', mode='w') st.append('df', df_1, data_columns=['B'], index=False) st.append('df', df_2, data_columns=['B'], index=False) st.get_storer('df').table @@ -3878,15 +3879,15 @@ be ``data_columns``. df_dc = df.copy() df_dc['string'] = 'foo' - df_dc.loc[df_dc.index[4:6], 'string'] = np.nan - df_dc.loc[df_dc.index[7:9], 'string'] = 'bar' + df_dc.loc[df_dc.index[4: 6], 'string'] = np.nan + df_dc.loc[df_dc.index[7: 9], 'string'] = 'bar' df_dc['string2'] = 'cool' - df_dc.loc[df_dc.index[1:3], ['B','C']] = 1.0 + df_dc.loc[df_dc.index[1: 3], ['B', 'C']] = 1.0 df_dc # on-disk operations store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2']) - store.select('df_dc', where='B>0') + store.select('df_dc', where='B > 0') # getting creative store.select('df_dc', 'B > 0 & C > 0 & string == foo') @@ -3941,9 +3942,9 @@ chunks. 
store.append('dfeq', dfeq, data_columns=['number']) def chunks(l, n): - return [l[i:i+n] for i in range(0, len(l), n)] + return [l[i: i+n] for i in range(0, len(l), n)] - evens = [2,4,6,8,10] + evens = [2, 4, 6, 8, 10] coordinates = store.select_as_coordinates('dfeq', 'number=evens') for c in chunks(coordinates, 2): print(store.select('dfeq', where=c)) @@ -3975,11 +3976,12 @@ Sometimes you want to get the coordinates (a.k.a the index locations) of your qu .. ipython:: python - df_coord = pd.DataFrame(np.random.randn(1000,2),index=pd.date_range('20000101',periods=1000)) - store.append('df_coord',df_coord) - c = store.select_as_coordinates('df_coord','index>20020101') - c.summary() - store.select('df_coord',where=c) + df_coord = pd.DataFrame(np.random.randn(1000, 2), + index=pd.date_range('20000101', periods=1000)) + store.append('df_coord', df_coord) + c = store.select_as_coordinates('df_coord', 'index > 20020101') + c + store.select('df_coord', where=c) .. _io.hdf5-where_mask: @@ -3992,11 +3994,12 @@ a datetimeindex which are 5. .. ipython:: python - df_mask = pd.DataFrame(np.random.randn(1000,2),index=pd.date_range('20000101',periods=1000)) - store.append('df_mask',df_mask) - c = store.select_column('df_mask','index') - where = c[pd.DatetimeIndex(c).month==5].index - store.select('df_mask',where=where) + df_mask = pd.DataFrame(np.random.randn(1000, 2), + index=pd.date_range('20000101', periods=1000)) + store.append('df_mask', df_mask) + c = store.select_column('df_mask', 'index') + where = c[pd.DatetimeIndex(c).month == 5].index + store.select('df_mask', where=where) Storer Object ^^^^^^^^^^^^^ @@ -4095,7 +4098,7 @@ the table using a ``where`` that selects all but the missing data. .. ipython:: python # returns the number of rows deleted - store.remove('wp', 'major_axis>20000102' ) + store.remove('wp', 'major_axis > 20000102' ) store.select('wp') .. warning:: @@ -4171,7 +4174,8 @@ Enable compression for all objects within the file: .. 
code-block:: python - store_compressed = pd.HDFStore('store_compressed.h5', complevel=9, complib='blosc:blosclz') + store_compressed = pd.HDFStore('store_compressed.h5', complevel=9, + complib='blosc:blosclz') Or on-the-fly compression (this only applies to tables) in stores where compression is not enabled: @@ -4266,13 +4270,13 @@ stored in a more efficient manner. .. ipython:: python - dfcat = pd.DataFrame({ 'A' : pd.Series(list('aabbcdba')).astype('category'), - 'B' : np.random.randn(8) }) + dfcat = pd.DataFrame({'A': pd.Series(list('aabbcdba')).astype('category'), + 'B': np.random.randn(8) }) dfcat dfcat.dtypes cstore = pd.HDFStore('cats.h5', mode='w') cstore.append('dfcat', dfcat, format='table', data_columns=['A']) - result = cstore.select('dfcat', where="A in ['b','c']") + result = cstore.select('dfcat', where="A in ['b', 'c']") result result.dtypes @@ -4309,16 +4313,16 @@ Passing a ``min_itemsize`` dict will cause all passed columns to be created as * .. ipython:: python - dfs = pd.DataFrame(dict(A = 'foo', B = 'bar'),index=list(range(5))) + dfs = pd.DataFrame(dict(A='foo', B='bar'), index=list(range(5))) dfs # A and B have a size of 30 - store.append('dfs', dfs, min_itemsize = 30) + store.append('dfs', dfs, min_itemsize=30) store.get_storer('dfs').table # A is created as a data_column with a size of 30 # B is size is calculated - store.append('dfs2', dfs, min_itemsize = { 'A' : 30 }) + store.append('dfs2', dfs, min_itemsize={'A': 30}) store.get_storer('dfs2').table **nan_rep** @@ -4328,7 +4332,7 @@ You could inadvertently turn an actual ``nan`` value into a missing value. .. 
ipython:: python - dfss = pd.DataFrame(dict(A = ['foo','bar','nan'])) + dfss = pd.DataFrame(dict(A=['foo', 'bar', 'nan'])) dfss store.append('dfss', dfss) @@ -4358,7 +4362,7 @@ It is possible to write an ``HDFStore`` object that can easily be imported into np.random.seed(1) df_for_r = pd.DataFrame({"first": np.random.rand(100), "second": np.random.rand(100), - "class": np.random.randint(0, 2, (100,))}, + "class": np.random.randint(0, 2, (100, ))}, index=range(100)) df_for_r.head() @@ -4596,7 +4600,8 @@ Read only certain columns of a parquet file. .. ipython:: python - result = pd.read_parquet('example_fp.parquet', engine='fastparquet', columns=['a', 'b']) + result = pd.read_parquet('example_fp.parquet', + engine='fastparquet', columns=['a', 'b']) result.dtypes @@ -4846,7 +4851,8 @@ variant appropriate for your database. from pandas.io import sql sql.execute('SELECT * FROM table_name', engine) - sql.execute('INSERT INTO table_name VALUES(?, ?, ?)', engine, params=[('id', 1, 12.2, True)]) + sql.execute('INSERT INTO table_name VALUES(?, ?, ?)', engine, + params=[('id', 1, 12.2, True)]) Engine connection examples @@ -4888,7 +4894,8 @@ Use :func:`sqlalchemy.text` to specify query parameters in a backend-neutral way .. 
ipython:: python import sqlalchemy as sa - pd.read_sql(sa.text('SELECT * FROM data where Col_1=:col1'), engine, params={'col1': 'X'}) + pd.read_sql(sa.text('SELECT * FROM data where Col_1=:col1'), + engine, params={'col1': 'X'}) If you have an SQLAlchemy description of your database you can express where conditions using SQLAlchemy expressions @@ -5306,34 +5313,34 @@ And here's the code: sql_db.close() def test_hdf_fixed_write(df): - df.to_hdf('test_fixed.hdf','test',mode='w') + df.to_hdf('test_fixed.hdf', 'test', mode='w') def test_hdf_fixed_read(): - pd.read_hdf('test_fixed.hdf','test') + pd.read_hdf('test_fixed.hdf', 'test') def test_hdf_fixed_write_compress(df): - df.to_hdf('test_fixed_compress.hdf','test',mode='w',complib='blosc') + df.to_hdf('test_fixed_compress.hdf', 'test', mode='w', complib='blosc') def test_hdf_fixed_read_compress(): - pd.read_hdf('test_fixed_compress.hdf','test') + pd.read_hdf('test_fixed_compress.hdf', 'test') def test_hdf_table_write(df): - df.to_hdf('test_table.hdf','test',mode='w',format='table') + df.to_hdf('test_table.hdf', 'test', mode='w', format='table') def test_hdf_table_read(): - pd.read_hdf('test_table.hdf','test') + pd.read_hdf('test_table.hdf', 'test') def test_hdf_table_write_compress(df): - df.to_hdf('test_table_compress.hdf','test',mode='w',complib='blosc',format='table') + df.to_hdf('test_table_compress.hdf', 'test', mode='w', complib='blosc', format='table') def test_hdf_table_read_compress(): - pd.read_hdf('test_table_compress.hdf','test') + pd.read_hdf('test_table_compress.hdf', 'test') def test_csv_write(df): - df.to_csv('test.csv',mode='w') + df.to_csv('test.csv', mode='w') def test_csv_read(): - pd.read_csv('test.csv',index_col=0) + pd.read_csv('test.csv', index_col=0) def test_feather_write(df): df.to_feather('test.feather')
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #20508 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20632
2018-04-08T01:40:44Z
2018-04-11T02:26:32Z
2018-04-11T02:26:32Z
2018-08-02T14:03:55Z
DOC: Updated the docstring of pandas.Series.str.get
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 23c891ec4fcd0..ad8aaf41ef153 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1651,17 +1651,34 @@ def str_translate(arr, table, deletechars=None): def str_get(arr, i): """ + Extract element for each component. + Extract element from lists, tuples, or strings in each element in the Series/Index. Parameters ---------- i : int - Integer index (location) + Integer index (location). Returns ------- items : Series/Index of objects + + Examples + -------- + >>> x = pd.Series(["String", (1, 2, 4), ["a", "b", "c"]]) + >>> x + 0 String + 1 (1, 2, 4) + 2 [a, b, c] + dtype: object + + >>> x.str.get(1) + 0 t + 1 2 + 2 b + dtype: object """ f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan return _na_map(f, arr)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ###################### Docstring (pandas.Series.str.get) ###################### ################################################################################ Extract element for each component. Extract element from lists, tuples, or strings in each element in the Series/Index. Parameters ---------- i : int Integer index (location). Returns ------- items : Series/Index of objects Examples -------- >>> x = pd.Series(["String", (1, 2, 4), ["a", "b", "c"]]) >>> x 0 String 1 (1, 2, 4) 2 [a, b, c] dtype: object >>> x.str.get(1) 0 t 1 2 2 b dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20630
2018-04-07T18:06:51Z
2018-04-12T13:15:03Z
null
2018-04-24T04:36:26Z
DOC: update the pandas.Series.str.strip docstring
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 23c891ec4fcd0..8fa56c797fe4b 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2094,8 +2094,46 @@ def encode(self, encoding, errors="strict"): return self._wrap_result(result) _shared_docs['str_strip'] = (""" - Strip whitespace (including newlines) from each string in the - Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`. + Strip whitespaces from string in Series. + + Strip whitespace (including newlines) or given string from + each string in the Series/Index from %(side)s. Equivalent to + :meth:`str.%(method)s`. + + Parameters + ---------- + to_strip : str or unicode + String or unicode to strip in the given string. + + Examples + -------- + >>> # strip method + >>> s = pd.Series([' This is a Test 1 ']) + >>> s + 0 This is a Test 1 + dtype: object + >>> s = s.str.strip() + >>> s + 0 This is a Test 1 + dtype: object + >>> # lstrip method + >>> s1 = pd.Series(['This is another Test']) + >>> s1 + 0 This is another Test + dtype: object + >>> s1 = s1.str.lstrip('This') + >>> s1 + 0 is another Test + dtype: object + >>> # rstrip method + >>> s2 = pd.Series(['This is the last test']) + >>> s2 + 0 This is the last test + dtype: object + >>> s2 = s2.str.rstrip('test') + >>> s2 + 0 This is the last + dtype: object Returns -------
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ##################### Docstring (pandas.Series.str.strip) ##################### ################################################################################ Strip whitespaces from string in Series. Strip whitespace (including newlines) or given string from each string in the Series/Index from left and right sides. Equivalent to :meth:`str.strip`. Parameters ---------- to_strip : str or unicode String or unicode to strip in the given string. 
Examples -------- >>> # strip method >>> s = pd.Series([' This is a Test 1 ']) >>> s 0 This is a Test 1 dtype: object >>> s = s.str.strip() >>> s 0 This is a Test 1 dtype: object >>> # lstrip method >>> s1 = pd.Series(['This is another Test']) >>> s1 0 This is another Test dtype: object >>> s1 = s1.str.lstrip('This') >>> s1 0 is another Test dtype: object >>> # rstrip method >>> s2 = pd.Series(['This is the last test']) >>> s2 0 This is the last test dtype: object >>> s2 = s2.str.rstrip('test') >>> s2 0 This is the last dtype: object Returns ------- stripped : Series/Index of objects ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20628
2018-04-07T17:01:36Z
2018-07-07T22:14:37Z
null
2018-07-07T22:14:38Z
TST: Parametrized index tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index e78f565b0a9af..559b5e44631b6 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1,7 +1,7 @@ import pytest -import numpy -import pandas +import numpy as np +import pandas as pd import pandas.util._test_decorators as td @@ -36,15 +36,15 @@ def pytest_runtest_setup(item): @pytest.fixture(autouse=True) def configure_tests(): - pandas.set_option('chained_assignment', 'raise') + pd.set_option('chained_assignment', 'raise') # For running doctests: make np and pd names available @pytest.fixture(autouse=True) def add_imports(doctest_namespace): - doctest_namespace['np'] = numpy - doctest_namespace['pd'] = pandas + doctest_namespace['np'] = np + doctest_namespace['pd'] = pd @pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']) @@ -89,6 +89,14 @@ def join_type(request): return request.param +@pytest.fixture(params=[None, np.nan, pd.NaT, float('nan'), np.float('NaN')]) +def nulls_fixture(request): + """ + Fixture for each null type in pandas + """ + return request.param + + TIMEZONES = [None, 'UTC', 'US/Eastern', 'Asia/Tokyo', 'dateutil/US/Pacific'] diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7e19de4cca292..68c355651fdf5 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -77,17 +77,20 @@ def test_copy_and_deepcopy(self, indices): new_copy2 = self.intIndex.copy(dtype=int) assert new_copy2.dtype.kind == 'i' - def test_constructor(self): + @pytest.mark.parametrize("attr", ['strIndex', 'dateIndex']) + def test_constructor_regular(self, attr): # regular instance creation - tm.assert_contains_all(self.strIndex, self.strIndex) - tm.assert_contains_all(self.dateIndex, self.dateIndex) + idx = getattr(self, attr) + tm.assert_contains_all(idx, idx) + def test_constructor_casting(self): # casting arr = np.array(self.strIndex) index = Index(arr) tm.assert_contains_all(arr, index) tm.assert_index_equal(self.strIndex, 
index) + def test_constructor_copy(self): # copy arr = np.array(self.strIndex) index = Index(arr, copy=True, name='name') @@ -105,16 +108,14 @@ def test_constructor_corner(self): # corner case pytest.raises(TypeError, Index, 0) - def test_construction_list_mixed_tuples(self): + @pytest.mark.parametrize("idx_vals", [ + [('A', 1), 'B'], ['B', ('A', 1)]]) + def test_construction_list_mixed_tuples(self, idx_vals): # see gh-10697: if we are constructing from a mixed list of tuples, # make sure that we are independent of the sorting order. - idx1 = Index([('A', 1), 'B']) - assert isinstance(idx1, Index) - assert not isinstance(idx1, MultiIndex) - - idx2 = Index(['B', ('A', 1)]) - assert isinstance(idx2, Index) - assert not isinstance(idx2, MultiIndex) + idx = Index(idx_vals) + assert isinstance(idx, Index) + assert not isinstance(idx, MultiIndex) @pytest.mark.parametrize('na_value', [None, np.nan]) @pytest.mark.parametrize('vtype', [list, tuple, iter]) @@ -125,88 +126,85 @@ def test_construction_list_tuples_nan(self, na_value, vtype): expected = MultiIndex.from_tuples(values) tm.assert_index_equal(result, expected) - def test_constructor_from_index_datetimetz(self): - idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3, - tz='US/Eastern') - result = pd.Index(idx) - tm.assert_index_equal(result, idx) - assert result.tz == idx.tz - - result = pd.Index(idx.astype(object)) - tm.assert_index_equal(result, idx) - assert result.tz == idx.tz - - def test_constructor_from_index_timedelta(self): - idx = pd.timedelta_range('1 days', freq='D', periods=3) - result = pd.Index(idx) - tm.assert_index_equal(result, idx) - - result = pd.Index(idx.astype(object)) - tm.assert_index_equal(result, idx) - - def test_constructor_from_index_period(self): - idx = pd.period_range('2015-01-01', freq='D', periods=3) - result = pd.Index(idx) - tm.assert_index_equal(result, idx) + @pytest.mark.parametrize("cast_as_obj", [True, False]) + @pytest.mark.parametrize("idx", [ + 
pd.date_range('2015-01-01 10:00', freq='D', periods=3, + tz='US/Eastern'), # DTI with tz + pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz + pd.timedelta_range('1 days', freq='D', periods=3), # td + pd.period_range('2015-01-01', freq='D', periods=3) # period + ]) + def test_constructor_from_index_dtlike(self, cast_as_obj, idx): + if cast_as_obj: + result = pd.Index(idx.astype(object)) + else: + result = pd.Index(idx) - result = pd.Index(idx.astype(object)) tm.assert_index_equal(result, idx) - def test_constructor_from_series_datetimetz(self): - idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3, - tz='US/Eastern') - result = pd.Index(pd.Series(idx)) - tm.assert_index_equal(result, idx) - assert result.tz == idx.tz + if isinstance(idx, pd.DatetimeIndex) and hasattr(idx, 'tz'): + assert result.tz == idx.tz - def test_constructor_from_series_timedelta(self): - idx = pd.timedelta_range('1 days', freq='D', periods=3) - result = pd.Index(pd.Series(idx)) - tm.assert_index_equal(result, idx) - - def test_constructor_from_series_period(self): - idx = pd.period_range('2015-01-01', freq='D', periods=3) + @pytest.mark.parametrize("idx,has_tz", [ + (pd.date_range('2015-01-01 10:00', freq='D', periods=3, + tz='US/Eastern'), True), # datetimetz + (pd.timedelta_range('1 days', freq='D', periods=3), False), # td + (pd.period_range('2015-01-01', freq='D', periods=3), False) # period + ]) + def test_constructor_from_series_dtlike(self, idx, has_tz): result = pd.Index(pd.Series(idx)) tm.assert_index_equal(result, idx) - def test_constructor_from_series(self): + if has_tz: + assert result.tz == idx.tz + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + def test_constructor_from_series(self, klass): expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')]) s = Series([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')]) - result = Index(s) - tm.assert_index_equal(result, expected) - result = 
DatetimeIndex(s) + result = klass(s) tm.assert_index_equal(result, expected) + def test_constructor_from_series_freq(self): # GH 6273 # create from a series, passing a freq - s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', - '4-1-1990', '5-1-1990'])) + dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'] + expected = DatetimeIndex(dts, freq='MS') + + s = Series(pd.to_datetime(dts)) result = DatetimeIndex(s, freq='MS') - expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', - '4-1-1990', '5-1-1990'], freq='MS') + tm.assert_index_equal(result, expected) + def test_constructor_from_frame_series_freq(self): + # GH 6273 + # create from a series, passing a freq + dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'] + expected = DatetimeIndex(dts, freq='MS') + df = pd.DataFrame(np.random.rand(5, 3)) - df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', - '5-1-1990'] + df['date'] = dts result = DatetimeIndex(df['date'], freq='MS') - expected.name = 'date' - tm.assert_index_equal(result, expected) assert df['date'].dtype == object - exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', - '5-1-1990'], name='date') + expected.name = 'date' + exp = pd.Series(dts, name='date') tm.assert_series_equal(df['date'], exp) # GH 6274 # infer freq of same - result = pd.infer_freq(df['date']) - assert result == 'MS' + freq = pd.infer_freq(df['date']) + assert freq == 'MS' - def test_constructor_ndarray_like(self): + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("array", [ + np.arange(5), np.array(['a', 'b', 'c']), date_range( + '2000-01-01', periods=3).values + ]) + def test_constructor_ndarray_like(self, array): # GH 5460#issuecomment-44474502 # it should be possible to convert any object that satisfies the numpy # ndarray interface directly into an Index @@ -217,11 +215,9 @@ def __init__(self, array): def __array__(self, dtype=None): return self.array - for array in [np.arange(5), 
np.array(['a', 'b', 'c']), - date_range('2000-01-01', periods=3).values]: - expected = pd.Index(array) - result = pd.Index(ArrayLike(array)) - tm.assert_index_equal(result, expected) + expected = pd.Index(array) + result = pd.Index(ArrayLike(array)) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize('dtype', [ int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32', @@ -240,58 +236,59 @@ def test_constructor_int_dtype_float(self, dtype): def test_constructor_int_dtype_nan(self): # see gh-15187 data = [np.nan] - msg = "cannot convert" - - with tm.assert_raises_regex(ValueError, msg): - Index(data, dtype='int64') - - with tm.assert_raises_regex(ValueError, msg): - Index(data, dtype='uint64') - - # This, however, should not break - # because NaN is float. expected = Float64Index(data) result = Index(data, dtype='float') tm.assert_index_equal(result, expected) - def test_index_ctor_infer_nan_nat(self): + @pytest.mark.parametrize("dtype", ['int64', 'uint64']) + def test_constructor_int_dtype_nan_raises(self, dtype): + # see gh-15187 + data = [np.nan] + msg = "cannot convert" + with tm.assert_raises_regex(ValueError, msg): + Index(data, dtype=dtype) + + @pytest.mark.parametrize("klass,dtype,na_val", [ + (pd.Float64Index, np.float64, np.nan), + (pd.DatetimeIndex, 'datetime64[ns]', pd.NaT) + ]) + def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val): # GH 13467 - exp = pd.Float64Index([np.nan, np.nan]) - assert exp.dtype == np.float64 - tm.assert_index_equal(Index([np.nan, np.nan]), exp) - tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp) - - exp = pd.DatetimeIndex([pd.NaT, pd.NaT]) - assert exp.dtype == 'datetime64[ns]' - tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp) - tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp) - - exp = pd.DatetimeIndex([pd.NaT, pd.NaT]) - assert exp.dtype == 'datetime64[ns]' - - for data in [[pd.NaT, np.nan], [np.nan, pd.NaT], - [np.nan, np.datetime64('nat')], - [np.datetime64('nat'), 
np.nan]]: - tm.assert_index_equal(Index(data), exp) - tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) - - exp = pd.TimedeltaIndex([pd.NaT, pd.NaT]) - assert exp.dtype == 'timedelta64[ns]' - - for data in [[np.nan, np.timedelta64('nat')], - [np.timedelta64('nat'), np.nan], - [pd.NaT, np.timedelta64('nat')], - [np.timedelta64('nat'), pd.NaT]]: - tm.assert_index_equal(Index(data), exp) - tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) + na_list = [na_val, na_val] + exp = klass(na_list) + assert exp.dtype == dtype + + result = Index(na_list) + tm.assert_index_equal(result, exp) + + result = Index(np.array(na_list)) + tm.assert_index_equal(result, exp) + + @pytest.mark.parametrize("pos", [0, 1]) + @pytest.mark.parametrize("klass,dtype,ctor", [ + (pd.DatetimeIndex, 'datetime64[ns]', np.datetime64('nat')), + (pd.TimedeltaIndex, 'timedelta64[ns]', np.timedelta64('nat')) + ]) + def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor, + nulls_fixture): + exp = klass([pd.NaT, pd.NaT]) + assert exp.dtype == dtype + data = [ctor] + data.insert(pos, nulls_fixture) + + result = Index(data) + tm.assert_index_equal(result, exp) + + result = Index(np.array(data, dtype=object)) + tm.assert_index_equal(result, exp) + @pytest.mark.parametrize("swap_objs", [True, False]) + def test_index_ctor_nat_result(self, swap_objs): # mixed np.datetime64/timedelta64 nat results in object data = [np.datetime64('nat'), np.timedelta64('nat')] - exp = pd.Index(data, dtype=object) - tm.assert_index_equal(Index(data), exp) - tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) + if swap_objs: + data = data[::-1] - data = [np.timedelta64('nat'), np.datetime64('nat')] exp = pd.Index(data, dtype=object) tm.assert_index_equal(Index(data), exp) tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) @@ -302,119 +299,122 @@ def test_index_ctor_infer_periodindex(self): tm.assert_index_equal(rs, xp) assert isinstance(rs, PeriodIndex) - def 
test_constructor_simple_new(self): - idx = Index([1, 2, 3, 4, 5], name='int') - result = idx._simple_new(idx, 'int') + @pytest.mark.parametrize("vals,dtype", [ + ([1, 2, 3, 4, 5], 'int'), ([1.1, np.nan, 2.2, 3.0], 'float'), + (['A', 'B', 'C', np.nan], 'obj') + ]) + def test_constructor_simple_new(self, vals, dtype): + idx = Index(vals, name=dtype) + result = idx._simple_new(idx, dtype) tm.assert_index_equal(result, idx) - idx = Index([1.1, np.nan, 2.2, 3.0], name='float') - result = idx._simple_new(idx, 'float') - tm.assert_index_equal(result, idx) + @pytest.mark.parametrize("vals", [ + [1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int), + # below should coerce + [1., 2., 3.], np.array([1., 2., 3.], dtype=float) + ]) + def test_constructor_dtypes_to_int64(self, vals): + idx = Index(vals, dtype=int) + assert isinstance(idx, Int64Index) - idx = Index(['A', 'B', 'C', np.nan], name='obj') - result = idx._simple_new(idx, 'obj') - tm.assert_index_equal(result, idx) + @pytest.mark.parametrize("vals", [ + [1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]), + np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float) + ]) + def test_constructor_dtypes_to_float64(self, vals): + idx = Index(vals, dtype=float) + assert isinstance(idx, Float64Index) - def test_constructor_dtypes(self): - - for idx in [Index(np.array([1, 2, 3], dtype=int)), - Index(np.array([1, 2, 3], dtype=int), dtype=int), - Index([1, 2, 3], dtype=int)]: - assert isinstance(idx, Int64Index) - - # These should coerce - for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int), - Index([1., 2., 3.], dtype=int)]: - assert isinstance(idx, Int64Index) - - for idx in [Index(np.array([1., 2., 3.], dtype=float)), - Index(np.array([1, 2, 3], dtype=int), dtype=float), - Index(np.array([1., 2., 3.], dtype=float), dtype=float), - Index([1, 2, 3], dtype=float), - Index([1., 2., 3.], dtype=float)]: - assert isinstance(idx, Float64Index) - - for idx in [Index(np.array([True, False, True], 
dtype=bool)), - Index([True, False, True]), - Index(np.array([True, False, True], dtype=bool), - dtype=bool), - Index([True, False, True], dtype=bool)]: - assert isinstance(idx, Index) - assert idx.dtype == object + @pytest.mark.parametrize("cast_idx", [True, False]) + @pytest.mark.parametrize("vals", [ + [True, False, True], np.array([True, False, True], dtype=bool) + ]) + def test_constructor_dtypes_to_object(self, cast_idx, vals): + if cast_idx: + idx = Index(vals, dtype=bool) + else: + idx = Index(vals) - for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'), - Index([1, 2, 3], dtype='category'), - Index(np.array([np_datetime64_compat('2011-01-01'), - np_datetime64_compat('2011-01-02')]), - dtype='category'), - Index([datetime(2011, 1, 1), datetime(2011, 1, 2)], - dtype='category')]: - assert isinstance(idx, CategoricalIndex) - - for idx in [Index(np.array([np_datetime64_compat('2011-01-01'), - np_datetime64_compat('2011-01-02')])), - Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]: - assert isinstance(idx, DatetimeIndex) + assert isinstance(idx, Index) + assert idx.dtype == object - for idx in [Index(np.array([np_datetime64_compat('2011-01-01'), - np_datetime64_compat('2011-01-02')]), - dtype=object), - Index([datetime(2011, 1, 1), - datetime(2011, 1, 2)], dtype=object)]: - assert not isinstance(idx, DatetimeIndex) + @pytest.mark.parametrize("vals", [ + [1, 2, 3], np.array([1, 2, 3], dtype=int), + np.array([np_datetime64_compat('2011-01-01'), + np_datetime64_compat('2011-01-02')]), + [datetime(2011, 1, 1), datetime(2011, 1, 2)] + ]) + def test_constructor_dtypes_to_categorical(self, vals): + idx = Index(vals, dtype='category') + assert isinstance(idx, CategoricalIndex) + + @pytest.mark.parametrize("cast_idx", [True, False]) + @pytest.mark.parametrize("vals", [ + Index(np.array([np_datetime64_compat('2011-01-01'), + np_datetime64_compat('2011-01-02')])), + Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]) + + ]) + def 
test_constructor_dtypes_to_datetime(self, cast_idx, vals): + if cast_idx: + idx = Index(vals, dtype=object) assert isinstance(idx, Index) assert idx.dtype == object + else: + idx = Index(vals) + assert isinstance(idx, DatetimeIndex) - for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64( - 1, 'D')])), Index([timedelta(1), timedelta(1)])]: - assert isinstance(idx, TimedeltaIndex) - - for idx in [Index(np.array([np.timedelta64(1, 'D'), - np.timedelta64(1, 'D')]), dtype=object), - Index([timedelta(1), timedelta(1)], dtype=object)]: - assert not isinstance(idx, TimedeltaIndex) + @pytest.mark.parametrize("cast_idx", [True, False]) + @pytest.mark.parametrize("vals", [ + np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]), + [timedelta(1), timedelta(1)] + ]) + def test_constructor_dtypes_to_timedelta(self, cast_idx, vals): + if cast_idx: + idx = Index(vals, dtype=object) assert isinstance(idx, Index) assert idx.dtype == object + else: + idx = Index(vals) + assert isinstance(idx, TimedeltaIndex) - def test_constructor_dtypes_datetime(self, tz_naive_fixture): - tz = tz_naive_fixture - idx = pd.date_range('2011-01-01', periods=5, tz=tz) + @pytest.mark.parametrize("values", [ + # pass values without timezone, as DatetimeIndex localizes it + pd.date_range('2011-01-01', periods=5).values, + pd.date_range('2011-01-01', periods=5).asi8]) + @pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex]) + def test_constructor_dtypes_datetime(self, tz_naive_fixture, values, + klass): + idx = pd.date_range('2011-01-01', periods=5, tz=tz_naive_fixture) dtype = idx.dtype - # pass values without timezone, as DatetimeIndex localizes it - for values in [pd.date_range('2011-01-01', periods=5).values, - pd.date_range('2011-01-01', periods=5).asi8]: + res = klass(values, tz=tz_naive_fixture) + tm.assert_index_equal(res, idx) - for res in [pd.Index(values, tz=tz), - pd.Index(values, dtype=dtype), - pd.Index(list(values), tz=tz), - pd.Index(list(values), dtype=dtype)]: - 
tm.assert_index_equal(res, idx) + res = klass(values, dtype=dtype) + tm.assert_index_equal(res, idx) - # check compat with DatetimeIndex - for res in [pd.DatetimeIndex(values, tz=tz), - pd.DatetimeIndex(values, dtype=dtype), - pd.DatetimeIndex(list(values), tz=tz), - pd.DatetimeIndex(list(values), dtype=dtype)]: - tm.assert_index_equal(res, idx) + res = klass(list(values), tz=tz_naive_fixture) + tm.assert_index_equal(res, idx) - def test_constructor_dtypes_timedelta(self): + res = klass(list(values), dtype=dtype) + tm.assert_index_equal(res, idx) + @pytest.mark.parametrize("attr", ['values', 'asi8']) + @pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex]) + def test_constructor_dtypes_timedelta(self, attr, klass): idx = pd.timedelta_range('1 days', periods=5) dtype = idx.dtype - for values in [idx.values, idx.asi8]: + values = getattr(idx, attr) - for res in [pd.Index(values, dtype=dtype), - pd.Index(list(values), dtype=dtype)]: - tm.assert_index_equal(res, idx) + res = klass(values, dtype=dtype) + tm.assert_index_equal(res, idx) - # check compat with TimedeltaIndex - for res in [pd.TimedeltaIndex(values, dtype=dtype), - pd.TimedeltaIndex(list(values), dtype=dtype)]: - tm.assert_index_equal(res, idx) + res = klass(list(values), dtype=dtype) + tm.assert_index_equal(res, idx) - def test_constructor_empty(self): + def test_constructor_empty_gen(self): skip_index_keys = ["repeats", "periodIndex", "rangeIndex", "tuples"] for key, idx in self.generate_index_types(skip_index_keys): @@ -422,17 +422,14 @@ def test_constructor_empty(self): assert isinstance(empty, idx.__class__) assert not len(empty) - empty = PeriodIndex([], freq='B') - assert isinstance(empty, PeriodIndex) - assert not len(empty) - - empty = RangeIndex(step=1) - assert isinstance(empty, pd.RangeIndex) - assert not len(empty) - - empty = MultiIndex(levels=[[1, 2], ['blue', 'red']], - labels=[[], []]) - assert isinstance(empty, MultiIndex) + @pytest.mark.parametrize("empty,klass", [ + 
(PeriodIndex([], freq='B'), PeriodIndex), + (RangeIndex(step=1), pd.RangeIndex), + (MultiIndex(levels=[[1, 2], ['blue', 'red']], + labels=[[], []]), MultiIndex) + ]) + def test_constructor_empty(self, empty, klass): + assert isinstance(empty, klass) assert not len(empty) def test_view_with_args(self): @@ -468,14 +465,10 @@ def test_equals_object(self): # same assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])) - # different length - assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b'])) - - # same length, different values - assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])) - - # Must also be an Index - assert not Index(['a', 'b', 'c']).equals(['a', 'b', 'c']) + @pytest.mark.parametrize("comp", [ + Index(['a', 'b']), Index(['a', 'b', 'd']), ['a', 'b', 'c']]) + def test_not_equals_object(self, comp): + assert not Index(['a', 'b', 'c']).equals(comp) def test_insert(self): @@ -498,28 +491,27 @@ def test_insert(self): null_index = Index([]) tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a')) + def test_insert_missing(self, nulls_fixture): # GH 18295 (test missing) expected = Index(['a', np.nan, 'b', 'c']) - for na in (np.nan, pd.NaT, None): - result = Index(list('abc')).insert(1, na) - tm.assert_index_equal(result, expected) - - def test_delete(self): - idx = Index(['a', 'b', 'c', 'd'], name='idx') - - expected = Index(['b', 'c', 'd'], name='idx') - result = idx.delete(0) + result = Index(list('abc')).insert(1, nulls_fixture) tm.assert_index_equal(result, expected) - assert result.name == expected.name - expected = Index(['a', 'b', 'c'], name='idx') - result = idx.delete(-1) - tm.assert_index_equal(result, expected) - assert result.name == expected.name + @pytest.mark.parametrize("pos,exp", [ + (0, Index(['b', 'c', 'd'], name='idx')), + (-1, Index(['a', 'b', 'c'], name='idx')) + ]) + def test_delete(self, pos, exp): + idx = Index(['a', 'b', 'c', 'd'], name='idx') + result = idx.delete(pos) + tm.assert_index_equal(result, exp) + 
assert result.name == exp.name + def test_delete_raises(self): + idx = Index(['a', 'b', 'c', 'd'], name='idx') with pytest.raises((IndexError, ValueError)): # either depending on numpy version - result = idx.delete(5) + idx.delete(5) def test_identical(self): @@ -595,26 +587,21 @@ def test_nanosecond_index_access(self): 'ns') assert first_value == x[Timestamp(exp_ts)] - def test_comparators(self): + @pytest.mark.parametrize("op", [ + operator.eq, operator.ne, operator.gt, operator.lt, + operator.ge, operator.le + ]) + def test_comparators(self, op): index = self.dateIndex element = index[len(index) // 2] element = _to_m8(element) arr = np.array(index) + arr_result = op(arr, element) + index_result = op(index, element) - def _check(op): - arr_result = op(arr, element) - index_result = op(index, element) - - assert isinstance(index_result, np.ndarray) - tm.assert_numpy_array_equal(arr_result, index_result) - - _check(operator.eq) - _check(operator.ne) - _check(operator.gt) - _check(operator.lt) - _check(operator.ge) - _check(operator.le) + assert isinstance(index_result, np.ndarray) + tm.assert_numpy_array_equal(arr_result, index_result) def test_booleanindex(self): boolIdx = np.repeat(True, len(self.strIndex)).astype(bool) @@ -634,31 +621,34 @@ def test_fancy(self): for i in sl: assert i == sl[sl.get_loc(i)] - def test_empty_fancy(self): - empty_farr = np.array([], dtype=np.float_) - empty_iarr = np.array([], dtype=np.int_) - empty_barr = np.array([], dtype=np.bool_) + @pytest.mark.parametrize("attr", [ + 'strIndex', 'intIndex', 'floatIndex']) + @pytest.mark.parametrize("dtype", [np.int_, np.bool_]) + def test_empty_fancy(self, attr, dtype): + empty_arr = np.array([], dtype=dtype) + idx = getattr(self, attr) + empty_idx = idx.__class__([]) + assert idx[[]].identical(empty_idx) + assert idx[empty_arr].identical(empty_idx) + + @pytest.mark.parametrize("attr", [ + 'strIndex', 'intIndex', 'floatIndex']) + def test_empty_fancy_raises(self, attr): # pd.DatetimeIndex is 
excluded, because it overrides getitem and should # be tested separately. - for idx in [self.strIndex, self.intIndex, self.floatIndex]: - empty_idx = idx.__class__([]) - - assert idx[[]].identical(empty_idx) - assert idx[empty_iarr].identical(empty_idx) - assert idx[empty_barr].identical(empty_idx) - - # np.ndarray only accepts ndarray of int & bool dtypes, so should - # Index. - pytest.raises(IndexError, idx.__getitem__, empty_farr) - - def test_getitem_error(self, indices): + empty_farr = np.array([], dtype=np.float_) + idx = getattr(self, attr) + empty_idx = idx.__class__([]) - with pytest.raises(IndexError): - indices[101] + assert idx[[]].identical(empty_idx) + # np.ndarray only accepts ndarray of int & bool dtypes, so should Index + pytest.raises(IndexError, idx.__getitem__, empty_farr) + @pytest.mark.parametrize("itm", [101, 'no_int']) + def test_getitem_error(self, indices, itm): with pytest.raises(IndexError): - indices['no_int'] + indices[itm] def test_intersection(self): first = self.strIndex[:20]
I came across this module on another change and noticed that a lot of the tests could really use refactoring. There's a ton more to be done with this module but submitting as is so it doesn't get too large. Can either add other commits on top of this or have this merged (assuming looks OK) and continue down the module in additional PR(s)
https://api.github.com/repos/pandas-dev/pandas/pulls/20624
2018-04-06T16:41:31Z
2018-04-24T10:12:29Z
2018-04-24T10:12:29Z
2018-04-24T15:44:54Z
BUG: excel index label can be an array
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 76ffd41f93090..2cc14994311f8 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -497,12 +497,13 @@ def _format_regular_rows(self): if self.index: # check aliases # if list only take first as this is not a MultiIndex - if (self.index_label and + if (self.index_label is not None and isinstance(self.index_label, (list, tuple, np.ndarray, Index))): index_label = self.index_label[0] # if string good to go - elif self.index_label and isinstance(self.index_label, str): + elif (self.index_label is not None and + isinstance(self.index_label, str)): index_label = self.index_label else: index_label = self.df.index.names[0] @@ -539,7 +540,7 @@ def _format_hierarchical_rows(self): if self.index: index_labels = self.df.index.names # check for aliases - if (self.index_label and + if (self.index_label is not None and isinstance(self.index_label, (list, tuple, np.ndarray, Index))): index_labels = self.index_label
Fixes "ValueError: The truth value of an array with more than one element is ambiguous." where self.index_label could be a list, tuple np.ndarray, or Index type closes #20622
https://api.github.com/repos/pandas-dev/pandas/pulls/20623
2018-04-06T15:09:03Z
2018-09-25T15:43:47Z
null
2018-09-25T15:43:47Z
changes
diff --git a/doc/source/io.rst b/doc/source/io.rst index ff505f525fc22..c7f4b63b981fa 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3978,7 +3978,7 @@ Sometimes you want to get the coordinates (a.k.a the index locations) of your qu df_coord = pd.DataFrame(np.random.randn(1000,2),index=pd.date_range('20000101',periods=1000)) store.append('df_coord',df_coord) c = store.select_as_coordinates('df_coord','index>20020101') - c.summary() + c store.select('df_coord',where=c) .. _io.hdf5-where_mask:
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ ] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [ ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20621
2018-04-06T14:35:16Z
2018-04-06T14:36:11Z
null
2018-04-06T14:36:11Z
DOC: update the str_replace docstring
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 23c891ec4fcd0..083e4067cdbd6 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -439,8 +439,7 @@ def str_endswith(arr, pat, na=np.nan): def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): r""" Replace occurrences of pattern/regex in the Series/Index with - some other string. Equivalent to :meth:`str.replace` or - :func:`re.sub`. + some other string. Equivalent to :func:`re.sub`. Parameters ----------
Remove claim in `str_replace` docstring of equivalence to base Python `str.replace` -- the former does regex parsing while the latter does not.
https://api.github.com/repos/pandas-dev/pandas/pulls/20618
2018-04-05T18:24:38Z
2018-07-22T00:31:53Z
null
2018-07-22T00:31:53Z
BUG: Avoid splitting string with list() (#20592)
diff --git a/pandas/core/series.py b/pandas/core/series.py index 1d6f770d92795..d1edbc5ec5e56 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -951,7 +951,10 @@ def _set_with(self, key, value): if not isinstance(key, (list, Series, np.ndarray, Series)): try: - key = list(key) + if isinstance(key, compat.string_types): + key = [key] + else: + key = list(key) except Exception: key = [key]
Solves #20592 and I doubt that there are any cases when a key string should be split by `list()`
https://api.github.com/repos/pandas-dev/pandas/pulls/20615
2018-04-05T12:28:42Z
2018-09-25T16:22:46Z
null
2018-09-25T16:22:46Z
Stop concat from attempting to sort mismatched columns by default
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 74b21c21252ec..1161656731f88 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -153,10 +153,10 @@ Set logic on the other axes ~~~~~~~~~~~~~~~~~~~~~~~~~~~ When gluing together multiple DataFrames, you have a choice of how to handle -the other axes (other than the one being concatenated). This can be done in +the other axes (other than the one being concatenated). This can be done in the following three ways: -- Take the (sorted) union of them all, ``join='outer'``. This is the default +- Take the union of them all, ``join='outer'``. This is the default option as it results in zero information loss. - Take the intersection, ``join='inner'``. - Use a specific index, as passed to the ``join_axes`` argument. @@ -167,10 +167,10 @@ behavior: .. ipython:: python df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], - 'D': ['D2', 'D3', 'D6', 'D7'], - 'F': ['F2', 'F3', 'F6', 'F7']}, - index=[2, 3, 6, 7]) - result = pd.concat([df1, df4], axis=1) + 'D': ['D2', 'D3', 'D6', 'D7'], + 'F': ['F2', 'F3', 'F6', 'F7']}, + index=[2, 3, 6, 7]) + result = pd.concat([df1, df4], axis=1, sort=False) .. ipython:: python @@ -181,8 +181,16 @@ behavior: labels=['df1', 'df4'], vertical=False); plt.close('all'); -Note that the row indexes have been unioned and sorted. Here is the same thing -with ``join='inner'``: +.. warning:: + + .. versionchanged:: 0.23.0 + + The default behavior with ``join='outer'`` is to sort the other axis + (columns in this case). In a future version of pandas, the default will + be to not sort. We specified ``sort=False`` to opt in to the new + behavior now. + +Here is the same thing with ``join='inner'``: .. 
ipython:: python diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 2cf8e3cedf742..fb9e14080e4f6 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -639,6 +639,36 @@ Returning a ``Series`` allows one to control the exact return structure and colu df.apply(lambda x: Series([1, 2, 3], index=['D', 'E', 'F']), axis=1) +.. _whatsnew_0230.api_breaking.concat: + +Concatenation will no longer sort +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In a future version of pandas :func:`pandas.concat` will no longer sort the non-concatenation axis when it is not already aligned. +The current behavior is the same as the previous (sorting), but now a warning is issued when ``sort`` is not specified and the non-concatenation axis is not aligned (:issue:`4588`). + +.. ipython:: python + :okwarning: + + df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=['b', 'a']) + df2 = pd.DataFrame({"a": [4, 5]}) + + pd.concat([df1, df2]) + +To keep the previous behavior (sorting) and silence the warning, pass ``sort=True`` + +.. ipython:: python + + pd.concat([df1, df2], sort=True) + +To accept the future behavior (no sorting), pass ``sort=False`` + +.. ipython + + pd.concat([df1, df2], sort=False) + +Note that this change also applies to :meth:`DataFrame.append`, which has also received a ``sort`` keyword for controlling this behavior. + .. 
_whatsnew_0230.api_breaking.build_changes: diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 30521760327b4..ae9d240afcb93 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -157,7 +157,7 @@ def fast_unique_multiple(list arrays): @cython.wraparound(False) @cython.boundscheck(False) -def fast_unique_multiple_list(list lists): +def fast_unique_multiple_list(list lists, bint sort=True): cdef: list buf Py_ssize_t k = len(lists) @@ -174,10 +174,11 @@ def fast_unique_multiple_list(list lists): if val not in table: table[val] = stub uniques.append(val) - try: - uniques.sort() - except Exception: - pass + if sort: + try: + uniques.sort() + except Exception: + pass return uniques diff --git a/pandas/core/base.py b/pandas/core/base.py index 9ca1c8bea4db7..2f25a9ce41369 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -507,7 +507,7 @@ def is_any_frame(): for r in compat.itervalues(result)) if isinstance(result, list): - return concat(result, keys=keys, axis=1), True + return concat(result, keys=keys, axis=1, sort=True), True elif is_any_frame(): # we have a dict of DataFrames diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d7efd777f4176..d475d8b944575 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6038,7 +6038,8 @@ def infer(x): # ---------------------------------------------------------------------- # Merging / joining methods - def append(self, other, ignore_index=False, verify_integrity=False): + def append(self, other, ignore_index=False, + verify_integrity=False, sort=None): """ Append rows of `other` to the end of this frame, returning a new object. Columns not in this frame are added as new columns. @@ -6051,6 +6052,14 @@ def append(self, other, ignore_index=False, verify_integrity=False): If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. 
+ sort : boolean, default None + Sort columns if the columns of `self` and `other` are not aligned. + The default sorting is deprecated and will change to not-sorting + in a future version of pandas. Explicitly pass ``sort=True`` to + silence the warning and sort. Explicitly pass ``sort=False`` to + silence the warning and not sort. + + .. versionadded:: 0.23.0 Returns ------- @@ -6162,7 +6171,8 @@ def append(self, other, ignore_index=False, verify_integrity=False): else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, - verify_integrity=verify_integrity) + verify_integrity=verify_integrity, + sort=sort) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): @@ -7481,7 +7491,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): from pandas.core.index import _get_objs_combined_axis if columns is None: - columns = _get_objs_combined_axis(data) + columns = _get_objs_combined_axis(data, sort=False) indexer_cache = {} diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 8c20d62117e25..4132d8e69704a 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1098,7 +1098,8 @@ def reset_identity(values): group_names = self.grouper.names result = concat(values, axis=self.axis, keys=group_keys, - levels=group_levels, names=group_names) + levels=group_levels, names=group_names, + sort=False) else: # GH5610, returns a MI, with the first level being a diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 2e5ec8b554ce7..f9501cd2f9ddf 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -1,3 +1,6 @@ +import textwrap +import warnings + from pandas.core.indexes.base import (Index, _new_Index, _ensure_index, @@ -17,6 +20,16 @@ from pandas._libs import lib from pandas._libs.tslib import NaT +_sort_msg = textwrap.dedent("""\ +Sorting because non-concatenation axis is not aligned. 
A future version +of pandas will change to not sort by default. + +To accept the future behavior, pass 'sort=True'. + +To retain the current behavior and silence the warning, pass sort=False +""") + + # TODO: there are many places that rely on these private methods existing in # pandas.core.index __all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index', @@ -31,33 +44,40 @@ '_all_indexes_same'] -def _get_objs_combined_axis(objs, intersect=False, axis=0): +def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True): # Extract combined index: return intersection or union (depending on the # value of "intersect") of indexes on given axis, or None if all objects # lack indexes (e.g. they are numpy arrays) obs_idxes = [obj._get_axis(axis) for obj in objs if hasattr(obj, '_get_axis')] if obs_idxes: - return _get_combined_index(obs_idxes, intersect=intersect) + return _get_combined_index(obs_idxes, intersect=intersect, sort=sort) -def _get_combined_index(indexes, intersect=False): +def _get_combined_index(indexes, intersect=False, sort=False): # TODO: handle index names! 
indexes = com._get_distinct_objs(indexes) if len(indexes) == 0: - return Index([]) - if len(indexes) == 1: - return indexes[0] - if intersect: + index = Index([]) + elif len(indexes) == 1: + index = indexes[0] + elif intersect: index = indexes[0] for other in indexes[1:]: index = index.intersection(other) - return index - union = _union_indexes(indexes) - return _ensure_index(union) + else: + index = _union_indexes(indexes, sort=sort) + index = _ensure_index(index) + + if sort: + try: + index = index.sort_values() + except TypeError: + pass + return index -def _union_indexes(indexes): +def _union_indexes(indexes, sort=True): if len(indexes) == 0: raise AssertionError('Must have at least 1 Index to union') if len(indexes) == 1: @@ -74,7 +94,8 @@ def conv(i): i = i.tolist() return i - return Index(lib.fast_unique_multiple_list([conv(i) for i in inds])) + return Index( + lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort)) if kind == 'special': result = indexes[0] @@ -89,13 +110,19 @@ def conv(i): index = indexes[0] for other in indexes[1:]: if not index.equals(other): + + if sort is None: + # TODO: remove once pd.concat sort default changes + warnings.warn(_sort_msg, FutureWarning, stacklevel=8) + sort = True + return _unique_indices(indexes) name = _get_consensus_names(indexes)[0] if name != index.name: index = index._shallow_copy(name=name) return index - else: + else: # kind='list' return _unique_indices(indexes) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index e08d0a7368ccb..16e64192fdb20 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1499,8 +1499,11 @@ def _extract_axis(self, data, axis=0, intersect=False): raw_lengths.append(v.shape[axis]) if have_frames: + # we want the "old" behavior here, of sorting only + # 1. we're doing a union (intersect=False) + # 2. the indices are not aligned. 
index = _get_objs_combined_axis(data.values(), axis=axis, - intersect=intersect) + intersect=intersect, sort=None) if have_raw_arrays: lengths = list(set(raw_lengths)) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 6e564975f34cd..b36e9b8d900fd 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -20,7 +20,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, - copy=True): + sort=None, copy=True): """ Concatenate pandas objects along a particular axis with optional set logic along the other axes. @@ -60,6 +60,19 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, verify_integrity : boolean, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation + sort : boolean, default None + Sort non-concatenation axis if it is not already aligned when `join` + is 'outer'. The current default of sorting is deprecated and will + change to not-sorting in a future version of pandas. + + Explicitly pass ``sort=True`` to silence the warning and sort. + Explicitly pass ``sort=False`` to silence the warning and not sort. + + This has no effect when ``join='inner'``, which already preserves + the order of the non-concatenation axis. + + .. 
versionadded:: 0.23.0 + copy : boolean, default True If False, do not copy data unnecessarily @@ -209,7 +222,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, - copy=copy) + copy=copy, sort=sort) return op.get_result() @@ -220,7 +233,8 @@ class _Concatenator(object): def __init__(self, objs, axis=0, join='outer', join_axes=None, keys=None, levels=None, names=None, - ignore_index=False, verify_integrity=False, copy=True): + ignore_index=False, verify_integrity=False, copy=True, + sort=False): if isinstance(objs, (NDFrame, compat.string_types)): raise TypeError('first argument must be an iterable of pandas ' 'objects, you passed an object of type ' @@ -355,6 +369,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, self.keys = keys self.names = names or getattr(keys, 'names', None) self.levels = levels + self.sort = sort self.ignore_index = ignore_index self.verify_integrity = verify_integrity @@ -447,7 +462,8 @@ def _get_comb_axis(self, i): data_axis = self.objs[0]._get_block_manager_axis(i) try: return _get_objs_combined_axis(self.objs, axis=data_axis, - intersect=self.intersect) + intersect=self.intersect, + sort=self.sort) except IndexError: types = [type(x).__name__ for x in self.objs] raise TypeError("Cannot concatenate list of {types}" diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 74a9b59d3194a..96f8a53b4d253 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -437,7 +437,8 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') - common_idx = _get_objs_combined_axis(index + columns, intersect=True) + common_idx = _get_objs_combined_axis(index + columns, intersect=True, + sort=False) data = {} data.update(zip(rownames, 
index)) diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index e82faaeef2986..15ca65395e4fc 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -96,7 +96,7 @@ def test_append_series_dict(self): result = df.append(series[::-1][:3], ignore_index=True) expected = df.append(DataFrame({0: series[::-1][:3]}).T, - ignore_index=True) + ignore_index=True, sort=True) assert_frame_equal(result, expected.loc[:, result.columns]) # can append when name set @@ -119,8 +119,8 @@ def test_append_list_of_series_dicts(self): # different columns dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4}, {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}] - result = df.append(dicts, ignore_index=True) - expected = df.append(DataFrame(dicts), ignore_index=True) + result = df.append(dicts, ignore_index=True, sort=True) + expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) assert_frame_equal(result, expected) def test_append_empty_dataframe(self): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 47b7d60e3b6e8..6dd38187f7277 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1071,6 +1071,17 @@ def test_constructor_list_of_series(self): expected = DataFrame.from_dict(sdict, orient='index') tm.assert_frame_equal(result, expected) + def test_constructor_list_of_series_aligned_index(self): + series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i)) + for i in range(3)] + result = pd.DataFrame(series) + expected = pd.DataFrame({'b': [0, 1, 2], + 'a': [0, 1, 2], + 'c': [0, 1, 2]}, + columns=['b', 'a', 'c'], + index=['0', '1', '2']) + tm.assert_frame_equal(result, expected) + def test_constructor_list_of_derived_dicts(self): class CustomDict(dict): pass diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index f1178d44dbfe0..bfc74db73b813 100644 --- 
a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -629,7 +629,8 @@ def test_iloc_non_unique_indexing(self): new_list.append(s * 3) expected = DataFrame(new_list) - expected = concat([expected, DataFrame(index=idx[idx > sidx.max()])]) + expected = concat([expected, DataFrame(index=idx[idx > sidx.max()])], + sort=True) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = df2.loc[idx] tm.assert_frame_equal(result, expected, check_index_type=False) diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index f95f493c66043..3c7a7f070805d 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -156,8 +156,9 @@ def f(): df_orig = DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) - expected = pd.concat([df_orig, DataFrame( - {'A': 7}, index=[dates[-1] + 1])]) + expected = pd.concat([df_orig, + DataFrame({'A': 7}, index=[dates[-1] + 1])], + sort=True) df = df_orig.copy() df.loc[dates[-1] + 1, 'A'] = 7 tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index dbf7c7f100b0e..f3827ac251cf0 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -826,7 +826,7 @@ def test_validation(self): # Dups on left left_w_dups = left.append(pd.DataFrame({'a': ['a'], 'c': ['cow']}, - index=[3])) + index=[3]), sort=True) merge(left_w_dups, right, left_index=True, right_index=True, validate='many_to_one') @@ -1286,7 +1286,7 @@ def test_join_multi_levels(self): index=MultiIndex.from_tuples( [(4, np.nan)], names=['household_id', 'asset_id']))) - ], axis=0).reindex(columns=expected.columns)) + ], axis=0, sort=True).reindex(columns=expected.columns)) assert_frame_equal(result, expected) # invalid cases diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 
640d09f3587fb..57af67422d65f 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -21,6 +21,22 @@ import pytest +@pytest.fixture(params=[True, False]) +def sort(request): + """Boolean sort keyword for concat and DataFrame.append.""" + return request.param + + +@pytest.fixture(params=[True, False, None]) +def sort_with_none(request): + """Boolean sort keyword for concat and DataFrame.append. + + Includes the default of None + """ + # TODO: Replace with sort once keyword changes. + return request.param + + class ConcatenateBase(object): def setup_method(self, method): @@ -716,7 +732,7 @@ def test_concat_categorical_empty(self): class TestAppend(ConcatenateBase): - def test_append(self): + def test_append(self, sort): begin_index = self.frame.index[:5] end_index = self.frame.index[5:] @@ -727,10 +743,10 @@ def test_append(self): tm.assert_almost_equal(appended['A'], self.frame['A']) del end_frame['A'] - partial_appended = begin_frame.append(end_frame) + partial_appended = begin_frame.append(end_frame, sort=sort) assert 'A' in partial_appended - partial_appended = end_frame.append(begin_frame) + partial_appended = end_frame.append(begin_frame, sort=sort) assert 'A' in partial_appended # mixed type handling @@ -738,8 +754,9 @@ def test_append(self): tm.assert_frame_equal(appended, self.mixed_frame) # what to test here - mixed_appended = self.mixed_frame[:5].append(self.frame[5:]) - mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:]) + mixed_appended = self.mixed_frame[:5].append(self.frame[5:], sort=sort) + mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:], + sort=sort) # all equal except 'foo' column tm.assert_frame_equal( @@ -769,10 +786,10 @@ def test_append(self): result = df.append(row) tm.assert_frame_equal(result, expected) - def test_append_length0_frame(self): + def test_append_length0_frame(self, sort): df = DataFrame(columns=['A', 'B', 'C']) df3 = DataFrame(index=[0, 1], columns=['A', 'B']) - df5 = 
df.append(df3) + df5 = df.append(df3, sort=sort) expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C']) assert_frame_equal(df5, expected) @@ -793,7 +810,33 @@ def test_append_records(self): expected = DataFrame(np.concatenate((arr1, arr2))) assert_frame_equal(result, expected) - def test_append_different_columns(self): + # rewrite sort fixture, since we also want to test default of None + def test_append_sorts(self, sort_with_none): + df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=['b', 'a']) + df2 = pd.DataFrame({"a": [1, 2], 'c': [3, 4]}, index=[2, 3]) + + if sort_with_none is None: + # only warn if not explicitly specified + # don't check stacklevel since its set for concat, and append + # has an extra stack. + ctx = tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) + else: + ctx = tm.assert_produces_warning(None) + + with ctx: + result = df1.append(df2, sort=sort_with_none) + + # for None / True + expected = pd.DataFrame({"b": [1, 2, None, None], + "a": [1, 2, 1, 2], + "c": [None, None, 3, 4]}, + columns=['a', 'b', 'c']) + if sort_with_none is False: + expected = expected[['b', 'a', 'c']] + tm.assert_frame_equal(result, expected) + + def test_append_different_columns(self, sort): df = DataFrame({'bools': np.random.randn(10) > 0, 'ints': np.random.randint(0, 10, 10), 'floats': np.random.randn(10), @@ -802,11 +845,11 @@ def test_append_different_columns(self): a = df[:5].loc[:, ['bools', 'ints', 'floats']] b = df[5:].loc[:, ['strings', 'ints', 'floats']] - appended = a.append(b) + appended = a.append(b, sort=sort) assert isna(appended['strings'][0:4]).all() assert isna(appended['bools'][5:]).all() - def test_append_many(self): + def test_append_many(self, sort): chunks = [self.frame[:5], self.frame[5:10], self.frame[10:15], self.frame[15:]] @@ -815,7 +858,7 @@ def test_append_many(self): chunks[-1] = chunks[-1].copy() chunks[-1]['foo'] = 'bar' - result = chunks[0].append(chunks[1:]) + result = chunks[0].append(chunks[1:], sort=sort) 
tm.assert_frame_equal(result.loc[:, self.frame.columns], self.frame) assert (result['foo'][15:] == 'bar').all() assert result['foo'][:15].isna().all() @@ -923,7 +966,7 @@ def test_append_different_columns_types_raises( with pytest.raises(TypeError): df.append(ser) - def test_append_dtype_coerce(self): + def test_append_dtype_coerce(self, sort): # GH 4993 # appending with datetime will incorrectly convert datetime64 @@ -946,16 +989,22 @@ def test_append_dtype_coerce(self): dt.datetime(2013, 1, 2, 0, 0), dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 4, 0, 0)], - name='start_time')], axis=1) - result = df1.append(df2, ignore_index=True) + name='start_time')], + axis=1, sort=sort) + result = df1.append(df2, ignore_index=True, sort=sort) + if sort: + expected = expected[['end_time', 'start_time']] + else: + expected = expected[['start_time', 'end_time']] + assert_frame_equal(result, expected) - def test_append_missing_column_proper_upcast(self): + def test_append_missing_column_proper_upcast(self, sort): df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')}) df2 = DataFrame({'B': np.array([True, False, True, False], dtype=bool)}) - appended = df1.append(df2, ignore_index=True) + appended = df1.append(df2, ignore_index=True, sort=sort) assert appended['A'].dtype == 'f8' assert appended['B'].dtype == 'O' @@ -1043,7 +1092,7 @@ def test_concat_keys_specific_levels(self): Index(level, name='group_key')) assert result.columns.names[0] == 'group_key' - def test_concat_dataframe_keys_bug(self): + def test_concat_dataframe_keys_bug(self, sort): t1 = DataFrame({ 'value': Series([1, 2, 3], index=Index(['a', 'b', 'c'], name='id'))}) @@ -1051,7 +1100,7 @@ def test_concat_dataframe_keys_bug(self): 'value': Series([7, 8], index=Index(['a', 'b'], name='id'))}) # it works - result = concat([t1, t2], axis=1, keys=['t1', 't2']) + result = concat([t1, t2], axis=1, keys=['t1', 't2'], sort=sort) assert list(result.columns) == [('t1', 'value'), ('t2', 'value')] def 
test_concat_series_partial_columns_names(self): @@ -1097,7 +1146,7 @@ def test_concat_dict(self): expected = concat([frames[k] for k in keys], keys=keys) tm.assert_frame_equal(result, expected) - def test_concat_ignore_index(self): + def test_concat_ignore_index(self, sort): frame1 = DataFrame({"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}) @@ -1105,7 +1154,8 @@ def test_concat_ignore_index(self): frame1.index = Index(["x", "y", "z"]) frame2.index = Index(["x", "y", "q"]) - v1 = concat([frame1, frame2], axis=1, ignore_index=True) + v1 = concat([frame1, frame2], axis=1, + ignore_index=True, sort=sort) nan = np.nan expected = DataFrame([[nan, nan, nan, 4.3], @@ -1113,6 +1163,8 @@ def test_concat_ignore_index(self): ['b', 2, 3.2, 2.2], ['c', 3, 1.2, nan]], index=Index(["q", "x", "y", "z"])) + if not sort: + expected = expected.loc[['x', 'y', 'z', 'q']] tm.assert_frame_equal(v1, expected) @@ -1309,16 +1361,16 @@ def test_dups_index(self): result = df.append(df) assert_frame_equal(result, expected) - def test_with_mixed_tuples(self): + def test_with_mixed_tuples(self, sort): # 10697 # columns have mixed tuples, so handle properly df1 = DataFrame({u'A': 'foo', (u'B', 1): 'bar'}, index=range(2)) df2 = DataFrame({u'B': 'foo', (u'B', 1): 'bar'}, index=range(2)) # it works - concat([df1, df2]) + concat([df1, df2], sort=sort) - def test_handle_empty_objects(self): + def test_handle_empty_objects(self, sort): df = DataFrame(np.random.randn(10, 4), columns=list('abcd')) baz = df[:5].copy() @@ -1326,7 +1378,7 @@ def test_handle_empty_objects(self): empty = df[5:5] frames = [baz, empty, empty, df[5:]] - concatted = concat(frames, axis=0) + concatted = concat(frames, axis=0, sort=sort) expected = df.reindex(columns=['a', 'b', 'c', 'd', 'foo']) expected['foo'] = expected['foo'].astype('O') @@ -1478,7 +1530,7 @@ def test_panel_concat_other_axes(self): expected.loc['ItemC', :, :2] = 'baz' tm.assert_panel_equal(result, expected) - def 
test_panel_concat_buglet(self): + def test_panel_concat_buglet(self, sort): with catch_warnings(record=True): # #2257 def make_panel(): @@ -1503,7 +1555,7 @@ def df(): panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2) # it works! - concat([panel1, panel3], axis=1, verify_integrity=True) + concat([panel1, panel3], axis=1, verify_integrity=True, sort=sort) def test_concat_series(self): @@ -1528,7 +1580,7 @@ def test_concat_series(self): expected.index = exp_index tm.assert_series_equal(result, expected) - def test_concat_series_axis1(self): + def test_concat_series_axis1(self, sort=sort): ts = tm.makeTimeSeries() pieces = [ts[:-2], ts[2:], ts[2:-2]] @@ -1557,7 +1609,7 @@ def test_concat_series_axis1(self): # must reindex, #2603 s = Series(randn(3), index=['c', 'a', 'b'], name='A') s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B') - result = concat([s, s2], axis=1) + result = concat([s, s2], axis=1, sort=sort) expected = DataFrame({'A': s, 'B': s2}) assert_frame_equal(result, expected) @@ -2043,7 +2095,7 @@ def test_categorical_concat_dtypes(self): expected = Series([True, False, False], index=index) tm.assert_series_equal(result, expected) - def test_categorical_concat(self): + def test_categorical_concat(self, sort): # See GH 10177 df1 = DataFrame(np.arange(18, dtype='int64').reshape(6, 3), columns=["a", "b", "c"]) @@ -2054,7 +2106,7 @@ def test_categorical_concat(self): cat_values = ["one", "one", "two", "one", "two", "two", "one"] df2['h'] = Series(Categorical(cat_values)) - res = pd.concat((df1, df2), axis=0, ignore_index=True) + res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort) exp = DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12], 'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], @@ -2162,10 +2214,15 @@ def test_concat_order(self): dfs = [pd.DataFrame(index=range(3), columns=['a', 1, None])] dfs += [pd.DataFrame(index=range(3), columns=[None, 1, 'a']) for i in range(100)] - result = 
pd.concat(dfs).columns - expected = dfs[0].columns + + result = pd.concat(dfs, sort=True).columns + if PY2: - expected = expected.sort_values() + # Different sort order between incomparable objects between + # python 2 and python3 via Index.union. + expected = dfs[1].columns + else: + expected = dfs[0].columns tm.assert_index_equal(result, expected) def test_concat_datetime_timezone(self): @@ -2249,3 +2306,98 @@ def test_concat_empty_and_non_empty_series_regression(): expected = s1 result = pd.concat([s1, s2]) tm.assert_series_equal(result, expected) + + +def test_concat_sorts_columns(sort_with_none): + # GH-4588 + df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, columns=['b', 'a']) + df2 = pd.DataFrame({"a": [3, 4], "c": [5, 6]}) + + # for sort=True/None + expected = pd.DataFrame({"a": [1, 2, 3, 4], + "b": [1, 2, None, None], + "c": [None, None, 5, 6]}, + columns=['a', 'b', 'c']) + + if sort_with_none is False: + expected = expected[['b', 'a', 'c']] + + if sort_with_none is None: + # only warn if not explicitly specified + ctx = tm.assert_produces_warning(FutureWarning) + else: + ctx = tm.assert_produces_warning(None) + + # default + with ctx: + result = pd.concat([df1, df2], ignore_index=True, sort=sort_with_none) + tm.assert_frame_equal(result, expected) + + +def test_concat_sorts_index(sort_with_none): + df1 = pd.DataFrame({"a": [1, 2, 3]}, index=['c', 'a', 'b']) + df2 = pd.DataFrame({"b": [1, 2]}, index=['a', 'b']) + + # For True/None + expected = pd.DataFrame({"a": [2, 3, 1], "b": [1, 2, None]}, + index=['a', 'b', 'c'], + columns=['a', 'b']) + if sort_with_none is False: + expected = expected.loc[['c', 'a', 'b']] + + if sort_with_none is None: + # only warn if not explicitly specified + ctx = tm.assert_produces_warning(FutureWarning) + else: + ctx = tm.assert_produces_warning(None) + + # Warn and sort by default + with ctx: + result = pd.concat([df1, df2], axis=1, sort=sort_with_none) + tm.assert_frame_equal(result, expected) + + +def 
test_concat_inner_sort(sort_with_none): + # https://github.com/pandas-dev/pandas/pull/20613 + df1 = pd.DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, + columns=['b', 'a', 'c']) + df2 = pd.DataFrame({"a": [1, 2], 'b': [3, 4]}, index=[3, 4]) + + with tm.assert_produces_warning(None): + # unset sort should *not* warn for inner join + # since that never sorted + result = pd.concat([df1, df2], sort=sort_with_none, + join='inner', + ignore_index=True) + + expected = pd.DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, + columns=['b', 'a']) + if sort_with_none is True: + expected = expected[['a', 'b']] + tm.assert_frame_equal(result, expected) + + +def test_concat_aligned_sort(): + # GH-4588 + df = pd.DataFrame({"c": [1, 2], "b": [3, 4], 'a': [5, 6]}, + columns=['c', 'b', 'a']) + result = pd.concat([df, df], sort=True, ignore_index=True) + expected = pd.DataFrame({'a': [5, 6, 5, 6], 'b': [3, 4, 3, 4], + 'c': [1, 2, 1, 2]}, + columns=['a', 'b', 'c']) + tm.assert_frame_equal(result, expected) + + result = pd.concat([df, df[['c', 'b']]], join='inner', sort=True, + ignore_index=True) + expected = expected[['b', 'c']] + tm.assert_frame_equal(result, expected) + + +def test_concat_aligned_sort_does_not_raise(): + # GH-4588 + # We catch TypeErrors from sorting internally and do not re-raise. 
+ df = pd.DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, 'a']) + expected = pd.DataFrame({1: [1, 2, 1, 2], 'a': [3, 4, 3, 4]}, + columns=[1, 'a']) + result = pd.concat([df, df], ignore_index=True, sort=True) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 1004b40bfb4c1..db287a719ae1e 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1724,3 +1724,15 @@ def test_crosstab_tuple_name(self, names): result = pd.crosstab(s1, s2) tm.assert_frame_equal(result, expected) + + def test_crosstab_unsorted_order(self): + df = pd.DataFrame({"b": [3, 1, 2], 'a': [5, 4, 6]}, + index=['C', 'A', 'B']) + result = pd.crosstab(df.index, [df.b, df.a]) + e_idx = pd.Index(['A', 'B', 'C'], name='row_0') + e_columns = pd.MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], + names=['b', 'a']) + expected = pd.DataFrame([[1, 0, 0], [0, 1, 0], [0, 0, 1]], + index=e_idx, + columns=e_columns) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 540933cb90be2..9cc615e15564f 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -629,10 +629,31 @@ def test_append(self): a = self.frame.iloc[:5, :3] b = self.frame.iloc[5:] - appended = a.append(b) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + # Stacklevel is set for pd.concat, not append + appended = a.append(b) tm.assert_sp_frame_equal(appended.iloc[:, :3], self.frame.iloc[:, :3], exact_indices=False) + a = a[['B', 'C', 'A']].head(2) + b = b.head(2) + + expected = pd.SparseDataFrame({ + "B": [0., 1, None, 3], + "C": [0., 1, 5, 6], + "A": [None, None, 2, 3], + "D": [None, None, 5, None], + }, index=a.index | b.index, columns=['B', 'C', 'A', 'D']) + with tm.assert_produces_warning(None): + appended = a.append(b, sort=False) + + tm.assert_frame_equal(appended, 
expected) + + with tm.assert_produces_warning(None): + appended = a.append(b, sort=True) + + tm.assert_sp_frame_equal(appended, expected[['A', 'B', 'C', 'D']]) + def test_astype(self): sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4], dtype=np.int64), diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py index 70fd1da529d46..9e392457edbc3 100644 --- a/pandas/tests/sparse/test_combine_concat.py +++ b/pandas/tests/sparse/test_combine_concat.py @@ -202,17 +202,29 @@ def test_concat_different_fill_value(self): exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) + def test_concat_different_columns_sort_warns(self): + sparse = self.dense1.to_sparse() + sparse3 = self.dense3.to_sparse() + + with tm.assert_produces_warning(FutureWarning): + res = pd.concat([sparse, sparse3]) + with tm.assert_produces_warning(FutureWarning): + exp = pd.concat([self.dense1, self.dense3]) + + exp = exp.to_sparse() + tm.assert_sp_frame_equal(res, exp) + def test_concat_different_columns(self): # fill_value = np.nan sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse() - res = pd.concat([sparse, sparse3]) - exp = pd.concat([self.dense1, self.dense3]).to_sparse() + res = pd.concat([sparse, sparse3], sort=True) + exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse() tm.assert_sp_frame_equal(res, exp) - res = pd.concat([sparse3, sparse]) - exp = pd.concat([self.dense3, self.dense1]).to_sparse() + res = pd.concat([sparse3, sparse], sort=True) + exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse() exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) @@ -220,13 +232,15 @@ def test_concat_different_columns(self): sparse = self.dense1.to_sparse(fill_value=0) sparse3 = self.dense3.to_sparse(fill_value=0) - res = pd.concat([sparse, sparse3]) - exp = pd.concat([self.dense1, self.dense3]).to_sparse(fill_value=0) + res = pd.concat([sparse, sparse3], sort=True) + exp = 
(pd.concat([self.dense1, self.dense3], sort=True) + .to_sparse(fill_value=0)) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) - res = pd.concat([sparse3, sparse]) - exp = pd.concat([self.dense3, self.dense1]).to_sparse(fill_value=0) + res = pd.concat([sparse3, sparse], sort=True) + exp = (pd.concat([self.dense3, self.dense1], sort=True) + .to_sparse(fill_value=0)) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) @@ -234,13 +248,13 @@ def test_concat_different_columns(self): sparse = self.dense1.to_sparse() sparse3 = self.dense3.to_sparse(fill_value=0) # each columns keeps its fill_value, thus compare in dense - res = pd.concat([sparse, sparse3]) - exp = pd.concat([self.dense1, self.dense3]) + res = pd.concat([sparse, sparse3], sort=True) + exp = pd.concat([self.dense1, self.dense3], sort=True) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) - res = pd.concat([sparse3, sparse]) - exp = pd.concat([self.dense3, self.dense1]) + res = pd.concat([sparse3, sparse], sort=True) + exp = pd.concat([self.dense3, self.dense1], sort=True) assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp)
Preserve column order upon concatenation to obey least astonishment principle. Allow old behavior to be enabled by adding a boolean switch to concat and DataFrame.append, mismatch_sort, which is by default disabled. - [x] Close #4588 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20613
2018-04-05T03:29:11Z
2018-05-01T20:06:12Z
2018-05-01T20:06:11Z
2018-05-01T20:07:06Z
REF: IntervalIndex[IntervalArray]
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index c460b19640f46..c18b94fea9a28 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1924,11 +1924,24 @@ untouched. If the data is modified, it is because you did so explicitly. dtypes ------ -The main types stored in pandas objects are ``float``, ``int``, ``bool``, -``datetime64[ns]`` and ``datetime64[ns, tz]``, ``timedelta[ns]``, -``category`` and ``object``. In addition these dtypes have item sizes, e.g. -``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>` -for more detail on ``datetime64[ns, tz]`` dtypes. +For the most part, pandas uses NumPy arrays and dtypes for Series or individual +columns of a DataFrame. The main types allowed in pandas objects are ``float``, +``int``, ``bool``, and ``datetime64[ns]`` (note that NumPy does not support +timezone-aware datetimes). + +In addition to NumPy's types, pandas :ref:`extends <extending.extension-types>` +NumPy's type-system for a few cases. + +* :ref:`Categorical <categorical>` +* :ref:`Datetime with Timezone <timeseries.timezone_series>` +* :ref:`Period <timeseries.periods>` +* :ref:`Interval <advanced.indexing.intervallindex>` + +Pandas uses the ``object`` dtype for storing strings. + +Finally, arbitrary objects may be stored using the ``object`` dtype, but should +be avoided to the extent possible (for performance and interoperability with +other libraries and methods. See :ref:`basics.object_conversion`). A convenient :attr:`~DataFrame.dtypes` attribute for DataFrame returns a Series with the data type of each column. diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a2210c39bb6a6..6ffa7ebf994e5 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -66,6 +66,36 @@ Current Behavior: result + +.. 
_whatsnew_0240.enhancements.interval: + +Storing Interval Data in Series and DataFrame +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interval data may now be stored in a ``Series`` or ``DataFrame``, in addition to an +:class:`IntervalIndex` like previously (:issue:`19453`). + +.. ipython:: python + + ser = pd.Series(pd.interval_range(0, 5)) + ser + ser.dtype + +Previously, these would be cast to a NumPy array of ``Interval`` objects. In general, +this should result in better performance when storing an array of intervals in +a :class:`Series`. + +Note that the ``.values`` of a ``Series`` containing intervals is no longer a NumPy +array, but rather an ``ExtensionArray``: + +.. ipython:: python + + ser.values + +This is the same behavior as ``Series.values`` for categorical data. See +:ref:`whatsnew_0240.api_breaking.interval_values` for more. + + .. _whatsnew_0240.enhancements.other: Other Enhancements @@ -90,6 +120,45 @@ Other Enhancements Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0240.api_breaking.interval_values: + +``IntervalIndex.values`` is now an ``IntervalArray`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :attr:`~Interval.values` attribute of an :class:`IntervalIndex` now returns an +``IntervalArray``, rather than a NumPy array of :class:`Interval` objects (:issue:`19453`). + +Previous Behavior: + +.. code-block:: ipython + + In [1]: idx = pd.interval_range(0, 4) + + In [2]: idx.values + Out[2]: + array([Interval(0, 1, closed='right'), Interval(1, 2, closed='right'), + Interval(2, 3, closed='right'), Interval(3, 4, closed='right')], + dtype=object) + +New Behavior: + +.. ipython:: python + + idx = pd.interval_range(0, 4) + idx.values + +This mirrors ``CateogricalIndex.values``, which returns a ``Categorical``. + +For situations where you need an ``ndarray`` of ``Interval`` objects, use +:meth:`numpy.asarray` or ``idx.astype(object)``. + +.. 
ipython:: python + + np.asarray(idx) + idx.values.astype(object) + + .. _whatsnew_0240.api.datetimelike.normalize: Tick DateOffset Normalize Restrictions @@ -345,6 +414,7 @@ Interval ^^^^^^^^ - Bug in the :class:`IntervalIndex` constructor where the ``closed`` parameter did not always override the inferred ``closed`` (:issue:`19370`) +- Bug in the ``IntervalIndex`` repr where a trailing comma was missing after the list of intervals (:issue:`20611`) - - diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index fbb7265a17f8b..93fb9d21d7b3a 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -98,6 +98,26 @@ cdef class IntervalMixin(object): msg = 'cannot compute length between {left!r} and {right!r}' raise TypeError(msg.format(left=self.left, right=self.right)) + def _check_closed_matches(self, other, name='other'): + """Check if the closed attribute of `other` matches. + + Note that 'left' and 'right' are considered different from 'both'. + + Parameters + ---------- + other : Interval, IntervalIndex, IntervalArray + name : str + Name to use for 'other' in the error message. + + Raises + ------ + ValueError + When `other` is not closed exactly the same as self. + """ + if self.closed != other.closed: + msg = "'{}.closed' is '{}', expected '{}'." 
+ raise ValueError(msg.format(name, other.closed, self.closed)) + cdef _interval_like(other): return (hasattr(other, 'left') diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 1b8a43d4293a5..72ff0828e3486 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -2,5 +2,6 @@ ExtensionScalarOpsMixin) from .categorical import Categorical # noqa from .datetimes import DatetimeArrayMixin # noqa +from .interval import IntervalArray # noqa from .period import PeriodArrayMixin # noqa from .timedelta import TimedeltaArrayMixin # noqa diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 375db28a4ee5a..3dd5e06af9d7d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -20,6 +20,7 @@ _ensure_int64, _ensure_object, _ensure_platform_int, + is_extension_array_dtype, is_dtype_equal, is_datetimelike, is_datetime64_dtype, @@ -1243,6 +1244,11 @@ def __array__(self, dtype=None): ret = take_1d(self.categories.values, self._codes) if dtype and not is_dtype_equal(dtype, self.categories.dtype): return np.asarray(ret, dtype) + if is_extension_array_dtype(ret): + # When we're a Categorical[ExtensionArray], like Interval, + # we need to ensure __array__ get's all the way to an + # ndarray. 
+ ret = np.asarray(ret) return ret def __setstate__(self, state): diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py new file mode 100644 index 0000000000000..4ad53e16bc439 --- /dev/null +++ b/pandas/core/arrays/interval.py @@ -0,0 +1,1049 @@ +import textwrap +import numpy as np + +from pandas._libs.interval import (Interval, IntervalMixin, + intervals_to_interval_bounds) +from pandas.compat import add_metaclass +from pandas.compat.numpy import function as nv +import pandas.core.common as com +from pandas.core.config import get_option +from pandas.core.dtypes.cast import maybe_convert_platform +from pandas.core.dtypes.common import (is_categorical_dtype, is_float_dtype, + is_integer_dtype, is_interval_dtype, + is_scalar, is_string_dtype, + is_datetime64_any_dtype, + is_timedelta64_dtype, is_interval, + pandas_dtype) +from pandas.core.dtypes.dtypes import IntervalDtype +from pandas.core.dtypes.generic import (ABCDatetimeIndex, ABCPeriodIndex, + ABCSeries, ABCIntervalIndex, + ABCInterval) +from pandas.core.dtypes.missing import isna, notna +from pandas.core.indexes.base import Index, _ensure_index +from pandas.util._decorators import Appender +from pandas.util._doctools import _WritableDoc + +from . import ExtensionArray, Categorical + +_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) +_interval_shared_docs = {} +_shared_docs_kwargs = dict( + klass='IntervalArray', + name='' +) + + +_interval_shared_docs['class'] = """%(summary)s + +.. versionadded:: %(versionadded)s + +.. warning:: + + The indexing behaviors are provisional and may change in + a future version of pandas. + +Parameters +---------- +data : array-like (1-dimensional) + Array-like containing Interval objects from which to build the + %(klass)s. +closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or + neither. +%(name)s\ +copy : boolean, default False + Copy the meta-data. 
+dtype : dtype or None, default None + If None, dtype will be inferred + + .. versionadded:: 0.23.0 + +Attributes +---------- +left +right +closed +mid +length +values +is_non_overlapping_monotonic + +Methods +------- +from_arrays +from_tuples +from_breaks +set_closed +%(extra_methods)s\ + +%(examples)s\ + +Notes +------ +See the `user guide +<http://pandas.pydata.org/pandas-docs/stable/advanced.html#intervalindex>`_ +for more. + +See Also +-------- +Index : The base pandas Index type +Interval : A bounded slice-like interval; the elements of an IntervalIndex +interval_range : Function to create a fixed frequency IntervalIndex +cut, qcut : Convert arrays of continuous data into Categoricals/Series of + Intervals +""" + + +@Appender(_interval_shared_docs['class'] % dict( + klass="IntervalArray", + summary="Pandas array for interval data that are closed on the same side", + versionadded="0.24.0", + name='', extra_methods='', examples='', +)) +@add_metaclass(_WritableDoc) +class IntervalArray(IntervalMixin, ExtensionArray): + dtype = IntervalDtype() + ndim = 1 + can_hold_na = True + _na_value = _fill_value = np.nan + + def __new__(cls, data, closed=None, dtype=None, copy=False, + fastpath=False, verify_integrity=True): + + if fastpath: + return cls._simple_new(data.left, data.right, closed, + copy=copy, dtype=dtype, + verify_integrity=False) + + if isinstance(data, ABCSeries) and is_interval_dtype(data): + data = data.values + + if isinstance(data, (cls, ABCIntervalIndex)): + left = data.left + right = data.right + closed = closed or data.closed + else: + + # don't allow scalars + if is_scalar(data): + msg = ("{}(...) 
must be called with a collection of some kind," + " {} was passed") + raise TypeError(msg.format(cls.__name__, data)) + + # might need to convert empty or purely na data + data = maybe_convert_platform_interval(data) + left, right, infer_closed = intervals_to_interval_bounds( + data, validate_closed=closed is None) + closed = closed or infer_closed + + return cls._simple_new(left, right, closed, copy=copy, dtype=dtype, + verify_integrity=verify_integrity) + + @classmethod + def _simple_new(cls, left, right, closed=None, + copy=False, dtype=None, verify_integrity=True): + result = IntervalMixin.__new__(cls) + + closed = closed or 'right' + left = _ensure_index(left, copy=copy) + right = _ensure_index(right, copy=copy) + + if dtype is not None: + # GH 19262: dtype must be an IntervalDtype to override inferred + dtype = pandas_dtype(dtype) + if not is_interval_dtype(dtype): + msg = 'dtype must be an IntervalDtype, got {dtype}' + raise TypeError(msg.format(dtype=dtype)) + elif dtype.subtype is not None: + left = left.astype(dtype.subtype) + right = right.astype(dtype.subtype) + + # coerce dtypes to match if needed + if is_float_dtype(left) and is_integer_dtype(right): + right = right.astype(left.dtype) + elif is_float_dtype(right) and is_integer_dtype(left): + left = left.astype(right.dtype) + + if type(left) != type(right): + msg = ('must not have differing left [{ltype}] and right ' + '[{rtype}] types') + raise ValueError(msg.format(ltype=type(left).__name__, + rtype=type(right).__name__)) + elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): + # GH 19016 + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalArray') + raise TypeError(msg) + elif isinstance(left, ABCPeriodIndex): + msg = 'Period dtypes are not supported, use a PeriodIndex instead' + raise ValueError(msg) + elif (isinstance(left, ABCDatetimeIndex) and + str(left.tz) != str(right.tz)): + msg = ("left and right must have the same time zone, got " + 
"'{left_tz}' and '{right_tz}'") + raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz)) + + result._left = left + result._right = right + result._closed = closed + if verify_integrity: + result._validate() + return result + + @classmethod + def _from_sequence(cls, scalars): + return cls(scalars) + + @classmethod + def _from_factorized(cls, values, original): + return cls(values, closed=original.closed) + + _interval_shared_docs['from_breaks'] = """ + Construct an %(klass)s from an array of splits. + + Parameters + ---------- + breaks : array-like (1-dimensional) + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + copy : boolean, default False + copy the data + dtype : dtype or None, default None + If None, dtype will be inferred + + .. versionadded:: 0.23.0 + + Examples + -------- + >>> pd.%(klass)s.from_breaks([0, 1, 2, 3]) + %(klass)s([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + %(klass)s.from_arrays : Construct from a left and right array + %(klass)s.from_tuples : Construct from a sequence of tuples + """ + + @classmethod + @Appender(_interval_shared_docs['from_breaks'] % _shared_docs_kwargs) + def from_breaks(cls, breaks, closed='right', copy=False, dtype=None): + breaks = maybe_convert_platform_interval(breaks) + + return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, + dtype=dtype) + + _interval_shared_docs['from_arrays'] = """ + Construct from two arrays defining the left and right bounds. + + Parameters + ---------- + left : array-like (1-dimensional) + Left bounds for each interval. + right : array-like (1-dimensional) + Right bounds for each interval. 
+ closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + copy : boolean, default False + Copy the data. + dtype : dtype, optional + If None, dtype will be inferred. + + .. versionadded:: 0.23.0 + + Returns + ------- + %(klass)s + + Notes + ----- + Each element of `left` must be less than or equal to the `right` + element at the same position. If an element is missing, it must be + missing in both `left` and `right`. A TypeError is raised when + using an unsupported type for `left` or `right`. At the moment, + 'category', 'object', and 'string' subtypes are not supported. + + Raises + ------ + ValueError + When a value is missing in only one of `left` or `right`. + When a value in `left` is greater than the corresponding value + in `right`. + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. + %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits. + %(klass)s.from_tuples : Construct an %(klass)s from an + array-like of tuples. + + + Examples + -------- + >>> %(klass)s.from_arrays([0, 1, 2], [1, 2, 3]) + %(klass)s([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') + """ + + @classmethod + @Appender(_interval_shared_docs['from_arrays'] % _shared_docs_kwargs) + def from_arrays(cls, left, right, closed='right', copy=False, dtype=None): + left = maybe_convert_platform_interval(left) + right = maybe_convert_platform_interval(right) + + return cls._simple_new(left, right, closed, copy=copy, + dtype=dtype, verify_integrity=True) + + _interval_shared_docs['from_intervals'] = """ + Construct an %(klass)s from a 1d array of Interval objects + + .. deprecated:: 0.23.0 + + Parameters + ---------- + data : array-like (1-dimensional) + Array of Interval objects. All intervals must be closed on the same + sides. 
+ copy : boolean, default False + by-default copy the data, this is compat only and ignored + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 + + Examples + -------- + >>> pd.%(klass)s.from_intervals([pd.Interval(0, 1), + ... pd.Interval(1, 2)]) + %(klass)s([(0, 1], (1, 2]] + closed='right', dtype='interval[int64]') + + The generic Index constructor work identically when it infers an array + of all intervals: + + >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)]) + %(klass)s([(0, 1], (1, 2]] + closed='right', dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + %(klass)s.from_arrays : Construct an %(klass)s from a left and + right array + %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits + %(klass)s.from_tuples : Construct an %(klass)s from an + array-like of tuples + """ + + _interval_shared_docs['from_tuples'] = """ + Construct an %(klass)s from an array-like of tuples + + Parameters + ---------- + data : array-like (1-dimensional) + Array of tuples + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. 
+ copy : boolean, default False + by-default copy the data, this is compat only and ignored + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 + + + Examples + -------- + >>> pd.%(klass)s.from_tuples([(0, 1), (1, 2)]) + %(klass)s([(0, 1], (1, 2]], + closed='right', dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + %(klass)s.from_arrays : Construct an %(klass)s from a left and + right array + %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits + """ + + @classmethod + @Appender(_interval_shared_docs['from_tuples'] % _shared_docs_kwargs) + def from_tuples(cls, data, closed='right', copy=False, dtype=None): + if len(data): + left, right = [], [] + else: + # ensure that empty data keeps input dtype + left = right = data + + for d in data: + if isna(d): + lhs = rhs = np.nan + else: + name = cls.__name__ + try: + # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...] + lhs, rhs = d + except ValueError: + msg = ('{name}.from_tuples requires tuples of ' + 'length 2, got {tpl}').format(name=name, tpl=d) + raise ValueError(msg) + except TypeError: + msg = ('{name}.from_tuples received an invalid ' + 'item, {tpl}').format(name=name, tpl=d) + raise TypeError(msg) + lhs, rhs = d + left.append(lhs) + right.append(rhs) + + return cls.from_arrays(left, right, closed, copy=False, + dtype=dtype) + + def _validate(self): + """Verify that the IntervalArray is valid. 
+ + Checks that + + * closed is valid + * left and right match lengths + * left and right have the same missing values + * left is always below right + """ + if self.closed not in _VALID_CLOSED: + raise ValueError("invalid option for 'closed': {closed}" + .format(closed=self.closed)) + if len(self.left) != len(self.right): + raise ValueError('left and right must have the same length') + left_mask = notna(self.left) + right_mask = notna(self.right) + if not (left_mask == right_mask).all(): + raise ValueError('missing values must be missing in the same ' + 'location both left and right sides') + if not (self.left[left_mask] <= self.right[left_mask]).all(): + raise ValueError('left side of interval must be <= right side') + + # --------- + # Interface + # --------- + def __iter__(self): + return iter(np.asarray(self)) + + def __len__(self): + return len(self.left) + + def __getitem__(self, value): + left = self.left[value] + right = self.right[value] + + # scalar + if not isinstance(left, Index): + if isna(left): + return self._fill_value + return Interval(left, right, self.closed) + + return self._shallow_copy(left, right) + + def __setitem__(self, key, value): + # na value: need special casing to set directly on numpy arrays + needs_float_conversion = False + if is_scalar(value) and isna(value): + if is_integer_dtype(self.dtype.subtype): + # can't set NaN on a numpy integer array + needs_float_conversion = True + elif is_datetime64_any_dtype(self.dtype.subtype): + # need proper NaT to set directly on the numpy array + value = np.datetime64('NaT') + elif is_timedelta64_dtype(self.dtype.subtype): + # need proper NaT to set directly on the numpy array + value = np.timedelta64('NaT') + value_left, value_right = value, value + + # scalar interval + elif is_interval_dtype(value) or isinstance(value, ABCInterval): + self._check_closed_matches(value, name="value") + value_left, value_right = value.left, value.right + + else: + # list-like of intervals + try: + array = 
IntervalArray(value) + value_left, value_right = array.left, array.right + except TypeError: + # wrong type: not interval or NA + msg = "'value' should be an interval type, got {} instead." + raise TypeError(msg.format(type(value))) + + # Need to ensure that left and right are updated atomically, so we're + # forced to copy, update the copy, and swap in the new values. + left = self.left.copy(deep=True) + if needs_float_conversion: + left = left.astype('float') + left.values[key] = value_left + self._left = left + + right = self.right.copy(deep=True) + if needs_float_conversion: + right = right.astype('float') + right.values[key] = value_right + self._right = right + + def fillna(self, value=None, method=None, limit=None): + """ + Fill NA/NaN values using the specified method. + + Parameters + ---------- + value : scalar, dict, Series + If a scalar value is passed it is used to fill all missing values. + Alternatively, a Series or dict can be used to fill in different + values for each index. The value should not be a list. The + value(s) passed should be either Interval objects or NA/NaN. + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + (Not implemented yet for IntervalArray) + Method to use for filling holes in reindexed Series + limit : int, default None + (Not implemented yet for IntervalArray) + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. 
+ + Returns + ------- + filled : IntervalArray with NA/NaN filled + """ + if method is not None: + raise TypeError('Filling by method is not supported for ' + 'IntervalArray.') + if limit is not None: + raise TypeError('limit is not supported for IntervalArray.') + + if not isinstance(value, ABCInterval): + msg = ("'IntervalArray.fillna' only supports filling with a " + "scalar 'pandas.Interval'. Got a '{}' instead." + .format(type(value).__name__)) + raise TypeError(msg) + + value = getattr(value, '_values', value) + self._check_closed_matches(value, name="value") + + left = self.left.fillna(value=value.left) + right = self.right.fillna(value=value.right) + return self._shallow_copy(left, right) + + @property + def dtype(self): + return IntervalDtype(self.left.dtype) + + def astype(self, dtype, copy=True): + """ + Cast to an ExtensionArray or NumPy array with dtype 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + array : ExtensionArray or ndarray + ExtensionArray or NumPy ndarray with 'dtype' for its dtype. + """ + dtype = pandas_dtype(dtype) + if is_interval_dtype(dtype): + if dtype == self.dtype: + return self.copy() if copy else self + + # need to cast to different subtype + try: + new_left = self.left.astype(dtype.subtype) + new_right = self.right.astype(dtype.subtype) + except TypeError: + msg = ('Cannot convert {dtype} to {new_dtype}; subtypes are ' + 'incompatible') + raise TypeError(msg.format(dtype=self.dtype, new_dtype=dtype)) + return self._shallow_copy(new_left, new_right) + elif is_categorical_dtype(dtype): + return Categorical(np.asarray(self)) + # TODO: This try/except will be repeated. 
+ try: + return np.asarray(self).astype(dtype, copy=copy) + except (TypeError, ValueError): + msg = 'Cannot cast {name} to dtype {dtype}' + raise TypeError(msg.format(name=type(self).__name__, dtype=dtype)) + + @classmethod + def _concat_same_type(cls, to_concat): + """ + Concatenate multiple IntervalArray + + Parameters + ---------- + to_concat : sequence of IntervalArray + + Returns + ------- + IntervalArray + """ + closed = set(interval.closed for interval in to_concat) + if len(closed) != 1: + raise ValueError("Intervals must all be closed on the same side.") + closed = closed.pop() + + left = np.concatenate([interval.left for interval in to_concat]) + right = np.concatenate([interval.right for interval in to_concat]) + return cls._simple_new(left, right, closed=closed, copy=False) + + def _shallow_copy(self, left=None, right=None, closed=None): + """ + Return a new IntervalArray with the replacement attributes + + Parameters + ---------- + left : array-like + Values to be used for the left-side of the the intervals. + If None, the existing left and right values will be used. + + right : array-like + Values to be used for the right-side of the the intervals. + If None and left is IntervalArray-like, the left and right + of the IntervalArray-like will be used. + + closed : {'left', 'right', 'both', 'neither'}, optional + Whether the intervals are closed on the left-side, right-side, both + or neither. If None, the existing closed will be used. 
+ """ + if left is None: + + # no values passed + left, right = self.left, self.right + + elif right is None: + + # only single value passed, could be an IntervalArray + # or array of Intervals + if not isinstance(left, (type(self), ABCIntervalIndex)): + left = type(self)(left) + + left, right = left.left, left.right + else: + + # both left and right are values + pass + + closed = closed or self.closed + return self._simple_new( + left, right, closed=closed, verify_integrity=False) + + def copy(self, deep=False): + """ + Return a copy of the array. + + Parameters + ---------- + deep : bool, default False + Also copy the underlying data backing this array. + + Returns + ------- + IntervalArray + """ + left = self.left.copy(deep=True) if deep else self.left + right = self.right.copy(deep=True) if deep else self.right + closed = self.closed + # TODO: Could skip verify_integrity here. + return type(self).from_arrays(left, right, closed=closed) + + def _formatting_values(self): + return np.asarray(self) + + def isna(self): + return isna(self.left) + + @property + def nbytes(self): + return self.left.nbytes + self.right.nbytes + + @property + def size(self): + # Avoid materializing self.values + return self.left.size + + @property + def shape(self): + return self.left.shape + + @property + def itemsize(self): + return self.left.itemsize + self.right.itemsize + + def take(self, indices, allow_fill=False, fill_value=None, axis=None, + **kwargs): + """ + Take elements from the IntervalArray. + + Parameters + ---------- + indices : sequence of integers + Indices to be taken. + + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. 
+ + fill_value : Interval or NA, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + axis : any, default None + Present for compat with IntervalIndex; does nothing. + + Returns + ------- + IntervalArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + """ + from pandas.core.algorithms import take + + nv.validate_take(tuple(), kwargs) + + fill_left = fill_right = fill_value + if allow_fill: + if fill_value is None: + fill_left = fill_right = self.left._na_value + elif is_interval(fill_value): + self._check_closed_matches(fill_value, name='fill_value') + fill_left, fill_right = fill_value.left, fill_value.right + elif not is_scalar(fill_value) and notna(fill_value): + msg = ("'IntervalArray.fillna' only supports filling with a " + "'scalar pandas.Interval or NA'. Got a '{}' instead." + .format(type(fill_value).__name__)) + raise ValueError(msg) + + left_take = take(self.left, indices, + allow_fill=allow_fill, fill_value=fill_left) + right_take = take(self.right, indices, + allow_fill=allow_fill, fill_value=fill_right) + + return self._shallow_copy(left_take, right_take) + + def value_counts(self, dropna=True): + """ + Returns a Series containing counts of each interval. + + Parameters + ---------- + dropna : boolean, default True + Don't include counts of NaN. 
+ + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + # TODO: implement this is a non-naive way! + from pandas.core.algorithms import value_counts + return value_counts(np.asarray(self), dropna=dropna) + + # Formatting + + def _format_data(self): + + # TODO: integrate with categorical and make generic + # name argument is unused here; just for compat with base / categorical + n = len(self) + max_seq_items = min((get_option( + 'display.max_seq_items') or n) // 10, 10) + + formatter = str + + if n == 0: + summary = '[]' + elif n == 1: + first = formatter(self[0]) + summary = '[{first}]'.format(first=first) + elif n == 2: + first = formatter(self[0]) + last = formatter(self[-1]) + summary = '[{first}, {last}]'.format(first=first, last=last) + else: + + if n > max_seq_items: + n = min(max_seq_items // 2, 10) + head = [formatter(x) for x in self[:n]] + tail = [formatter(x) for x in self[-n:]] + summary = '[{head} ... {tail}]'.format( + head=', '.join(head), tail=', '.join(tail)) + else: + head = [] + tail = [formatter(x) for x in self] + summary = '[{tail}]'.format(tail=', '.join(tail)) + + return summary + + def __repr__(self): + tpl = textwrap.dedent("""\ + {cls}({data}, + {lead}closed='{closed}', + {lead}dtype='{dtype}')""") + return tpl.format(cls=self.__class__.__name__, + data=self._format_data(), + lead=' ' * len(self.__class__.__name__) + ' ', + closed=self.closed, dtype=self.dtype) + + def _format_space(self): + space = ' ' * (len(self.__class__.__name__) + 1) + return "\n{space}".format(space=space) + + @property + def left(self): + """ + Return the left endpoints of each Interval in the IntervalArray as + an Index + """ + return self._left + + @property + def right(self): + """ + Return the right endpoints of each Interval in the IntervalArray as + an Index + """ + return self._right + + @property + def closed(self): + """ + Whether the intervals are closed on the left-side, right-side, both or + neither + """ + return 
self._closed + + _interval_shared_docs['set_closed'] = """ + Return an %(klass)s identical to the current one, but closed on the + specified side + + .. versionadded:: 0.24.0 + + Parameters + ---------- + closed : {'left', 'right', 'both', 'neither'} + Whether the intervals are closed on the left-side, right-side, both + or neither. + + Returns + ------- + new_index : %(klass)s + + Examples + -------- + >>> index = pd.interval_range(0, 3) + >>> index + %(klass)s([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') + >>> index.set_closed('both') + %(klass)s([[0, 1], [1, 2], [2, 3]] + closed='both', + dtype='interval[int64]') + """ + + @Appender(_interval_shared_docs['set_closed'] % _shared_docs_kwargs) + def set_closed(self, closed): + if closed not in _VALID_CLOSED: + msg = "invalid option for 'closed': {closed}" + raise ValueError(msg.format(closed=closed)) + + return self._shallow_copy(closed=closed) + + @property + def length(self): + """ + Return an Index with entries denoting the length of each Interval in + the IntervalArray + """ + try: + return self.right - self.left + except TypeError: + # length not defined for some types, e.g. string + msg = ('IntervalArray contains Intervals without defined length, ' + 'e.g. Intervals with string endpoints') + raise TypeError(msg) + + @property + def mid(self): + """ + Return the midpoint of each Interval in the IntervalArray as an Index + """ + try: + return 0.5 * (self.left + self.right) + except TypeError: + # datetime safe version + return self.left + 0.5 * self.length + + @property + def is_non_overlapping_monotonic(self): + """ + Return True if the IntervalArray is non-overlapping (no Intervals share + points) and is either monotonic increasing or monotonic decreasing, + else False + """ + # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) + # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) 
+ # we already require left <= right + + # strict inequality for closed == 'both'; equality implies overlapping + # at a point when both sides of intervals are included + if self.closed == 'both': + return bool((self.right[:-1] < self.left[1:]).all() or + (self.left[:-1] > self.right[1:]).all()) + + # non-strict inequality when closed != 'both'; at least one side is + # not included in the intervals, so equality does not imply overlapping + return bool((self.right[:-1] <= self.left[1:]).all() or + (self.left[:-1] >= self.right[1:]).all()) + + # Conversion + def __array__(self, dtype=None): + """ + Return the IntervalArray's data as a numpy array of Interval + objects (with dtype='object') + """ + left = self.left + right = self.right + mask = self.isna() + closed = self._closed + + result = np.empty(len(left), dtype=object) + for i in range(len(left)): + if mask[i]: + result[i] = np.nan + else: + result[i] = Interval(left[i], right[i], closed) + return result + + _interval_shared_docs['to_tuples'] = """\ + Return an %(return_type)s of tuples of the form (left, right) + + Parameters + ---------- + na_tuple : boolean, default True + Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA + value itself if False, ``nan``. + + ..versionadded:: 0.23.0 + + Returns + ------- + tuples: %(return_type)s + %(examples)s\ + """ + + @Appender(_interval_shared_docs['to_tuples'] % dict( + return_type='ndarray', + examples='', + )) + def to_tuples(self, na_tuple=True): + tuples = com._asarray_tuplesafe(zip(self.left, self.right)) + if not na_tuple: + # GH 18756 + tuples = np.where(~self.isna(), tuples, np.nan) + return tuples + + def repeat(self, repeats, **kwargs): + """ + Repeat elements of an IntervalArray. + + Returns a new IntervalArray where each element of the current + IntervalArray is repeated consecutively a given number of times. + + Parameters + ---------- + repeats : int + The number of repetitions for each element. 
+ + **kwargs + Additional keywords have no effect but might be accepted for + compatibility with numpy. + + Returns + ------- + IntervalArray + Newly created IntervalArray with repeated elements. + + See Also + -------- + Index.repeat : Equivalent function for Index + Series.repeat : Equivalent function for Series + numpy.repeat : Underlying implementation + """ + left_repeat = self.left.repeat(repeats, **kwargs) + right_repeat = self.right.repeat(repeats, **kwargs) + return self._shallow_copy(left=left_repeat, right=right_repeat) + + +def maybe_convert_platform_interval(values): + """ + Try to do platform conversion, with special casing for IntervalArray. + Wrapper around maybe_convert_platform that alters the default return + dtype in certain cases to be compatible with IntervalArray. For example, + empty lists return with integer dtype instead of object dtype, which is + prohibited for IntervalArray. + + Parameters + ---------- + values : array-like + + Returns + ------- + array + """ + if isinstance(values, (list, tuple)) and len(values) == 0: + # GH 19016 + # empty lists/tuples get object dtype by default, but this is not + # prohibited for IntervalArray, so coerce to integer instead + return np.array([], dtype=np.int64) + elif is_categorical_dtype(values): + values = np.asarray(values) + + return maybe_convert_platform(values) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 9f6813bc38464..feedc0ebd86f4 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -15,6 +15,7 @@ is_period_dtype, is_object_dtype, is_bool_dtype, + is_interval_dtype, is_dtype_equal, _NS_DTYPE, _TD_DTYPE) @@ -58,6 +59,8 @@ def get_dtype_kinds(l): typ = 'bool' elif is_period_dtype(dtype): typ = str(arr.dtype) + elif is_interval_dtype(dtype): + typ = str(arr.dtype) else: typ = dtype.kind typs.add(typ) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index de837efc235a0..fbce7dc28dfe0 100644 --- 
a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -690,14 +690,13 @@ class IntervalDtypeType(type): pass -class IntervalDtype(PandasExtensionDtype): +class IntervalDtype(PandasExtensionDtype, ExtensionDtype): """ A Interval duck-typed class, suitable for holding an interval THIS IS NOT A REAL NUMPY DTYPE """ name = 'interval' - type = IntervalDtypeType kind = None str = '|O08' base = np.dtype('O') @@ -751,6 +750,17 @@ def __new__(cls, subtype=None): cls._cache[str(subtype)] = u return u + @classmethod + def construct_array_type(cls): + """Return the array type associated with this dtype + + Returns + ------- + type + """ + from pandas.core.arrays import IntervalArray + return IntervalArray + @classmethod def construct_from_string(cls, string): """ @@ -765,6 +775,11 @@ def construct_from_string(cls, string): msg = "a string needs to be passed, got type {typ}" raise TypeError(msg.format(typ=type(string))) + @property + def type(self): + from pandas import Interval + return Interval + def __unicode__(self): if self.subtype is None: return "interval" diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index ad4588f254174..89a7f2ca53a09 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -10,7 +10,7 @@ from .common import (is_string_dtype, is_datetimelike, is_datetimelike_v_numeric, is_float_dtype, is_datetime64_dtype, is_datetime64tz_dtype, - is_timedelta64_dtype, is_interval_dtype, + is_timedelta64_dtype, is_period_dtype, is_complex_dtype, is_string_like_dtype, is_bool_dtype, @@ -196,10 +196,6 @@ def _isna_ndarraylike(obj): else: values = obj result = values.isna() - elif is_interval_dtype(values): - # TODO(IntervalArray): remove this if block - from pandas import IntervalIndex - result = IntervalIndex(obj).isna() elif is_string_dtype(dtype): # Working around NumPy ticket 1542 shape = values.shape diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 
ba60d10099948..6ca76139b70ca 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -272,7 +272,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, **kwargs) # interval - if is_interval_dtype(data) or is_interval_dtype(dtype): + if ((is_interval_dtype(data) or is_interval_dtype(dtype)) and + not is_object_dtype(dtype)): from .interval import IntervalIndex closed = kwargs.get('closed', None) return IntervalIndex(data, dtype=dtype, name=name, copy=copy, diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6b9e9dc2f9377..9375a60d0964c 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,20 +1,17 @@ """ define the IntervalIndex """ +import textwrap +import warnings import numpy as np -import warnings -from pandas.core.dtypes.missing import notna, isna -from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex -from pandas.core.dtypes.dtypes import IntervalDtype -from pandas.core.dtypes.cast import ( - maybe_convert_platform, find_common_type, maybe_downcast_to_dtype) +from pandas.compat import add_metaclass +from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype from pandas.core.dtypes.common import ( _ensure_platform_int, is_list_like, is_datetime_or_timedelta_dtype, is_datetime64tz_dtype, - is_categorical_dtype, - is_string_dtype, is_integer_dtype, is_float_dtype, is_interval_dtype, @@ -22,8 +19,7 @@ is_scalar, is_float, is_number, - is_integer, - pandas_dtype) + is_integer) from pandas.core.indexes.base import ( Index, _ensure_index, default_pprint, _index_shared_docs) @@ -31,26 +27,33 @@ from pandas._libs import Timestamp, Timedelta from pandas._libs.interval import ( Interval, IntervalMixin, IntervalTree, - intervals_to_interval_bounds) +) from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.timedeltas import timedelta_range from 
pandas.core.indexes.multi import MultiIndex -from pandas.compat.numpy import function as nv import pandas.core.common as com from pandas.util._decorators import cache_readonly, Appender +from pandas.util._doctools import _WritableDoc +from pandas.util._exceptions import rewrite_exception from pandas.core.config import get_option from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import DateOffset import pandas.core.indexes.base as ibase +from pandas.core.arrays.interval import (IntervalArray, + _interval_shared_docs) + +_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='IntervalIndex', - target_klass='IntervalIndex or list of Intervals')) - - -_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) + target_klass='IntervalIndex or list of Intervals', + name=textwrap.dedent("""\ + name : object, optional + to be stored in the index. + """), + )) def _get_next_label(label): @@ -96,34 +99,6 @@ def _get_interval_closed_bounds(interval): return left, right -def maybe_convert_platform_interval(values): - """ - Try to do platform conversion, with special casing for IntervalIndex. - Wrapper around maybe_convert_platform that alters the default return - dtype in certain cases to be compatible with IntervalIndex. For example, - empty lists return with integer dtype instead of object dtype, which is - prohibited for IntervalIndex. 
- - Parameters - ---------- - values : array-like - - Returns - ------- - array - """ - if is_categorical_dtype(values): - # GH 21243/21253 - values = np.array(values) - - if isinstance(values, (list, tuple)) and len(values) == 0: - # GH 19016 - # empty lists/tuples get object dtype by default, but this is not - # prohibited for IntervalIndex, so coerce to integer instead - return np.array([], dtype=np.int64) - return maybe_convert_platform(values) - - def _new_IntervalIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have @@ -132,58 +107,16 @@ def _new_IntervalIndex(cls, d): return cls.from_arrays(**d) -class IntervalIndex(IntervalMixin, Index): - """ - Immutable Index implementing an ordered, sliceable set. IntervalIndex - represents an Index of Interval objects that are all closed on the same - side. - - .. versionadded:: 0.20.0 - - .. warning:: - - The indexing behaviors are provisional and may change in - a future version of pandas. - - Parameters - ---------- - data : array-like (1-dimensional) - Array-like containing Interval objects from which to build the - IntervalIndex - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both or - neither. - name : object, optional - Name to be stored in the index. - copy : boolean, default False - Copy the meta-data - dtype : dtype or None, default None - If None, dtype will be inferred - - .. 
versionadded:: 0.23.0 - - Attributes - ---------- - closed - is_non_overlapping_monotonic - left - length - mid - right - values - - Methods - ------- - contains - from_arrays - from_breaks - from_tuples - get_indexer - get_loc - set_closed +@Appender(_interval_shared_docs['class'] % dict( + klass="IntervalIndex", + summary="Immutable index of intervals that are closed on the same side.", + name=_index_doc_kwargs['name'], + versionadded="0.20.0", + extra_methods="contains\n", + examples=textwrap.dedent("""\ Examples - --------- + -------- A new ``IntervalIndex`` is typically constructed using :func:`interval_range`: @@ -197,21 +130,11 @@ class IntervalIndex(IntervalMixin, Index): See further examples in the doc strings of ``interval_range`` and the mentioned constructor methods. + """), - Notes - ------ - See the `user guide - <http://pandas.pydata.org/pandas-docs/stable/advanced.html#intervalindex>`_ - for more. - - See Also - -------- - Index : The base pandas Index type - Interval : A bounded slice-like interval; the elements of an IntervalIndex - interval_range : Function to create a fixed frequency IntervalIndex - cut, qcut : Convert arrays of continuous data into Categoricals/Series of - Intervals - """ +)) +@add_metaclass(_WritableDoc) +class IntervalIndex(IntervalMixin, Index): _typ = 'intervalindex' _comparables = ['name'] _attributes = ['name', 'closed'] @@ -219,131 +142,50 @@ class IntervalIndex(IntervalMixin, Index): # we would like our indexing holder to defer to us _defer_to_indexing = True + # Immutable, so we are able to cache computations like isna in '_mask' _mask = None def __new__(cls, data, closed=None, dtype=None, copy=False, name=None, fastpath=False, verify_integrity=True): if fastpath: - return cls._simple_new(data.left, data.right, closed, name, - copy=copy, verify_integrity=False) + return cls._simple_new(data, name) if name is None and hasattr(data, 'name'): name = data.name - if isinstance(data, IntervalIndex): - left = data.left - 
right = data.right - closed = closed or data.closed - else: - - # don't allow scalars - if is_scalar(data): - cls._scalar_data_error(data) - - data = maybe_convert_platform_interval(data) - left, right, infer_closed = intervals_to_interval_bounds( - data, validate_closed=closed is None) - closed = closed or infer_closed + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype, + fastpath=fastpath, + verify_integrity=verify_integrity) - return cls._simple_new(left, right, closed, name, copy=copy, - dtype=dtype, verify_integrity=verify_integrity) + return cls._simple_new(array, name) @classmethod - def _simple_new(cls, left, right, closed=None, name=None, copy=False, - dtype=None, verify_integrity=True): - result = IntervalMixin.__new__(cls) + def _simple_new(cls, array, name, closed=None): + """ + Construct from an IntervalArray - closed = closed or 'right' - left = _ensure_index(left, copy=copy) - right = _ensure_index(right, copy=copy) - - if dtype is not None: - # GH 19262: dtype must be an IntervalDtype to override inferred - dtype = pandas_dtype(dtype) - if not is_interval_dtype(dtype): - msg = 'dtype must be an IntervalDtype, got {dtype}' - raise TypeError(msg.format(dtype=dtype)) - elif dtype.subtype is not None: - left = left.astype(dtype.subtype) - right = right.astype(dtype.subtype) - - # coerce dtypes to match if needed - if is_float_dtype(left) and is_integer_dtype(right): - right = right.astype(left.dtype) - elif is_float_dtype(right) and is_integer_dtype(left): - left = left.astype(right.dtype) - - if type(left) != type(right): - msg = ('must not have differing left [{ltype}] and right ' - '[{rtype}] types') - raise ValueError(msg.format(ltype=type(left).__name__, - rtype=type(right).__name__)) - elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): - # GH 19016 - msg = ('category, object, and string subtypes are not supported ' - 'for IntervalIndex') - raise 
TypeError(msg) - elif isinstance(left, ABCPeriodIndex): - msg = 'Period dtypes are not supported, use a PeriodIndex instead' - raise ValueError(msg) - elif (isinstance(left, ABCDatetimeIndex) and - str(left.tz) != str(right.tz)): - msg = ("left and right must have the same time zone, got " - "'{left_tz}' and '{right_tz}'") - raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz)) - - result._left = left - result._right = right - result._closed = closed + Parameters + ---------- + array : IntervalArray + name : str + Attached as result.name + closed : Any + Ignored. + """ + result = IntervalMixin.__new__(cls) + result._data = array result.name = name - if verify_integrity: - result._validate() result._reset_identity() return result @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, left=None, right=None, **kwargs): - if left is None: - - # no values passed - left, right = self.left, self.right - - elif right is None: - - # only single value passed, could be an IntervalIndex - # or array of Intervals - if not isinstance(left, IntervalIndex): - left = self._constructor(left) - - left, right = left.left, left.right - else: - - # both left and right are values - pass - + result = self._data._shallow_copy(left=left, right=right) attributes = self._get_attributes_dict() attributes.update(kwargs) - attributes['verify_integrity'] = False - return self._simple_new(left, right, **attributes) - - def _validate(self): - """ - Verify that the IntervalIndex is valid. 
- """ - if self.closed not in _VALID_CLOSED: - raise ValueError("invalid option for 'closed': {closed}" - .format(closed=self.closed)) - if len(self.left) != len(self.right): - raise ValueError('left and right must have the same length') - left_mask = notna(self.left) - right_mask = notna(self.right) - if not (left_mask == right_mask).all(): - raise ValueError('missing values must be missing in the same ' - 'location both left and right sides') - if not (self.left[left_mask] <= self.right[left_mask]).all(): - raise ValueError('left side of interval must be <= right side') - self._mask = ~left_mask + return self._simple_new(result, **attributes) @cache_readonly def hasnans(self): @@ -412,272 +254,60 @@ def contains(self, key): return False @classmethod + @Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs) def from_breaks(cls, breaks, closed='right', name=None, copy=False, dtype=None): - """ - Construct an IntervalIndex from an array of splits - - Parameters - ---------- - breaks : array-like (1-dimensional) - Left and right bounds for each interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither. - name : object, optional - Name to be stored in the index. - copy : boolean, default False - copy the data - dtype : dtype or None, default None - If None, dtype will be inferred - - .. 
versionadded:: 0.23.0 - - Examples - -------- - >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) - IntervalIndex([(0, 1], (1, 2], (2, 3]] - closed='right', - dtype='interval[int64]') - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex - IntervalIndex.from_arrays : Construct an IntervalIndex from a left and - right array - IntervalIndex.from_tuples : Construct an IntervalIndex from a - list/array of tuples - """ - breaks = maybe_convert_platform_interval(breaks) - - return cls.from_arrays(breaks[:-1], breaks[1:], closed, - name=name, copy=copy, dtype=dtype) + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy, + dtype=dtype) + return cls._simple_new(array, name=name) @classmethod + @Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs) def from_arrays(cls, left, right, closed='right', name=None, copy=False, dtype=None): - """ - Construct from two arrays defining the left and right bounds. - - Parameters - ---------- - left : array-like (1-dimensional) - Left bounds for each interval. - right : array-like (1-dimensional) - Right bounds for each interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither. - name : object, optional - Name to be stored in the index. - copy : boolean, default False - Copy the data. - dtype : dtype, optional - If None, dtype will be inferred. - - .. versionadded:: 0.23.0 - - Returns - ------- - index : IntervalIndex - - Notes - ----- - Each element of `left` must be less than or equal to the `right` - element at the same position. If an element is missing, it must be - missing in both `left` and `right`. A TypeError is raised when - using an unsupported type for `left` or `right`. At the moment, - 'category', 'object', and 'string' subtypes are not supported. 
- - Raises - ------ - ValueError - When a value is missing in only one of `left` or `right`. - When a value in `left` is greater than the corresponding value - in `right`. - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - IntervalIndex.from_breaks : Construct an IntervalIndex from an array of - splits. - IntervalIndex.from_tuples : Construct an IntervalIndex from a - list/array of tuples. - - Examples - -------- - >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) - IntervalIndex([(0, 1], (1, 2], (2, 3]] - closed='right', - dtype='interval[int64]') - - If you want to segment different groups of people based on - ages, you can apply the method as follows: - - >>> ages = pd.IntervalIndex.from_arrays([0, 2, 13], - ... [2, 13, 19], closed='left') - >>> ages - IntervalIndex([[0, 2), [2, 13), [13, 19)] - closed='left', - dtype='interval[int64]') - >>> s = pd.Series(['baby', 'kid', 'teen'], ages) - >>> s - [0, 2) baby - [2, 13) kid - [13, 19) teen - dtype: object - - Values may be missing, but they must be missing in both arrays. - - >>> pd.IntervalIndex.from_arrays([0, np.nan, 13], - ... [2, np.nan, 19]) - IntervalIndex([(0.0, 2.0], nan, (13.0, 19.0]] - closed='right', - dtype='interval[float64]') - """ - left = maybe_convert_platform_interval(left) - right = maybe_convert_platform_interval(right) - - return cls._simple_new(left, right, closed, name=name, copy=copy, - dtype=dtype, verify_integrity=True) + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray.from_arrays(left, right, closed, copy=copy, + dtype=dtype) + return cls._simple_new(array, name=name) @classmethod + @Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs) def from_intervals(cls, data, closed=None, name=None, copy=False, dtype=None): - """ - Construct an IntervalIndex from a 1d array of Interval objects - - .. 
deprecated:: 0.23.0 - - Parameters - ---------- - data : array-like (1-dimensional) - Array of Interval objects. All intervals must be closed on the same - sides. - name : object, optional - Name to be stored in the index. - copy : boolean, default False - by-default copy the data, this is compat only and ignored - dtype : dtype or None, default None - If None, dtype will be inferred - - .. versionadded:: 0.23.0 - - Examples - -------- - >>> pd.IntervalIndex.from_intervals([pd.Interval(0, 1), - ... pd.Interval(1, 2)]) - IntervalIndex([(0, 1], (1, 2]] - closed='right', dtype='interval[int64]') - - The generic Index constructor work identically when it infers an array - of all intervals: - - >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)]) - IntervalIndex([(0, 1], (1, 2]] - closed='right', dtype='interval[int64]') - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex - IntervalIndex.from_arrays : Construct an IntervalIndex from a left and - right array - IntervalIndex.from_breaks : Construct an IntervalIndex from an array of - splits - IntervalIndex.from_tuples : Construct an IntervalIndex from a - list/array of tuples - """ msg = ('IntervalIndex.from_intervals is deprecated and will be ' - 'removed in a future version; use IntervalIndex(...) instead') + 'removed in a future version; Use IntervalIndex(...) 
instead') warnings.warn(msg, FutureWarning, stacklevel=2) - return cls(data, closed=closed, name=name, copy=copy, dtype=dtype) + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype) + + if name is None and isinstance(data, cls): + name = data.name + + return cls._simple_new(array, name=name) @classmethod + @Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs) def from_tuples(cls, data, closed='right', name=None, copy=False, dtype=None): - """ - Construct an IntervalIndex from a list/array of tuples - - Parameters - ---------- - data : array-like (1-dimensional) - Array of tuples - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither. - name : object, optional - Name to be stored in the index. - copy : boolean, default False - by-default copy the data, this is compat only and ignored - dtype : dtype or None, default None - If None, dtype will be inferred - - .. versionadded:: 0.23.0 - - Examples - -------- - >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]) - IntervalIndex([(0, 1], (1, 2]], - closed='right', dtype='interval[int64]') - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex - IntervalIndex.from_arrays : Construct an IntervalIndex from a left and - right array - IntervalIndex.from_breaks : Construct an IntervalIndex from an array of - splits - """ - if len(data): - left, right = [], [] - else: - left = right = data - - for d in data: - if isna(d): - lhs = rhs = np.nan - else: - try: - # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...] 
- lhs, rhs = d - except ValueError: - msg = ('IntervalIndex.from_tuples requires tuples of ' - 'length 2, got {tpl}').format(tpl=d) - raise ValueError(msg) - except TypeError: - msg = ('IntervalIndex.from_tuples received an invalid ' - 'item, {tpl}').format(tpl=d) - raise TypeError(msg) - left.append(lhs) - right.append(rhs) - - return cls.from_arrays(left, right, closed, name=name, copy=False, - dtype=dtype) - - def to_tuples(self, na_tuple=True): - """ - Return an Index of tuples of the form (left, right) - - Parameters - ---------- - na_tuple : boolean, default True - Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA - value itself if False, ``nan``. - - .. versionadded:: 0.23.0 - + with rewrite_exception("IntervalArray", cls.__name__): + arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, + dtype=dtype) + return cls._simple_new(arr, name=name) + + @Appender(_interval_shared_docs['to_tuples'] % dict( + return_type="Index", + examples=""" Examples -------- >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3]) >>> idx.to_tuples() Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object') >>> idx.to_tuples(na_tuple=False) - Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') - """ - tuples = com._asarray_tuplesafe(zip(self.left, self.right)) - if not na_tuple: - # GH 18756 - tuples = np.where(~self._isnan, tuples, np.nan) + Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""", + )) + def to_tuples(self, na_tuple=True): + tuples = self._data.to_tuples(na_tuple=na_tuple) return Index(tuples) @cache_readonly @@ -691,7 +321,7 @@ def left(self): Return the left endpoints of each Interval in the IntervalIndex as an Index """ - return self._left + return self._data._left @property def right(self): @@ -699,7 +329,7 @@ def right(self): Return the right endpoints of each Interval in the IntervalIndex as an Index """ - return self._right + return self._data._right @property def closed(self): @@ -707,42 +337,17 @@ def 
closed(self): Whether the intervals are closed on the left-side, right-side, both or neither """ - return self._closed + return self._data._closed + @Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs) def set_closed(self, closed): - """ - Return an IntervalIndex identical to the current one, but closed on the - specified side - - .. versionadded:: 0.24.0 - - Parameters - ---------- - closed : {'left', 'right', 'both', 'neither'} - Whether the intervals are closed on the left-side, right-side, both - or neither. - - Returns - ------- - new_index : IntervalIndex - - Examples - -------- - >>> index = pd.interval_range(0, 3) - >>> index - IntervalIndex([(0, 1], (1, 2], (2, 3]] - closed='right', - dtype='interval[int64]') - >>> index.set_closed('both') - IntervalIndex([[0, 1], [1, 2], [2, 3]] - closed='both', - dtype='interval[int64]') - """ if closed not in _VALID_CLOSED: msg = "invalid option for 'closed': {closed}" raise ValueError(msg.format(closed=closed)) - return self._shallow_copy(closed=closed) + # return self._shallow_copy(closed=closed) + array = self._data.set_closed(closed) + return self._simple_new(array, self.name) @property def length(self): @@ -750,23 +355,22 @@ def length(self): Return an Index with entries denoting the length of each Interval in the IntervalIndex """ - try: - return self.right - self.left - except TypeError: - # length not defined for some types, e.g. string - msg = ('IntervalIndex contains Intervals without defined length, ' - 'e.g. 
Intervals with string endpoints') - raise TypeError(msg) + return self._data.length @property def size(self): - # Avoid materializing self.values - return self.left.size + # Avoid materializing ndarray[Interval] + return self._data.size @property def shape(self): - # Avoid materializing self.values - return self.left.shape + # Avoid materializing ndarray[Interval] + return self._data.shape + + @property + def itemsize(self): + # Avoid materializing ndarray[Interval] + return self._data.itemsize def __len__(self): return len(self.left) @@ -774,13 +378,20 @@ def __len__(self): @cache_readonly def values(self): """ - Return the IntervalIndex's data as a numpy array of Interval - objects (with dtype='object') + Return the IntervalIndex's data as an IntervalArray. """ + return self._data + + @cache_readonly + def _values(self): + return self._data + + @cache_readonly + def _ndarray_values(self): left = self.left right = self.right mask = self._isnan - closed = self._closed + closed = self.closed result = np.empty(len(left), dtype=object) for i in range(len(left)): @@ -792,15 +403,12 @@ def values(self): def __array__(self, result=None): """ the array interface, return my values """ - return self.values + return self._ndarray_values def __array_wrap__(self, result, context=None): # we don't want the superclass implementation return result - def _array_values(self): - return self.values - def __reduce__(self): d = dict(left=self.left, right=self.right) @@ -809,30 +417,25 @@ def __reduce__(self): @Appender(_index_shared_docs['copy']) def copy(self, deep=False, name=None): - left = self.left.copy(deep=True) if deep else self.left - right = self.right.copy(deep=True) if deep else self.right - name = name if name is not None else self.name - closed = self.closed - return type(self).from_arrays(left, right, closed=closed, name=name) + array = self._data.copy(deep=deep) + attributes = self._get_attributes_dict() + if name is not None: + attributes.update(name=name) + + return 
self._simple_new(array, **attributes) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): - dtype = pandas_dtype(dtype) - if is_interval_dtype(dtype) and dtype != self.dtype: - try: - new_left = self.left.astype(dtype.subtype) - new_right = self.right.astype(dtype.subtype) - except TypeError: - msg = ('Cannot convert {dtype} to {new_dtype}; subtypes are ' - 'incompatible') - raise TypeError(msg.format(dtype=self.dtype, new_dtype=dtype)) - return self._shallow_copy(new_left, new_right) + with rewrite_exception('IntervalArray', self.__class__.__name__): + new_values = self.values.astype(dtype, copy=copy) + if is_interval_dtype(new_values): + return self._shallow_copy(new_values.left, new_values.right) return super(IntervalIndex, self).astype(dtype, copy=copy) @cache_readonly def dtype(self): """Return the dtype object of the underlying data""" - return IntervalDtype(self.left.dtype.name) + return self._data.dtype @property def inferred_type(self): @@ -851,11 +454,7 @@ def mid(self): """ Return the midpoint of each Interval in the IntervalIndex as an Index """ - try: - return 0.5 * (self.left + self.right) - except TypeError: - # datetime safe version - return self.left + 0.5 * self.length + return self._data.mid @cache_readonly def is_monotonic(self): @@ -890,25 +489,7 @@ def is_unique(self): @cache_readonly def is_non_overlapping_monotonic(self): - """ - Return True if the IntervalIndex is non-overlapping (no Intervals share - points) and is either monotonic increasing or monotonic decreasing, - else False - """ - # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) - # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) 
- # we already require left <= right - - # strict inequality for closed == 'both'; equality implies overlapping - # at a point when both sides of intervals are included - if self.closed == 'both': - return bool((self.right[:-1] < self.left[1:]).all() or - (self.left[:-1] > self.right[1:]).all()) - - # non-strict inequality when closed != 'both'; at least one side is - # not included in the intervals, so equality does not imply overlapping - return bool((self.right[:-1] <= self.left[1:]).all() or - (self.left[:-1] >= self.right[1:]).all()) + return self._data.is_non_overlapping_monotonic @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): @@ -1299,33 +880,10 @@ def _concat_same_dtype(self, to_concat, name): @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): - nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) - left, right = self.left, self.right - - if fill_value is None: - fill_value = self._na_value - mask = indices == -1 - - if not mask.any(): - # we won't change dtype here in this case - # if we don't need - allow_fill = False - - taker = lambda x: x.take(indices, allow_fill=allow_fill, - fill_value=fill_value) - - try: - new_left = taker(left) - new_right = taker(right) - except ValueError: - - # we need to coerce; migth have NA's in an - # integer dtype - new_left = taker(left.astype(float)) - new_right = taker(right.astype(float)) - - return self._shallow_copy(new_left, new_right) + result = self._data.take(indices, axis=axis, allow_fill=allow_fill, + fill_value=fill_value, **kwargs) + attributes = self._get_attributes_dict() + return self._simple_new(result, **attributes) def __getitem__(self, value): mask = self._isnan[value] @@ -1385,7 +943,7 @@ def _format_data(self, name=None): tail = [formatter(x) for x in self] summary = '[{tail}]'.format(tail=', '.join(tail)) - return summary + 
self._format_space() + return summary + ',' + self._format_space() def _format_attrs(self): attrs = [('closed', repr(self.closed))] diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 7edb5b16ce77a..a161413594f95 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -205,7 +205,9 @@ def _hash_categorical(c, encoding, hash_key): ------- ndarray of hashed values array, same size as len(c) """ - hashed = hash_array(c.categories.values, encoding, hash_key, + # Convert ExtensionArrays to ndarrays + values = np.asarray(c.categories.values) + hashed = hash_array(values, encoding, hash_key, categorize=False) # we have uint64, as we don't directly support missing values diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 03a5e8528f72d..7a1e72637f4ce 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -57,6 +57,7 @@ Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT, Categorical, CategoricalIndex, IntervalIndex, Interval, TimedeltaIndex) +from pandas.core.arrays import IntervalArray from pandas.core.sparse.api import SparseSeries, SparseDataFrame from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.generic import NDFrame @@ -402,13 +403,17 @@ def encode(obj): u'freq': u_safe(getattr(obj, 'freqstr', None)), u'tz': tz, u'compress': compressor} - elif isinstance(obj, IntervalIndex): - return {u'typ': u'interval_index', + elif isinstance(obj, (IntervalIndex, IntervalArray)): + if isinstance(obj, IntervalIndex): + typ = u'interval_index' + else: + typ = u'interval_array' + return {u'typ': typ, u'klass': u(obj.__class__.__name__), u'name': getattr(obj, 'name', None), - u'left': getattr(obj, '_left', None), - u'right': getattr(obj, '_right', None), - u'closed': getattr(obj, '_closed', None)} + u'left': getattr(obj, 'left', None), + u'right': getattr(obj, 'right', None), + u'closed': getattr(obj, 'closed', None)} elif isinstance(obj, MultiIndex): return {u'typ': u'multi_index', u'klass': 
u(obj.__class__.__name__), @@ -610,7 +615,7 @@ def decode(obj): result = result.tz_localize('UTC').tz_convert(tz) return result - elif typ == u'interval_index': + elif typ in (u'interval_index', 'interval_array'): return globals()[obj[u'klass']].from_arrays(obj[u'left'], obj[u'right'], obj[u'closed'], diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 62e0f1cb717f0..02ac7fc7d5ed7 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -552,10 +552,8 @@ def test_basic(self): s = Series(ii, name='A') - # dtypes - # series results in object dtype currently, - assert not is_interval_dtype(s.dtype) - assert not is_interval_dtype(s) + assert is_interval_dtype(s.dtype) + assert is_interval_dtype(s) def test_basic_dtype(self): assert is_interval_dtype('interval[int64]') diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 8fd3d1a57f6c8..0832e9f7d08df 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -161,6 +161,10 @@ class TestGroupby(BaseDecimal, base.BaseGroupbyTests): pass +class TestSetitem(BaseDecimal, base.BaseSetitemTests): + pass + + # TODO(extension) @pytest.mark.xfail(reason=( "raising AssertionError as this is not implemented, " diff --git a/pandas/tests/extension/interval/__init__.py b/pandas/tests/extension/interval/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/interval/test_interval.py b/pandas/tests/extension/interval/test_interval.py new file mode 100644 index 0000000000000..a10a56ddfdfac --- /dev/null +++ b/pandas/tests/extension/interval/test_interval.py @@ -0,0 +1,193 @@ +import pytest +import numpy as np + +from pandas import Index, Interval, IntervalIndex, date_range, timedelta_range +from pandas.core.arrays import IntervalArray +from pandas.core.dtypes.dtypes import IntervalDtype +from 
pandas.tests.extension import base +import pandas.util.testing as tm + + +def make_data(): + N = 100 + left = np.random.uniform(size=N).cumsum() + right = left + np.random.uniform(size=N) + return [Interval(l, r) for l, r in zip(left, right)] + + +@pytest.fixture(params=[ + (Index([0, 2, 4]), Index([1, 3, 5])), + (Index([0., 1., 2.]), Index([1., 2., 3.])), + (timedelta_range('0 days', periods=3), + timedelta_range('1 day', periods=3)), + (date_range('20170101', periods=3), date_range('20170102', periods=3)), + (date_range('20170101', periods=3, tz='US/Eastern'), + date_range('20170102', periods=3, tz='US/Eastern'))], + ids=lambda x: str(x[0].dtype)) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +@pytest.fixture +def dtype(): + return IntervalDtype() + + +@pytest.fixture +def data(): + """Length-100 PeriodArray for semantics test.""" + return IntervalArray(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return IntervalArray.from_tuples([None, (0, 1)]) + + +@pytest.fixture +def data_repeated(): + """Return different versions of data for count times""" + def gen(count): + for _ in range(count): + yield IntervalArray(make_data()) + yield gen + + +@pytest.fixture +def data_for_sorting(): + return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)]) + + +@pytest.fixture +def data_missing_for_sorting(): + return IntervalArray.from_tuples([(1, 2), None, (0, 1)]) + + +@pytest.fixture +def na_value(): + return np.nan + + +@pytest.fixture +def data_for_grouping(): + a = (0, 1) + b = (1, 2) + c = (2, 3) + return IntervalArray.from_tuples([b, b, None, None, a, a, b, c]) + + +class BaseInterval(object): + pass + + +class TestDtype(BaseInterval, base.BaseDtypeTests): + + def test_array_type_with_arg(self, data, dtype): + assert dtype.construct_array_type() is IntervalArray + + +class TestCasting(BaseInterval, base.BaseCastingTests): + pass + + 
+class TestConstructors(BaseInterval, base.BaseConstructorsTests): + pass + + +class TestGetitem(BaseInterval, base.BaseGetitemTests): + pass + + +class TestGrouping(BaseInterval, base.BaseGroupbyTests): + pass + + +class TestInterface(BaseInterval, base.BaseInterfaceTests): + pass + + +class TestMethods(BaseInterval, base.BaseMethodsTests): + @pytest.mark.parametrize('repeats', [0, 1, 5]) + def test_repeat(self, left_right_dtypes, repeats): + left, right = left_right_dtypes + result = IntervalArray.from_arrays(left, right).repeat(repeats) + expected = IntervalArray.from_arrays( + left.repeat(repeats), right.repeat(repeats)) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize('bad_repeats, msg', [ + (-1, 'negative dimensions are not allowed'), + ('foo', r'invalid literal for (int|long)\(\) with base 10')]) + def test_repeat_errors(self, bad_repeats, msg): + array = IntervalArray.from_breaks(range(4)) + with tm.assert_raises_regex(ValueError, msg): + array.repeat(bad_repeats) + + @pytest.mark.parametrize('new_closed', [ + 'left', 'right', 'both', 'neither']) + def test_set_closed(self, closed, new_closed): + # GH 21670 + array = IntervalArray.from_breaks(range(10), closed=closed) + result = array.set_closed(new_closed) + expected = IntervalArray.from_breaks(range(10), closed=new_closed) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.skip(reason='addition is not defined for intervals') + def test_combine_add(self, data_repeated): + pass + + +class TestMissing(BaseInterval, base.BaseMissingTests): + # Index.fillna only accepts scalar `value`, so we have to skip all + # non-scalar fill tests. 
+ unsupported_fill = pytest.mark.skip("Unsupported fillna option.") + + @unsupported_fill + def test_fillna_limit_pad(self): + pass + + @unsupported_fill + def test_fillna_series_method(self): + pass + + @unsupported_fill + def test_fillna_limit_backfill(self): + pass + + @unsupported_fill + def test_fillna_series(self): + pass + + def test_non_scalar_raises(self, data_missing): + msg = "Got a 'list' instead." + with tm.assert_raises_regex(TypeError, msg): + data_missing.fillna([1, 1]) + + +class TestReshaping(BaseInterval, base.BaseReshapingTests): + pass + + +class TestSetitem(BaseInterval, base.BaseSetitemTests): + + def test_set_na(self, left_right_dtypes): + left, right = left_right_dtypes + result = IntervalArray.from_arrays(left, right) + result[0] = np.nan + + expected_left = Index([left._na_value] + list(left[1:])) + expected_right = Index([right._na_value] + list(right[1:])) + expected = IntervalArray.from_arrays(expected_left, expected_right) + + self.assert_extension_array_equal(result, expected) + + +def test_repr_matches(): + idx = IntervalIndex.from_breaks([1, 2, 3]) + a = repr(idx) + b = repr(idx.values) + assert a.replace("Index", "Array") == b diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py index 589134632c7e9..44b818be84e31 100644 --- a/pandas/tests/extension/test_common.py +++ b/pandas/tests/extension/test_common.py @@ -70,7 +70,6 @@ def test_astype_no_copy(): @pytest.mark.parametrize('dtype', [ dtypes.DatetimeTZDtype('ns', 'US/Central'), dtypes.PeriodDtype("D"), - dtypes.IntervalDtype(), ]) def test_is_not_extension_array_dtype(dtype): assert not isinstance(dtype, dtypes.ExtensionDtype) @@ -79,6 +78,7 @@ def test_is_not_extension_array_dtype(dtype): @pytest.mark.parametrize('dtype', [ dtypes.CategoricalDtype(), + dtypes.IntervalDtype(), ]) def test_is_extension_array_dtype(dtype): assert isinstance(dtype, dtypes.ExtensionDtype) diff --git a/pandas/tests/indexes/common.py 
b/pandas/tests/indexes/common.py index f78bd583288a4..bb82d5578481b 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -886,7 +886,7 @@ def test_hasnans_isnans(self): assert not idx.hasnans idx = index.copy() - values = idx.values + values = np.asarray(idx.values) if len(index) == 0: continue @@ -928,7 +928,7 @@ def test_fillna(self): idx.fillna([idx[0]]) idx = index.copy() - values = idx.values + values = np.asarray(idx.values) if isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index 3745f79d7d65d..d46e19ef56dd0 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -8,6 +8,7 @@ Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical, CategoricalIndex, date_range, timedelta_range, period_range, notna) from pandas.compat import lzip +from pandas.core.arrays import IntervalArray from pandas.core.dtypes.common import is_categorical_dtype from pandas.core.dtypes.dtypes import IntervalDtype import pandas.core.common as com @@ -74,7 +75,7 @@ def test_constructor_nan(self, constructor, breaks, closed): assert result.closed == closed assert result.dtype.subtype == expected_subtype - tm.assert_numpy_array_equal(result.values, expected_values) + tm.assert_numpy_array_equal(result._ndarray_values, expected_values) @pytest.mark.parametrize('breaks', [ [], @@ -93,7 +94,7 @@ def test_constructor_empty(self, constructor, breaks, closed): assert result.empty assert result.closed == closed assert result.dtype.subtype == expected_subtype - tm.assert_numpy_array_equal(result.values, expected_values) + tm.assert_numpy_array_equal(result._ndarray_values, expected_values) @pytest.mark.parametrize('breaks', [ tuple('0123456789'), @@ -348,6 +349,17 @@ def test_override_inferred_closed(self, constructor, data, closed): result = constructor(data, 
closed=closed) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('values_constructor', [ + list, np.array, IntervalIndex, IntervalArray]) + def test_index_object_dtype(self, values_constructor): + # Index(intervals, dtype=object) is an Index (not an IntervalIndex) + intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)] + values = values_constructor(intervals) + result = Index(values, dtype=object) + + assert type(result) is Index + tm.assert_numpy_array_equal(result.values, np.array(values)) + class TestFromIntervals(TestClassConstructors): """ @@ -368,3 +380,7 @@ def test_deprecated(self): ivs = [Interval(0, 1), Interval(1, 2)] with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): IntervalIndex.from_intervals(ivs) + + @pytest.mark.skip(reason='parent class test that is not applicable') + def test_index_object_dtype(self): + pass diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 431833f2627d8..0dc5970c22803 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -50,7 +50,6 @@ def test_properties(self, closed): ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))] expected = np.array(ivs, dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) - tm.assert_numpy_array_equal(index.values, expected) # with nans index = self.create_index_with_nan(closed=closed) @@ -71,7 +70,6 @@ def test_properties(self, closed): for l, r in zip(expected_left, expected_right)] expected = np.array(ivs, dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) - tm.assert_numpy_array_equal(index.values, expected) @pytest.mark.parametrize('breaks', [ [1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608], @@ -136,7 +134,7 @@ def test_ensure_copied_data(self, closed): check_same='same') # by-definition make a copy - result = IntervalIndex(index.values, copy=False) + result = 
IntervalIndex(index._ndarray_values, copy=False) tm.assert_numpy_array_equal(index.left.values, result.left.values, check_same='copy') tm.assert_numpy_array_equal(index.right.values, result.right.values, @@ -978,6 +976,24 @@ def test_to_tuples_na(self, tuples, na_tuple): else: assert isna(result_na) + def test_nbytes(self): + # GH 19209 + left = np.arange(0, 4, dtype='i8') + right = np.arange(1, 5, dtype='i8') + + result = IntervalIndex.from_arrays(left, right).nbytes + expected = 64 # 4 * 8 * 2 + assert result == expected + + def test_itemsize(self): + # GH 19209 + left = np.arange(0, 4, dtype='i8') + right = np.arange(1, 5, dtype='i8') + + result = IntervalIndex.from_arrays(left, right).itemsize + expected = 16 # 8 * 2 + assert result == expected + @pytest.mark.parametrize('new_closed', [ 'left', 'right', 'both', 'neither']) def test_set_closed(self, name, closed, new_closed): diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index a5d83c1c26948..31e5bd88523d2 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1200,7 +1200,8 @@ def test_iter_box(self): 'datetime64[ns, US/Central]'), (pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'), (pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'), - (pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'), + (pd.IntervalIndex.from_breaks([0, 1, 2]), pd.core.arrays.IntervalArray, + 'interval'), ]) def test_values_consistent(array, expected_type, dtype): l_values = pd.Series(array)._values @@ -1214,6 +1215,8 @@ def test_values_consistent(array, expected_type, dtype): tm.assert_index_equal(l_values, r_values) elif pd.api.types.is_categorical(l_values): tm.assert_categorical_equal(l_values, r_values) + elif pd.api.types.is_interval_dtype(l_values): + tm.assert_interval_array_equal(l_values, r_values) else: raise TypeError("Unexpected type {}".format(type(l_values))) diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index 
95ea4658212e9..dee01ab6efff6 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import textwrap import os import pandas as pd import pytest @@ -820,6 +821,21 @@ def test_categorical_equal_message(self): tm.assert_categorical_equal(a, b) +class TestAssertIntervalArrayEqual(object): + def test_interval_array_equal_message(self): + a = pd.interval_range(0, periods=4).values + b = pd.interval_range(1, periods=4).values + + msg = textwrap.dedent("""\ + IntervalArray.left are different + + IntervalArray.left values are different \\(100.0 %\\) + \\[left\\]: Int64Index\\(\\[0, 1, 2, 3\\], dtype='int64'\\) + \\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)""") + with tm.assert_raises_regex(AssertionError, msg): + tm.assert_interval_array_equal(a, b) + + class TestRNGContext(object): def test_RNGContext(self): diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py index 667c5d9526563..c9e6e27363aed 100644 --- a/pandas/util/_doctools.py +++ b/pandas/util/_doctools.py @@ -163,6 +163,14 @@ def _make_table(self, ax, df, title, height=None): ax.axis('off') +class _WritableDoc(type): + # Remove this when Python2 support is dropped + # __doc__ is not mutable for new-style classes in Python2, which means + # we can't use @Appender to share class docstrings. This can be used + # with `add_metaclass` to make cls.__doc__ mutable. 
+ pass + + if __name__ == "__main__": import matplotlib.pyplot as plt diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py new file mode 100644 index 0000000000000..953c8a43a21b8 --- /dev/null +++ b/pandas/util/_exceptions.py @@ -0,0 +1,16 @@ +import contextlib + + +@contextlib.contextmanager +def rewrite_exception(old_name, new_name): + """Rewrite the message of an exception.""" + try: + yield + except Exception as e: + msg = e.args[0] + msg = msg.replace(old_name, new_name) + args = (msg,) + if len(e.args) > 1: + args = args + e.args[1:] + e.args = args + raise diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 54ae8cfb3d39e..9697c991122dd 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -19,7 +19,7 @@ import numpy as np import pandas as pd -from pandas.core.arrays import ExtensionArray +from pandas.core.arrays import ExtensionArray, IntervalArray from pandas.core.dtypes.missing import array_equivalent from pandas.core.dtypes.common import ( is_datetimelike_v_numeric, @@ -885,7 +885,7 @@ def _get_ilevel_values(index, level): assert_attr_equal('freq', left, right, obj=obj) if (isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex)): - assert_attr_equal('closed', left, right, obj=obj) + assert_interval_array_equal(left.values, right.values) if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): @@ -1023,6 +1023,31 @@ def assert_categorical_equal(left, right, check_dtype=True, assert_attr_equal('ordered', left, right, obj=obj) +def assert_interval_array_equal(left, right, exact='equiv', + obj='IntervalArray'): + """Test that two IntervalArrays are equivalent. + + Parameters + ---------- + left, right : IntervalArray + The IntervalArrays to compare. + exact : bool / string {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Int64Index as well. 
+ obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message + """ + _check_isinstance(left, right, IntervalArray) + + assert_index_equal(left.left, right.left, exact=exact, + obj='{obj}.left'.format(obj=obj)) + assert_index_equal(left.right, right.right, exact=exact, + obj='{obj}.left'.format(obj=obj)) + assert_attr_equal('closed', left, right, obj=obj) + + def raise_assert_detail(obj, message, left, right, diff=None): if isinstance(left, np.ndarray): left = pprint_thing(left) @@ -1251,10 +1276,7 @@ def assert_series_equal(left, right, check_dtype=True, assert_numpy_array_equal(left.get_values(), right.get_values(), check_dtype=check_dtype) elif is_interval_dtype(left) or is_interval_dtype(right): - # TODO: big hack here - left = pd.IntervalIndex(left) - right = pd.IntervalIndex(right) - assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj)) + assert_interval_array_equal(left.values, right.values) elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and is_extension_array_dtype(right) and not is_categorical_dtype(right)):
Closes #19453 Closes #19209
https://api.github.com/repos/pandas-dev/pandas/pulls/20611
2018-04-04T16:49:54Z
2018-07-13T20:57:26Z
2018-07-13T20:57:26Z
2018-11-08T18:16:49Z
BUG/PERF: Fixed IntervalIndex.nbytes and itemsize
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fb63dc16249b2..f098202281ab8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -896,6 +896,7 @@ Performance Improvements - Improved performance of :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` (:issue:`11296`) - Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`) - Improved performance of :func:`pandas.core.groupby.GroupBy.pct_change` (:issue:`19165`) +- Improved performance of ``IntervalIndex.nbytes`` and ``IntervalIndex.itemsize`` (:issue:`19209`) .. _whatsnew_0230.docs: @@ -1062,6 +1063,7 @@ Indexing - Bug in ``Index`` subclasses constructors that ignore unexpected keyword arguments (:issue:`19348`) - Bug in :meth:`Index.difference` when taking difference of an ``Index`` with itself (:issue:`20040`) - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` in presence of entire rows of NaNs in the middle of values (:issue:`20499`). +- Bug in ``IntervalIndex.nbytes`` and ``IntervalIndex.itemsize`` underreporting memory usage (:issues:`19209`). 
MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 54800d0d76d2e..733443d3c9fda 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -728,6 +728,14 @@ def size(self): # Avoid materializing self.values return self.left.size + @property + def nbytes(self): + return self.left.nbytes + self.right.nbytes + + @property + def itemsize(self): + return self.left.itemsize + self.right.itemsize + @property def shape(self): # Avoid materializing self.values diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 71a6f78125004..273d950d98c6e 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -24,6 +24,7 @@ def name(request): class TestIntervalIndex(Base): _holder = IntervalIndex + _compat_props = ['shape', 'ndim', 'size'] def setup_method(self, method): self.index = IntervalIndex.from_arrays([0, 1], [1, 2]) @@ -964,3 +965,21 @@ def test_to_tuples_na(self, tuples, na_tuple): assert all(isna(x) for x in result_na) else: assert isna(result_na) + + def test_nbytes(self): + # GH 19209 + left = np.arange(0, 4, dtype='i8') + right = np.arange(1, 5, dtype='i8') + + result = IntervalIndex.from_arrays(left, right).nbytes + expected = 64 # 4 * 8 * 2 + assert result == expected + + def test_itemsize(self): + # GH 19209 + left = np.arange(0, 4, dtype='i8') + right = np.arange(1, 5, dtype='i8') + + result = IntervalIndex.from_arrays(left, right).itemsize + expected = 16 # 8 * 2 + assert result == expected
Avoid materializing the ndarray of Intervals. Just get from left and right instead. Closes https://github.com/pandas-dev/pandas/issues/19209
https://api.github.com/repos/pandas-dev/pandas/pulls/20600
2018-04-04T00:11:41Z
2018-04-04T18:43:31Z
null
2023-05-11T01:17:40Z
BUG: HDFStore failures on timezone-aware data
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fb63dc16249b2..661402e4f2f27 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1099,6 +1099,7 @@ I/O - Bug in :meth:`pandas.io.json.json_normalize` where subrecords are not properly normalized if any subrecords values are NoneType (:issue:`20030`) - Bug in ``usecols`` parameter in :func:`pandas.io.read_csv` and :func:`pandas.io.read_table` where error is not raised correctly when passing a string. (:issue:`20529`) - Bug in :func:`HDFStore.keys` when reading a file with a softlink causes exception (:issue:`20523`) +- Bug in :class:`HDFStore` export of a :class:`Series` or an empty :class:`DataFrame` with timezone-aware data in fixed format (:issue:`20594`) Plotting ^^^^^^^^ diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f9a496edb45a3..efd2e02e12c8b 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2695,30 +2695,24 @@ def write_array(self, key, value, items=None): _tables().ObjectAtom()) vlarr.append(value) else: - if empty_array: + if is_datetime64_dtype(value.dtype): + self._handle.create_array(self.group, key, value.view('i8')) + getattr(self.group, key)._v_attrs.value_type = 'datetime64' + elif is_datetime64tz_dtype(value.dtype): + # store as UTC + # with a zone + self._handle.create_array(self.group, key, value.asi8) + + node = getattr(self.group, key) + node._v_attrs.tz = _get_tz(value.tz) + node._v_attrs.value_type = 'datetime64' + elif is_timedelta64_dtype(value.dtype): + self._handle.create_array(self.group, key, value.view('i8')) + getattr(self.group, key)._v_attrs.value_type = 'timedelta64' + elif empty_array: self.write_array_empty(key, value) else: - if is_datetime64_dtype(value.dtype): - self._handle.create_array( - self.group, key, value.view('i8')) - getattr( - self.group, key)._v_attrs.value_type = 'datetime64' - elif is_datetime64tz_dtype(value.dtype): - # store as UTC - # with a zone - 
self._handle.create_array(self.group, key, - value.asi8) - - node = getattr(self.group, key) - node._v_attrs.tz = _get_tz(value.tz) - node._v_attrs.value_type = 'datetime64' - elif is_timedelta64_dtype(value.dtype): - self._handle.create_array( - self.group, key, value.view('i8')) - getattr( - self.group, key)._v_attrs.value_type = 'timedelta64' - else: - self._handle.create_array(self.group, key, value) + self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed @@ -2771,7 +2765,11 @@ def read(self, **kwargs): def write(self, obj, **kwargs): super(SeriesFixed, self).write(obj, **kwargs) self.write_index('index', obj.index) - self.write_array('values', obj.values) + if is_datetime64tz_dtype(obj.dtype): + values = obj.dt._get_values() + else: + values = obj.values + self.write_array('values', values) self.attrs.name = obj.name diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index b34723d6cf72c..4e3816d7be0fd 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -2792,10 +2792,28 @@ def test_empty_series_frame(self): self._check_roundtrip(df1, tm.assert_frame_equal) self._check_roundtrip(df2, tm.assert_frame_equal) - def test_empty_series(self): - for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']: - s = Series(dtype=dtype) - self._check_roundtrip(s, tm.assert_series_equal) + @pytest.mark.parametrize('dtype', [ + np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]', + 'datetime64[ns, UTC]', 'datetime64[ns, US/Eastern]' + ]) + def test_empty_series(self, dtype): + s = Series(dtype=dtype) + self._check_roundtrip(s, tm.assert_series_equal) + + @pytest.mark.parametrize('dtype', [ + 'datetime64[ns, UTC]', 'datetime64[ns, US/Eastern]' + ]) + def test_series_timezone(self, dtype): + s = Series([0], dtype=dtype) + self._check_roundtrip(s, tm.assert_series_equal) + + @pytest.mark.parametrize('dtype', [ + 'datetime64[ns, UTC]', 'datetime64[ns, 
US/Eastern]' + ]) + def test_empty_frame_timezone(self, dtype): + s = Series(dtype=dtype) + df = DataFrame({'A': s}) + self._check_roundtrip(df, tm.assert_frame_equal) def test_can_serialize_dates(self):
- [x] closes #20594 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20595
2018-04-03T15:39:09Z
2018-11-23T03:25:15Z
null
2018-11-23T03:25:15Z
Safely raise errors when object contains unicode
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 09bd09b06d9b9..ca46f94752731 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1165,3 +1165,4 @@ Other - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) - Bug in accessing a :func:`pandas.get_option`, which raised ``KeyError`` rather than ``OptionError`` when looking up a non-existant option key in some cases (:issue:`19789`) +- Bug in :func:`assert_series_equal` and :func:`assert_frame_equal` for Series or DataFrames with differing unicode data (:issue:`20503`) diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index 1c878604b11a2..d6f58d16bcf64 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -290,6 +290,24 @@ def test_numpy_array_equal_message(self): assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]), obj='Index') + def test_numpy_array_equal_unicode_message(self): + # Test ensures that `assert_numpy_array_equals` raises the right + # exception when comparing np.arrays containing differing + # unicode objects (#20503) + + expected = """numpy array are different + +numpy array values are different \\(33\\.33333 %\\) +\\[left\\]: \\[á, à, ä\\] +\\[right\\]: \\[á, à, å\\]""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_numpy_array_equal(np.array([u'á', u'à', u'ä']), + np.array([u'á', u'à', u'å'])) + with tm.assert_raises_regex(AssertionError, expected): + assert_almost_equal(np.array([u'á', u'à', u'ä']), + np.array([u'á', u'à', u'å'])) + @td.skip_if_windows def test_numpy_array_equal_object_message(self): @@ -499,10 +517,13 @@ def _assert_not_equal(self, a, b, **kwargs): def test_equal(self): self._assert_equal(Series(range(3)), Series(range(3))) self._assert_equal(Series(list('abc')), Series(list('abc'))) + self._assert_equal(Series(list(u'áàä')), 
Series(list(u'áàä'))) def test_not_equal(self): self._assert_not_equal(Series(range(3)), Series(range(3)) + 1) self._assert_not_equal(Series(list('abc')), Series(list('xyz'))) + self._assert_not_equal(Series(list(u'áàä')), Series(list(u'éèë'))) + self._assert_not_equal(Series(list(u'áàä')), Series(list(b'aaa'))) self._assert_not_equal(Series(range(3)), Series(range(4))) self._assert_not_equal( Series(range(3)), Series( @@ -678,6 +699,49 @@ def test_frame_equal_message(self): pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}), by_blocks=True) + def test_frame_equal_message_unicode(self): + # Test ensures that `assert_frame_equals` raises the right + # exception when comparing DataFrames containing differing + # unicode objects (#20503) + + expected = """DataFrame\\.iloc\\[:, 1\\] are different + +DataFrame\\.iloc\\[:, 1\\] values are different \\(33\\.33333 %\\) +\\[left\\]: \\[é, è, ë\\] +\\[right\\]: \\[é, è, e̊\\]""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'], + 'E': [u'é', u'è', u'ë']}), + pd.DataFrame({'A': [u'á', u'à', u'ä'], + 'E': [u'é', u'è', u'e̊']})) + + with tm.assert_raises_regex(AssertionError, expected): + assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'], + 'E': [u'é', u'è', u'ë']}), + pd.DataFrame({'A': [u'á', u'à', u'ä'], + 'E': [u'é', u'è', u'e̊']}), + by_blocks=True) + + expected = """DataFrame\\.iloc\\[:, 0\\] are different + +DataFrame\\.iloc\\[:, 0\\] values are different \\(100\\.0 %\\) +\\[left\\]: \\[á, à, ä\\] +\\[right\\]: \\[a, a, a\\]""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'], + 'E': [u'é', u'è', u'ë']}), + pd.DataFrame({'A': ['a', 'a', 'a'], + 'E': ['e', 'e', 'e']})) + + with tm.assert_raises_regex(AssertionError, expected): + assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'], + 'E': [u'é', u'è', u'ë']}), + pd.DataFrame({'A': ['a', 'a', 'a'], + 'E': ['e', 'e', 'e']}), + 
by_blocks=True) + class TestAssertCategoricalEqual(object): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 6e13a17eba68c..e1484a9c1b390 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -38,7 +38,7 @@ import pandas.compat as compat from pandas.compat import ( filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter, - raise_with_traceback, httplib, StringIO, PY3) + raise_with_traceback, httplib, StringIO, string_types, PY3, PY2) from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, @@ -992,11 +992,20 @@ def raise_assert_detail(obj, message, left, right, diff=None): left = pprint_thing(left) elif is_categorical_dtype(left): left = repr(left) + + if PY2 and isinstance(left, string_types): + # left needs to be printable in native text type in python2 + left = left.encode('utf-8') + if isinstance(right, np.ndarray): right = pprint_thing(right) elif is_categorical_dtype(right): right = repr(right) + if PY2 and isinstance(right, string_types): + # right needs to be printable in native text type in python2 + right = right.encode('utf-8') + msg = """{obj} are different {message}
This safely turns nd.array objects that contain unicode into a representation that can be printed Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #20503 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20593
2018-04-03T13:38:07Z
2018-04-09T18:33:59Z
2018-04-09T18:33:59Z
2018-04-10T10:21:42Z
TST: add hypothesis-based tests
diff --git a/ci/appveyor-27.yaml b/ci/appveyor-27.yaml index 84107c605b14f..0f1bedd5b01a0 100644 --- a/ci/appveyor-27.yaml +++ b/ci/appveyor-27.yaml @@ -27,3 +27,4 @@ dependencies: - pytest - pytest-xdist - moto + - hypothesis diff --git a/ci/appveyor-36.yaml b/ci/appveyor-36.yaml index 5e370de39958a..9f2499a24aeac 100644 --- a/ci/appveyor-36.yaml +++ b/ci/appveyor-36.yaml @@ -25,3 +25,4 @@ dependencies: - cython - pytest - pytest-xdist + - hypothesis diff --git a/ci/circle-27-compat.yaml b/ci/circle-27-compat.yaml index 81a48d4edf11c..5ff26cc2640a9 100644 --- a/ci/circle-27-compat.yaml +++ b/ci/circle-27-compat.yaml @@ -22,6 +22,7 @@ dependencies: # universal - pytest - pytest-xdist + - hypothesis - pip: - html5lib==1.0b2 - beautifulsoup4==4.2.1 diff --git a/ci/circle-35-ascii.yaml b/ci/circle-35-ascii.yaml index 602c414b49bb2..fc464f307ca5b 100644 --- a/ci/circle-35-ascii.yaml +++ b/ci/circle-35-ascii.yaml @@ -11,3 +11,4 @@ dependencies: # universal - pytest - pytest-xdist + - hypothesis diff --git a/ci/circle-36-locale.yaml b/ci/circle-36-locale.yaml index cc852c1e2aeeb..263a7842c19fc 100644 --- a/ci/circle-36-locale.yaml +++ b/ci/circle-36-locale.yaml @@ -31,3 +31,4 @@ dependencies: - pytest - pytest-xdist - moto + - hypothesis diff --git a/ci/circle-36-locale_slow.yaml b/ci/circle-36-locale_slow.yaml index cc852c1e2aeeb..263a7842c19fc 100644 --- a/ci/circle-36-locale_slow.yaml +++ b/ci/circle-36-locale_slow.yaml @@ -31,3 +31,4 @@ dependencies: - pytest - pytest-xdist - moto + - hypothesis diff --git a/ci/travis-27-locale.yaml b/ci/travis-27-locale.yaml index 1312c1296d46a..c22fdcb41def0 100644 --- a/ci/travis-27-locale.yaml +++ b/ci/travis-27-locale.yaml @@ -22,6 +22,7 @@ dependencies: # universal - pytest - pytest-xdist + - hypothesis - pip: - html5lib==1.0b2 - beautifulsoup4==4.2.1 diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml index 22b993a2da886..da842bb3924f9 100644 --- a/ci/travis-27.yaml +++ b/ci/travis-27.yaml @@ -42,6 +42,7 @@ dependencies: - 
pytest - pytest-xdist - moto + - hypothesis - pip: - backports.lzma - cpplint diff --git a/ci/travis-35-osx.yaml b/ci/travis-35-osx.yaml index e74abac4c9775..3db389c15bf85 100644 --- a/ci/travis-35-osx.yaml +++ b/ci/travis-35-osx.yaml @@ -23,5 +23,6 @@ dependencies: # universal - pytest - pytest-xdist + - hypothesis - pip: - python-dateutil==2.5.3 diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml index fe057e714761e..4bdad9469d367 100644 --- a/ci/travis-36.yaml +++ b/ci/travis-36.yaml @@ -42,6 +42,7 @@ dependencies: - pytest-xdist - pytest-cov - moto + - hypothesis - pip: - brotlipy - coverage diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index e9939250052f1..5de78b1b90f0d 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -775,6 +775,46 @@ Tests that we have ``parametrized`` are now accessible via the test name, for ex test_cool_feature.py::test_dtypes[int8] PASSED test_cool_feature.py::test_series[int8] PASSED +Using ``hypothesis`` +~~~~~~~~~~~~~~~~~~~~ +With the usage of ``pytest``, things have become easier for testing by having reduced boilerplate for test cases and also by utilizing pytest's features like parametizing, skipping and marking test cases. + +However, one has to still come up with input data examples which can be tested against the functionality. There is always a possibility to skip testing an example which could have failed the test case. + +Hypothesis is a python package which helps in overcoming this issue by generating the input data based on some set of specifications provided by the user. +e.g consider the test case for testing python's sum function for a list of int using hypothesis. + +.. code-block:: python + + from hypothesis import strategies as st + from hypothesis import given + + + @given(st.lists(st.integers())) + def test_sum(seq): + total = 0 + for item in seq: + total += item + assert sum(seq) == total + + +output of test cases: + +.. code-block:: shell + + collecting ... 
collected 1 item + hypothesis_example.py::test_sum PASSED [100%] + + ========================== 1 passed in 0.33 seconds =========================== + +In above example by applying a decorator "@given(st.lists(st.integers()))" to the unit test function, we have directed hypothesis to generate some random list of int as input for the test function, which eventually helps in adding more coverage for our test functions by generating random input data. + +For more information about hypothesis or in general about property based testing, check below links: + +- https://hypothesis.readthedocs.io/en/latest/quickstart.html +- https://hypothesis.works/articles/what-is-property-based-testing/ +- http://blog.jessitron.com/2013/04/property-based-testing-what-is-it.html + Running the test suite ---------------------- diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py index e4a9591b95c26..ad39afc237ceb 100644 --- a/pandas/tests/reshape/test_util.py +++ b/pandas/tests/reshape/test_util.py @@ -4,31 +4,54 @@ import pandas.util.testing as tm from pandas.core.reshape.util import cartesian_product +import string +from datetime import date +from dateutil import relativedelta + +from pandas.util import _hypothesis as hp + +NO_OF_EXAMPLES_PER_TEST_CASE = 20 + class TestCartesianProduct(object): - def test_simple(self): - x, y = list('ABC'), [1, 22] + @hp.settings(max_examples=20) + @hp.given(hp.st.lists(hp.st.text(string.ascii_letters, + min_size=1, max_size=1), + min_size=1, max_size=3), + hp.get_seq((int,), False, 1, 2)) + def test_simple(self, x, y): result1, result2 = cartesian_product([x, y]) - expected1 = np.array(['A', 'A', 'B', 'B', 'C', 'C']) - expected2 = np.array([1, 22, 1, 22, 1, 22]) + expected1 = np.array([item1 for item1 in x for item2 in y]) + expected2 = np.array([item2 for item1 in x for item2 in y]) + tm.assert_numpy_array_equal(result1, expected1) tm.assert_numpy_array_equal(result2, expected2) - def test_datetimeindex(self): + 
@hp.settings(max_examples=20) + @hp.given(hp.st.dates(min_value=date(1900, 1, 1), + max_value=date(2100, 1, 1))) + def test_datetimeindex(self, d): # regression test for GitHub issue #6439 # make sure that the ordering on datetimeindex is consistent - x = date_range('2000-01-01', periods=2) + n = d + relativedelta.relativedelta(days=1) + x = date_range(d, periods=2) result1, result2 = [Index(y).day for y in cartesian_product([x, x])] - expected1 = Index([1, 1, 2, 2]) - expected2 = Index([1, 2, 1, 2]) + expected1 = Index([d.day, d.day, n.day, n.day]) + expected2 = Index([d.day, n.day, d.day, n.day]) + tm.assert_index_equal(result1, expected1) tm.assert_index_equal(result2, expected2) - def test_empty(self): + @hp.settings(max_examples=20) + @hp.given(hp.st.lists(hp.st.nothing()), + hp.get_seq((int,), False, min_size=1, max_size=10), + hp.get_seq((str,), False, min_size=1, max_size=10)) + def test_empty(self, empty_list, list_of_int, list_of_str): # product of empty factors - X = [[], [0, 1], []] - Y = [[], [], ['a', 'b', 'c']] + X = [empty_list, list_of_int, empty_list] + Y = [empty_list, empty_list, list_of_str] + for x, y in zip(X, Y): expected1 = np.array([], dtype=np.asarray(x).dtype) expected2 = np.array([], dtype=np.asarray(y).dtype) @@ -37,13 +60,24 @@ def test_empty(self): tm.assert_numpy_array_equal(result2, expected2) # empty product (empty input): - result = cartesian_product([]) + result = cartesian_product(empty_list) expected = [] assert result == expected - def test_invalid_input(self): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] + @hp.settings(max_examples=20) + @hp.given(hp.st.integers(), + hp.st.text(string.ascii_letters, min_size=1), + hp.get_seq((int, str), True, min_size=1), + hp.st.builds(lambda *x: list(x), hp.st.integers(), + hp.st.text(string.ascii_letters, min_size=1), + hp.st.lists(hp.st.integers(), min_size=1))) + def test_invalid_input(self, number, text, seq, mixed_seq): + + invalid_inputs = 
[number, + text, + seq, + mixed_seq] + msg = "Input must be a list-like of list-likes" for X in invalid_inputs: tm.assert_raises_regex(TypeError, msg, cartesian_product, X=X) diff --git a/pandas/util/_hypothesis.py b/pandas/util/_hypothesis.py new file mode 100644 index 0000000000000..9ea247cd3dbfd --- /dev/null +++ b/pandas/util/_hypothesis.py @@ -0,0 +1,166 @@ +""" +This module houses utility functions to generate hypothesis strategies which + can be used to generate random input test data for various test cases. +It is for internal use by different test case files like pandas/test/test*.py + files only and should not be used beyond this purpose. +For more information on hypothesis, check +(http://hypothesis.readthedocs.io/en/latest/). +""" +import string +from hypothesis import (given, # noqa:F401 + settings, # noqa:F401 + assume, # noqa:F401 + strategies as st, + ) + + +def get_elements(elem_type): + """ + Helper function to return hypothesis strategy whose elements depends on + the input data-type. + Currently only four types are supported namely, bool, int, float and str. + + Parameters + ---------- + elem_type: type + type of the elements for the strategy. + + Returns + ------- + hypothesis strategy. 
+ + Examples + -------- + >>> strat = get_elements(str) + >>> strat.example() + 'KWAo' + + >>> strat.example() + 'OfAlBH' + + >>> strat = get_elements(int) + >>> strat.example() + 31911 + + >>> strat.example() + 25288 + + >>> strat = get_elements(float) + >>> strat.example() + nan + + >>> strat.example() + inf + + >>> strat.example() + -2.2250738585072014e-308 + + >>> strat.example() + 0.5 + + >>> strat.example() + 1.7976931348623157e+308 + + >>> strat = get_elements(bool) + >>> strat.example() + True + + >>> strat.example() + True + + >>> strat.example() + False + """ + strategy = st.nothing() + if elem_type == bool: + strategy = st.booleans() + elif elem_type == int: + strategy = st.integers() + elif elem_type == float: + strategy = st.floats() + elif elem_type == str: + strategy = st.text(string.ascii_letters, max_size=10) + return strategy + + +@st.composite +def get_seq(draw, types, mixed=False, min_size=None, max_size=None, + transform_func=None): + """ + Helper function to generate strategy for creating lists. + What constitute in the generated list is driven by the different + parameters. + + Parameters + ---------- + types: iterable sequence like tuple or list + types which can be in the generated list. + mixed: bool + if True, list will contains elements from all types listed in arg, + otherwise it will have elements only from types[0]. + min_size: int + minimum size of the list. + max_size: int + maximum size of the list. + transform_func: callable + a callable which can be applied to whole list after it has been + generated. It can think of as providing functionality of filter + and map function. + + Returns + ------- + hypothesis lists strategy. + + Examples + -------- + >>> seq_strategy = get_seq((int, str, bool), mixed=True, min_size=1, +... 
max_size=5) + + >>> seq_strategy.example() + ['lkYMSn', -2501, 35, 'J'] + + >>> seq_strategy.example() + [True] + + >>> seq_strategy.example() + ['dRWgQYrBrW', True, False, 'gmsujJVDBM', 'Z'] + + >>> seq_strategy = get_seq((int, bool), +... mixed=False, +... min_size=1, +... max_size=5, +... transform_func=lambda seq: +... [str(x) for x in seq]) + + >>> seq_strategy.example() + ['9552', '124', '-24024'] + + >>> seq_strategy.example() + ['-1892'] + + >>> seq_strategy.example() + ['22', '66', '14785', '-26312', '32'] + """ + if min_size is None: + min_size = draw(st.integers(min_value=0, max_value=100)) + + if max_size is None: + max_size = draw(st.integers(min_value=min_size, max_value=100)) + + assert min_size <= max_size, \ + 'max_size must be greater than equal to min_size' + + elem_strategies = [] + for elem_type in types: + elem_strategies.append(get_elements(elem_type)) + if not mixed: + break + if transform_func: + strategy = draw(st.lists(st.one_of(elem_strategies), + min_size=min_size, + max_size=max_size).map(transform_func)) + else: + strategy = draw(st.lists(st.one_of(elem_strategies), + min_size=min_size, + max_size=max_size)) + return strategy
Addition of "hypothesis usage" in test cases of tests/reshape/test_util.py as kind of POC. - [x] closes #17978 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20590
2018-04-03T06:49:43Z
2018-08-20T22:47:41Z
null
2018-08-20T22:47:41Z
[WIP] Implement new Interval / IntervalIndex behavior, and update tests
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 71a6f78125004..2df8a91a5505c 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -31,6 +31,13 @@ def setup_method(self, method): [(0, 1), np.nan, (1, 2)]) self.indices = dict(intervalIndex=tm.makeIntervalIndex(10)) + def _compare_tuple_of_numpy_array(self, result, expected): + lidx, ridx = result + lidx_expected, ridx_expected = expected + + tm.assert_numpy_array_equal(lidx, lidx_expected) + tm.assert_numpy_array_equal(ridx, ridx_expected) + def create_index(self, closed='right'): return IntervalIndex.from_breaks(range(11), closed=closed) @@ -400,184 +407,300 @@ def test_get_item(self, closed): closed=closed) tm.assert_index_equal(result, expected) - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_get_loc_value(self): - pytest.raises(KeyError, self.index.get_loc, 0) - assert self.index.get_loc(0.5) == 0 - assert self.index.get_loc(1) == 0 - assert self.index.get_loc(1.5) == 1 - assert self.index.get_loc(2) == 1 - pytest.raises(KeyError, self.index.get_loc, -1) - pytest.raises(KeyError, self.index.get_loc, 3) - - idx = IntervalIndex.from_tuples([(0, 2), (1, 3)]) - assert idx.get_loc(0.5) == 0 - assert idx.get_loc(1) == 0 - tm.assert_numpy_array_equal(idx.get_loc(1.5), - np.array([0, 1], dtype='int64')) - tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)), - np.array([0, 1], dtype='int64')) - assert idx.get_loc(3) == 1 - pytest.raises(KeyError, idx.get_loc, 3.5) - - idx = IntervalIndex.from_arrays([0, 2], [1, 3]) - pytest.raises(KeyError, idx.get_loc, 1.5) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def slice_locs_cases(self, breaks): - # TODO: same tests for more index types - index = IntervalIndex.from_breaks([0, 1, 2], closed='right') - assert index.slice_locs() == (0, 2) - assert index.slice_locs(0, 1) == (0, 1) - assert 
index.slice_locs(1, 1) == (0, 1) - assert index.slice_locs(0, 2) == (0, 2) - assert index.slice_locs(0.5, 1.5) == (0, 2) - assert index.slice_locs(0, 0.5) == (0, 1) - assert index.slice_locs(start=1) == (0, 2) - assert index.slice_locs(start=1.2) == (1, 2) - assert index.slice_locs(end=1) == (0, 1) - assert index.slice_locs(end=1.1) == (0, 2) - assert index.slice_locs(end=1.0) == (0, 1) - assert index.slice_locs(-1, -1) == (0, 0) - - index = IntervalIndex.from_breaks([0, 1, 2], closed='neither') + @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("side", ['right', 'left', 'both', 'neither']) + def test_get_loc_interval(self, idx_side, side): + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=idx_side) + + for bound in [[0, 1], [1, 2], [2, 3], [3, 4], + [0, 2], [2.5, 3], [-1, 4]]: + # if get_loc is supplied an interval, it should only search + # for exact matches, not overlaps or covers, else KeyError. + if idx_side == side: + if bound == [0, 1]: + assert idx.get_loc(Interval(0, 1, closed=side)) == 0 + elif bound == [2, 3]: + assert idx.get_loc(Interval(2, 3, closed=side)) == 1 + else: + with pytest.raises(KeyError): + idx.get_loc(Interval(*bound, closed=side)) + else: + with pytest.raises(KeyError): + idx.get_loc(Interval(*bound, closed=side)) + + @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("scalar", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]) + def test_get_loc_scalar(self, idx_side, scalar): + + # correct = {side: {query: answer}}. 
+ # If query is not in the dict, that query should raise a KeyError + correct = {'right': {0.5: 0, 1: 0, 2.5: 1, 3: 1}, + 'left': {0: 0, 0.5: 0, 2: 1, 2.5: 1}, + 'both': {0: 0, 0.5: 0, 1: 0, 2: 1, 2.5: 1, 3: 1}, + 'neither': {0.5: 0, 2.5: 1}} + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=idx_side) + + # if get_loc is supplied a scalar, it should return the index of + # the interval which contains the scalar, or KeyError. + if scalar in correct[idx_side].keys(): + assert idx.get_loc(scalar) == correct[idx_side][scalar] + else: + pytest.raises(KeyError, idx.get_loc, scalar) + + def test_slice_locs_with_interval(self): + + # increasing monotonically + index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)]) + + assert index.slice_locs( + start=Interval(0, 2), end=Interval(2, 4)) == (0, 3) + assert index.slice_locs(start=Interval(0, 2)) == (0, 3) + assert index.slice_locs(end=Interval(2, 4)) == (0, 3) + assert index.slice_locs(end=Interval(0, 2)) == (0, 1) + assert index.slice_locs( + start=Interval(2, 4), end=Interval(0, 2)) == (2, 1) + + # decreasing monotonically + index = IntervalIndex.from_tuples([(2, 4), (1, 3), (0, 2)]) + + assert index.slice_locs( + start=Interval(0, 2), end=Interval(2, 4)) == (2, 1) + assert index.slice_locs(start=Interval(0, 2)) == (2, 3) + assert index.slice_locs(end=Interval(2, 4)) == (0, 1) + assert index.slice_locs(end=Interval(0, 2)) == (0, 3) + assert index.slice_locs( + start=Interval(2, 4), end=Interval(0, 2)) == (0, 3) + + # sorted duplicates + index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]) + + assert index.slice_locs( + start=Interval(0, 2), end=Interval(2, 4)) == (0, 3) + assert index.slice_locs(start=Interval(0, 2)) == (0, 3) + assert index.slice_locs(end=Interval(2, 4)) == (0, 3) + assert index.slice_locs(end=Interval(0, 2)) == (0, 2) + assert index.slice_locs( + start=Interval(2, 4), end=Interval(0, 2)) == (2, 2) + + # unsorted duplicates + index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 
2)]) + + pytest.raises(KeyError, index.slice_locs( + start=Interval(0, 2), end=Interval(2, 4))) + pytest.raises(KeyError, index.slice_locs(start=Interval(0, 2))) + assert index.slice_locs(end=Interval(2, 4)) == (0, 2) + pytest.raises(KeyError, index.slice_locs(end=Interval(0, 2))) + pytest.raises(KeyError, index.slice_locs( + start=Interval(2, 4), end=Interval(0, 2))) + + # another unsorted duplicates + index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)]) + + assert index.slice_locs( + start=Interval(0, 2), end=Interval(2, 4)) == (0, 3) + assert index.slice_locs(start=Interval(0, 2)) == (0, 4) + assert index.slice_locs(end=Interval(2, 4)) == (0, 3) + assert index.slice_locs(end=Interval(0, 2)) == (0, 2) + assert index.slice_locs( + start=Interval(2, 4), end=Interval(0, 2)) == (2, 2) + + def test_slice_locs_with_ints_and_floats_succeeds(self): + + # increasing non-overlapping + index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]) + assert index.slice_locs(0, 1) == (0, 1) assert index.slice_locs(0, 2) == (0, 2) - assert index.slice_locs(0.5, 1.5) == (0, 2) - assert index.slice_locs(1, 1) == (1, 1) - assert index.slice_locs(1, 2) == (1, 2) - - index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], - closed='both') - assert index.slice_locs(1, 1) == (0, 1) - assert index.slice_locs(1, 2) == (0, 2) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_slice_locs_int64(self): - self.slice_locs_cases([0, 1, 2]) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_slice_locs_float64(self): - self.slice_locs_cases([0.0, 1.0, 2.0]) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def slice_locs_decreasing_cases(self, tuples): - index = IntervalIndex.from_tuples(tuples) - assert index.slice_locs(1.5, 0.5) == (1, 3) - assert index.slice_locs(2, 0) == (1, 3) - assert index.slice_locs(2, 1) == (1, 3) - assert index.slice_locs(3, 1.1) == (0, 3) - assert 
index.slice_locs(3, 3) == (0, 2) - assert index.slice_locs(3.5, 3.3) == (0, 1) - assert index.slice_locs(1, -3) == (2, 3) - - slice_locs = index.slice_locs(-1, -1) - assert slice_locs[0] == slice_locs[1] - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_slice_locs_decreasing_int64(self): - self.slice_locs_cases([(2, 4), (1, 3), (0, 2)]) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_slice_locs_decreasing_float64(self): - self.slice_locs_cases([(2., 4.), (1., 3.), (0., 2.)]) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_slice_locs_fails(self): - index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)]) - with pytest.raises(KeyError): - index.slice_locs(1, 2) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_get_loc_interval(self): - assert self.index.get_loc(Interval(0, 1)) == 0 - assert self.index.get_loc(Interval(0, 0.5)) == 0 - assert self.index.get_loc(Interval(0, 1, 'left')) == 0 - pytest.raises(KeyError, self.index.get_loc, Interval(2, 3)) - pytest.raises(KeyError, self.index.get_loc, - Interval(-1, 0, 'left')) - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_get_indexer(self): - actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) - expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + assert index.slice_locs(0, 3) == (0, 2) + assert index.slice_locs(3, 1) == (2, 1) + assert index.slice_locs(3, 4) == (2, 3) + assert index.slice_locs(0, 4) == (0, 3) - actual = self.index.get_indexer(self.index) - expected = np.array([0, 1], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + # decreasing non-overlapping + index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)]) + assert index.slice_locs(0, 1) == (3, 2) + assert index.slice_locs(0, 2) == (3, 1) + assert index.slice_locs(0, 3) == (3, 1) + assert 
index.slice_locs(3, 1) == (1, 2) + assert index.slice_locs(3, 4) == (1, 0) + assert index.slice_locs(0, 4) == (3, 0) + + @pytest.mark.parametrize("query", [[0, 1], [0, 2], [0, 3], + [3, 1], [3, 4], [0, 4]]) + def test_slice_locs_with_ints_and_floats_fails(self, query): - index = IntervalIndex.from_breaks([0, 1, 2], closed='left') - actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) - expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + # increasing overlapping + index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)]) + pytest.raises(KeyError, index.slice_locs, query) - actual = self.index.get_indexer(index[:1]) - expected = np.array([0], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + # decreasing overlapping + index = IntervalIndex.from_tuples([(2, 4), (1, 3), (0, 2)]) + pytest.raises(KeyError, index.slice_locs, query) + + # sorted duplicates + index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]) + pytest.raises(KeyError, index.slice_locs, query) + + # unsorted duplicates + index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)]) + pytest.raises(KeyError, index.slice_locs, query) + + # another unsorted duplicates + index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)]) + pytest.raises(KeyError, index.slice_locs, query) + + @pytest.mark.parametrize("query", [ + Interval(1, 3, closed='right'), + Interval(1, 3, closed='left'), + Interval(1, 3, closed='both'), + Interval(1, 3, closed='neither'), + Interval(1, 4, closed='right'), + Interval(0, 4, closed='right'), + Interval(1, 2, closed='right')]) + @pytest.mark.parametrize("expected_result", [1, -1, -1, -1, -1, -1, -1]) + def test_get_indexer_with_interval_single_queries( + self, query, expected_result): + + index = IntervalIndex.from_tuples( + [(0, 2.5), (1, 3), (2, 4)], closed='right') + + result = index.get_indexer([query]) + expect = np.array([expected_result], dtype='intp') + tm.assert_numpy_array_equal(result, 
expect) + + @pytest.mark.parametrize("query", [ + [Interval(2, 4, closed='right'), Interval(1, 3, closed='right')], + [Interval(1, 3, closed='right'), Interval(0, 2, closed='right')], + [Interval(1, 3, closed='right'), Interval(1, 3, closed='left')]]) + @pytest.mark.parametrize("expected_result", [[2, 1], [1, -1], [1, -1]]) + def test_get_indexer_with_interval_multiple_queries( + self, query, expected_result): + + index = IntervalIndex.from_tuples( + [(0, 2.5), (1, 3), (2, 4)], closed='right') + + result = index.get_indexer(query) + expect = np.array(expected_result, dtype='intp') + tm.assert_numpy_array_equal(result, expect) + + @pytest.mark.parametrize( + "query", + [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5]) + @pytest.mark.parametrize( + "expected_result", + [-1, -1, 0, 0, 1, 1, -1, -1, 2, 2, -1]) + def test_get_indexer_with_ints_and_floats_single_queries( + self, query, expected_result): + + index = IntervalIndex.from_tuples( + [(0, 1), (1, 2), (3, 4)], closed='right') + + result = index.get_indexer([query]) + expect = np.array([expected_result], dtype='intp') + tm.assert_numpy_array_equal(result, expect) + + @pytest.mark.parametrize( + "query", + [[1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 2]]) + @pytest.mark.parametrize( + "expected_result", + [[0, 1], [0, 1, -1], [0, 1, -1, 2], [0, 1, -1, 2, 1]]) + def test_get_indexer_with_ints_and_floats_multiple_queries( + self, query, expected_result): + + index = IntervalIndex.from_tuples( + [(0, 1), (1, 2), (3, 4)], closed='right') + + result = index.get_indexer(query) + expect = np.array(expected_result, dtype='intp') + tm.assert_numpy_array_equal(result, expect) + + index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)]) + # TODO: @shoyer believes this should raise, master branch doesn't + + @pytest.mark.parametrize( + "query", + [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5]) + @pytest.mark.parametrize("expected_result", [ + (Int64Index([], dtype='int64'), np.array([0])), + (Int64Index([0], 
dtype='int64'), np.array([])), + (Int64Index([0], dtype='int64'), np.array([])), + (Int64Index([0, 1], dtype='int64'), np.array([])), + (Int64Index([0, 1], dtype='int64'), np.array([])), + (Int64Index([0, 1, 2], dtype='int64'), np.array([])), + (Int64Index([1, 2], dtype='int64'), np.array([])), + (Int64Index([2], dtype='int64'), np.array([])), + (Int64Index([2], dtype='int64'), np.array([])), + (Int64Index([], dtype='int64'), np.array([0])), + (Int64Index([], dtype='int64'), np.array([0]))]) + def test_get_indexer_non_unique_with_ints_and_floats_single_queries( + self, query, expected_result): + + index = IntervalIndex.from_tuples( + [(0, 2.5), (1, 3), (2, 4)], closed='left') + + result = index.get_indexer_non_unique([query]) + tm.assert_numpy_array_equal(result, expected_result) + + @pytest.mark.parametrize( + "query", + [[1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 2]]) + @pytest.mark.parametrize("expected_result", [ + (Int64Index([0, 1, 0, 1, 2], dtype='int64'), np.array([])), + (Int64Index([0, 1, 0, 1, 2, 2], dtype='int64'), np.array([])), + (Int64Index([0, 1, 0, 1, 2, 2, -1], dtype='int64'), np.array([3])), + (Int64Index([0, 1, 0, 1, 2, 2, -1, 0, 1, 2], dtype='int64'), + np.array([3]))]) + def test_get_indexer_non_unique_with_ints_and_floats_multiple_queries( + self, query, expected_result): + + index = IntervalIndex.from_tuples( + [(0, 2.5), (1, 3), (2, 4)], closed='left') + + result = index.get_indexer_non_unique(query) + tm.assert_numpy_array_equal(result, expected_result) + + # TODO we may also want to test get_indexer for the case when + # the intervals are duplicated, decreasing, non-monotonic, etc.. 
- actual = self.index.get_indexer(index) - expected = np.array([-1, 1], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + def test_contains(self): - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_get_indexer_subintervals(self): + index = IntervalIndex.from_arrays([0, 1], [1, 2], closed='right') - # TODO: is this right? - # return indexers for wholly contained subintervals - target = IntervalIndex.from_breaks(np.linspace(0, 2, 5)) - actual = self.index.get_indexer(target) - expected = np.array([0, 0, 1, 1], dtype='p') - tm.assert_numpy_array_equal(actual, expected) + # __contains__ requires perfect matches to intervals. + assert 0 not in index + assert 1 not in index + assert 2 not in index - target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2]) - actual = self.index.get_indexer(target) - expected = np.array([0, 0, 1, 1], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + assert Interval(0, 1, closed='right') in index + assert Interval(0, 2, closed='right') not in index + assert Interval(0, 0.5, closed='right') not in index + assert Interval(3, 5, closed='right') not in index + assert Interval(-1, 0, closed='left') not in index + assert Interval(0, 1, closed='left') not in index + assert Interval(0, 1, closed='both') not in index - actual = self.index.get_indexer(target[[0, -1]]) - expected = np.array([0, 1], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + def test_contains_method(self): - target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left') - actual = self.index.get_indexer(target) - expected = np.array([0, 0, 0], dtype='intp') - tm.assert_numpy_array_equal(actual, expected) + index = IntervalIndex.from_arrays([0, 1], [1, 2], closed='right') - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_contains(self): - # Only endpoints are valid. 
- i = IntervalIndex.from_arrays([0, 1], [1, 2]) - - # Invalid - assert 0 not in i - assert 1 not in i - assert 2 not in i - - # Valid - assert Interval(0, 1) in i - assert Interval(0, 2) in i - assert Interval(0, 0.5) in i - assert Interval(3, 5) not in i - assert Interval(-1, 0, closed='left') not in i - - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def testcontains(self): - # can select values that are IN the range of a value - i = IntervalIndex.from_arrays([0, 1], [1, 2]) - - assert i.contains(0.1) - assert i.contains(0.5) - assert i.contains(1) - assert i.contains(Interval(0, 1)) - assert i.contains(Interval(0, 2)) - - # these overlaps completely - assert i.contains(Interval(0, 3)) - assert i.contains(Interval(1, 3)) - - assert not i.contains(20) - assert not i.contains(-20) + assert not index.contains(0) + assert index.contains(0.1) + assert index.contains(0.5) + assert index.contains(1) + + assert index.contains(Interval(0, 1), closed='right') + assert not index.contains(Interval(0, 1), closed='left') + assert not index.contains(Interval(0, 1), closed='both') + assert not index.contains(Interval(0, 2), closed='right') + + assert not index.contains(Interval(0, 3), closed='right') + assert not index.contains(Interval(1, 3), closed='right') + + assert not index.contains(20) + assert not index.contains(-20) def test_dropna(self, closed): diff --git a/pandas/tests/indexes/interval/test_interval_new.py b/pandas/tests/indexes/interval/test_interval_new.py deleted file mode 100644 index a0d11db46d316..0000000000000 --- a/pandas/tests/indexes/interval/test_interval_new.py +++ /dev/null @@ -1,315 +0,0 @@ -from __future__ import division - -import pytest -import numpy as np - -from pandas import Interval, IntervalIndex, Int64Index -import pandas.util.testing as tm - - -pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316") - - -class TestIntervalIndex(object): - - def _compare_tuple_of_numpy_array(self, result, expected): 
- lidx, ridx = result - lidx_expected, ridx_expected = expected - - tm.assert_numpy_array_equal(lidx, lidx_expected) - tm.assert_numpy_array_equal(ridx, ridx_expected) - - @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) - @pytest.mark.parametrize("side", ['right', 'left', 'both', 'neither']) - def test_get_loc_interval(self, idx_side, side): - - idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=idx_side) - - for bound in [[0, 1], [1, 2], [2, 3], [3, 4], - [0, 2], [2.5, 3], [-1, 4]]: - # if get_loc is supplied an interval, it should only search - # for exact matches, not overlaps or covers, else KeyError. - if idx_side == side: - if bound == [0, 1]: - assert idx.get_loc(Interval(0, 1, closed=side)) == 0 - elif bound == [2, 3]: - assert idx.get_loc(Interval(2, 3, closed=side)) == 1 - else: - with pytest.raises(KeyError): - idx.get_loc(Interval(*bound, closed=side)) - else: - with pytest.raises(KeyError): - idx.get_loc(Interval(*bound, closed=side)) - - @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) - @pytest.mark.parametrize("scalar", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]) - def test_get_loc_scalar(self, idx_side, scalar): - - # correct = {side: {query: answer}}. - # If query is not in the dict, that query should raise a KeyError - correct = {'right': {0.5: 0, 1: 0, 2.5: 1, 3: 1}, - 'left': {0: 0, 0.5: 0, 2: 1, 2.5: 1}, - 'both': {0: 0, 0.5: 0, 1: 0, 2: 1, 2.5: 1, 3: 1}, - 'neither': {0.5: 0, 2.5: 1}} - - idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=idx_side) - - # if get_loc is supplied a scalar, it should return the index of - # the interval which contains the scalar, or KeyError. 
- if scalar in correct[idx_side].keys(): - assert idx.get_loc(scalar) == correct[idx_side][scalar] - else: - pytest.raises(KeyError, idx.get_loc, scalar) - - def test_slice_locs_with_interval(self): - - # increasing monotonically - index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)]) - - assert index.slice_locs( - start=Interval(0, 2), end=Interval(2, 4)) == (0, 3) - assert index.slice_locs(start=Interval(0, 2)) == (0, 3) - assert index.slice_locs(end=Interval(2, 4)) == (0, 3) - assert index.slice_locs(end=Interval(0, 2)) == (0, 1) - assert index.slice_locs( - start=Interval(2, 4), end=Interval(0, 2)) == (2, 1) - - # decreasing monotonically - index = IntervalIndex.from_tuples([(2, 4), (1, 3), (0, 2)]) - - assert index.slice_locs( - start=Interval(0, 2), end=Interval(2, 4)) == (2, 1) - assert index.slice_locs(start=Interval(0, 2)) == (2, 3) - assert index.slice_locs(end=Interval(2, 4)) == (0, 1) - assert index.slice_locs(end=Interval(0, 2)) == (0, 3) - assert index.slice_locs( - start=Interval(2, 4), end=Interval(0, 2)) == (0, 3) - - # sorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]) - - assert index.slice_locs( - start=Interval(0, 2), end=Interval(2, 4)) == (0, 3) - assert index.slice_locs(start=Interval(0, 2)) == (0, 3) - assert index.slice_locs(end=Interval(2, 4)) == (0, 3) - assert index.slice_locs(end=Interval(0, 2)) == (0, 2) - assert index.slice_locs( - start=Interval(2, 4), end=Interval(0, 2)) == (2, 2) - - # unsorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)]) - - pytest.raises(KeyError, index.slice_locs( - start=Interval(0, 2), end=Interval(2, 4))) - pytest.raises(KeyError, index.slice_locs(start=Interval(0, 2))) - assert index.slice_locs(end=Interval(2, 4)) == (0, 2) - pytest.raises(KeyError, index.slice_locs(end=Interval(0, 2))) - pytest.raises(KeyError, index.slice_locs( - start=Interval(2, 4), end=Interval(0, 2))) - - # another unsorted duplicates - index = 
IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)]) - - assert index.slice_locs( - start=Interval(0, 2), end=Interval(2, 4)) == (0, 3) - assert index.slice_locs(start=Interval(0, 2)) == (0, 4) - assert index.slice_locs(end=Interval(2, 4)) == (0, 3) - assert index.slice_locs(end=Interval(0, 2)) == (0, 2) - assert index.slice_locs( - start=Interval(2, 4), end=Interval(0, 2)) == (2, 2) - - def test_slice_locs_with_ints_and_floats_succeeds(self): - - # increasing non-overlapping - index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]) - - assert index.slice_locs(0, 1) == (0, 1) - assert index.slice_locs(0, 2) == (0, 2) - assert index.slice_locs(0, 3) == (0, 2) - assert index.slice_locs(3, 1) == (2, 1) - assert index.slice_locs(3, 4) == (2, 3) - assert index.slice_locs(0, 4) == (0, 3) - - # decreasing non-overlapping - index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)]) - assert index.slice_locs(0, 1) == (3, 2) - assert index.slice_locs(0, 2) == (3, 1) - assert index.slice_locs(0, 3) == (3, 1) - assert index.slice_locs(3, 1) == (1, 2) - assert index.slice_locs(3, 4) == (1, 0) - assert index.slice_locs(0, 4) == (3, 0) - - @pytest.mark.parametrize("query", [[0, 1], [0, 2], [0, 3], - [3, 1], [3, 4], [0, 4]]) - def test_slice_locs_with_ints_and_floats_fails(self, query): - - # increasing overlapping - index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)]) - pytest.raises(KeyError, index.slice_locs, query) - - # decreasing overlapping - index = IntervalIndex.from_tuples([(2, 4), (1, 3), (0, 2)]) - pytest.raises(KeyError, index.slice_locs, query) - - # sorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]) - pytest.raises(KeyError, index.slice_locs, query) - - # unsorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)]) - pytest.raises(KeyError, index.slice_locs, query) - - # another unsorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)]) - pytest.raises(KeyError, 
index.slice_locs, query) - - @pytest.mark.parametrize("query", [ - Interval(1, 3, closed='right'), - Interval(1, 3, closed='left'), - Interval(1, 3, closed='both'), - Interval(1, 3, closed='neither'), - Interval(1, 4, closed='right'), - Interval(0, 4, closed='right'), - Interval(1, 2, closed='right')]) - @pytest.mark.parametrize("expected_result", [1, -1, -1, -1, -1, -1, -1]) - def test_get_indexer_with_interval_single_queries( - self, query, expected_result): - - index = IntervalIndex.from_tuples( - [(0, 2.5), (1, 3), (2, 4)], closed='right') - - result = index.get_indexer([query]) - expect = np.array([expected_result], dtype='intp') - tm.assert_numpy_array_equal(result, expect) - - @pytest.mark.parametrize("query", [ - [Interval(2, 4, closed='right'), Interval(1, 3, closed='right')], - [Interval(1, 3, closed='right'), Interval(0, 2, closed='right')], - [Interval(1, 3, closed='right'), Interval(1, 3, closed='left')]]) - @pytest.mark.parametrize("expected_result", [[2, 1], [1, -1], [1, -1]]) - def test_get_indexer_with_interval_multiple_queries( - self, query, expected_result): - - index = IntervalIndex.from_tuples( - [(0, 2.5), (1, 3), (2, 4)], closed='right') - - result = index.get_indexer(query) - expect = np.array(expected_result, dtype='intp') - tm.assert_numpy_array_equal(result, expect) - - @pytest.mark.parametrize( - "query", - [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5]) - @pytest.mark.parametrize( - "expected_result", - [-1, -1, 0, 0, 1, 1, -1, -1, 2, 2, -1]) - def test_get_indexer_with_ints_and_floats_single_queries( - self, query, expected_result): - - index = IntervalIndex.from_tuples( - [(0, 1), (1, 2), (3, 4)], closed='right') - - result = index.get_indexer([query]) - expect = np.array([expected_result], dtype='intp') - tm.assert_numpy_array_equal(result, expect) - - @pytest.mark.parametrize( - "query", - [[1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 2]]) - @pytest.mark.parametrize( - "expected_result", - [[0, 1], [0, 1, -1], [0, 1, -1, 2], [0, 
1, -1, 2, 1]]) - def test_get_indexer_with_ints_and_floats_multiple_queries( - self, query, expected_result): - - index = IntervalIndex.from_tuples( - [(0, 1), (1, 2), (3, 4)], closed='right') - - result = index.get_indexer(query) - expect = np.array(expected_result, dtype='intp') - tm.assert_numpy_array_equal(result, expect) - - index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)]) - # TODO: @shoyer believes this should raise, master branch doesn't - - @pytest.mark.parametrize( - "query", - [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5]) - @pytest.mark.parametrize("expected_result", [ - (Int64Index([], dtype='int64'), np.array([0])), - (Int64Index([0], dtype='int64'), np.array([])), - (Int64Index([0], dtype='int64'), np.array([])), - (Int64Index([0, 1], dtype='int64'), np.array([])), - (Int64Index([0, 1], dtype='int64'), np.array([])), - (Int64Index([0, 1, 2], dtype='int64'), np.array([])), - (Int64Index([1, 2], dtype='int64'), np.array([])), - (Int64Index([2], dtype='int64'), np.array([])), - (Int64Index([2], dtype='int64'), np.array([])), - (Int64Index([], dtype='int64'), np.array([0])), - (Int64Index([], dtype='int64'), np.array([0]))]) - def test_get_indexer_non_unique_with_ints_and_floats_single_queries( - self, query, expected_result): - - index = IntervalIndex.from_tuples( - [(0, 2.5), (1, 3), (2, 4)], closed='left') - - result = index.get_indexer_non_unique([query]) - tm.assert_numpy_array_equal(result, expected_result) - - @pytest.mark.parametrize( - "query", - [[1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 2]]) - @pytest.mark.parametrize("expected_result", [ - (Int64Index([0, 1, 0, 1, 2], dtype='int64'), np.array([])), - (Int64Index([0, 1, 0, 1, 2, 2], dtype='int64'), np.array([])), - (Int64Index([0, 1, 0, 1, 2, 2, -1], dtype='int64'), np.array([3])), - (Int64Index([0, 1, 0, 1, 2, 2, -1, 0, 1, 2], dtype='int64'), - np.array([3]))]) - def test_get_indexer_non_unique_with_ints_and_floats_multiple_queries( - self, query, expected_result): - - index = 
IntervalIndex.from_tuples( - [(0, 2.5), (1, 3), (2, 4)], closed='left') - - result = index.get_indexer_non_unique(query) - tm.assert_numpy_array_equal(result, expected_result) - - # TODO we may also want to test get_indexer for the case when - # the intervals are duplicated, decreasing, non-monotonic, etc.. - - def test_contains(self): - - index = IntervalIndex.from_arrays([0, 1], [1, 2], closed='right') - - # __contains__ requires perfect matches to intervals. - assert 0 not in index - assert 1 not in index - assert 2 not in index - - assert Interval(0, 1, closed='right') in index - assert Interval(0, 2, closed='right') not in index - assert Interval(0, 0.5, closed='right') not in index - assert Interval(3, 5, closed='right') not in index - assert Interval(-1, 0, closed='left') not in index - assert Interval(0, 1, closed='left') not in index - assert Interval(0, 1, closed='both') not in index - - def test_contains_method(self): - - index = IntervalIndex.from_arrays([0, 1], [1, 2], closed='right') - - assert not index.contains(0) - assert index.contains(0.1) - assert index.contains(0.5) - assert index.contains(1) - - assert index.contains(Interval(0, 1), closed='right') - assert not index.contains(Interval(0, 1), closed='left') - assert not index.contains(Interval(0, 1), closed='both') - assert not index.contains(Interval(0, 2), closed='right') - - assert not index.contains(Interval(0, 3), closed='right') - assert not index.contains(Interval(1, 3), closed='right') - - assert not index.contains(20) - assert not index.contains(-20) diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 233fbd2c8d7be..897ed84f77744 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -12,25 +12,6 @@ class TestIntervalIndex(object): def setup_method(self, method): self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) - # To be removed, replaced 
by test_interval_new.py (see #16316, #16386) - def test_loc_with_scalar(self): - - s = self.s - - expected = s.iloc[:3] - tm.assert_series_equal(expected, s.loc[:3]) - tm.assert_series_equal(expected, s.loc[:2.5]) - tm.assert_series_equal(expected, s.loc[0.1:2.5]) - tm.assert_series_equal(expected, s.loc[-1:3]) - - expected = s.iloc[1:4] - tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]]) - tm.assert_series_equal(expected, s.loc[[2, 3, 4]]) - tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]]) - - expected = s.iloc[2:5] - tm.assert_series_equal(expected, s.loc[s >= 2]) - # TODO: check this behavior is consistent with test_interval_new.py def test_getitem_with_scalar(self): @@ -50,6 +31,8 @@ def test_getitem_with_scalar(self): expected = s.iloc[2:5] tm.assert_series_equal(expected, s[s >= 2]) + # TODO: where is test_getitem_with_interval? + # TODO: check this behavior is consistent with test_interval_new.py @pytest.mark.parametrize('direction, closed', product(('increasing', 'decreasing'), @@ -86,31 +69,42 @@ def test_nonoverlapping_monotonic(self, direction, closed): assert s[key] == expected assert s.loc[key] == expected - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_with_interval(self): + def test_loc_with_interval(self): + + # loc with single label / list of labels: + # - Intervals: only exact matches + # - scalars: those that contain it s = self.s - expected = 0 + expected = 0 result = s.loc[Interval(0, 1)] assert result == expected - result = s[Interval(0, 1)] assert result == expected expected = s.iloc[3:5] - result = s.loc[Interval(3, 6)] + result = s.loc[[Interval(3, 4), Interval(4, 5)]] tm.assert_series_equal(expected, result) - - expected = s.iloc[3:5] - result = s.loc[[Interval(3, 6)]] + result = s[[Interval(3, 4), Interval(4, 5)]] tm.assert_series_equal(expected, result) - expected = s.iloc[3:5] - result = s.loc[[Interval(3, 5)]] - tm.assert_series_equal(expected, result) + # missing or not exact + with 
pytest.raises(KeyError): + s.loc[Interval(3, 5, closed='left')] + + with pytest.raises(KeyError): + s[Interval(3, 5, closed='left')] + + with pytest.raises(KeyError): + s[Interval(3, 5)] + + with pytest.raises(KeyError): + s.loc[Interval(3, 5)] + + with pytest.raises(KeyError): + s[Interval(3, 5)] - # missing with pytest.raises(KeyError): s.loc[Interval(-2, 0)] @@ -123,69 +117,161 @@ def test_with_interval(self): with pytest.raises(KeyError): s[Interval(5, 6)] - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_with_slices(self): + def test_loc_with_scalar(self): + + # loc with single label / list of labels: + # - Intervals: only exact matches + # - scalars: those that contain it + + s = self.s + + assert s.loc[1] == 0 + assert s.loc[1.5] == 1 + assert s.loc[2] == 1 + + # TODO with __getitem__ same rules as loc, or positional ? + # assert s[1] == 0 + # assert s[1.5] == 1 + # assert s[2] == 1 + + expected = s.iloc[1:4] + tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]]) + tm.assert_series_equal(expected, s.loc[[2, 3, 4]]) + tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]]) + + expected = s.iloc[[1, 1, 2, 1]] + tm.assert_series_equal(expected, s.loc[[1.5, 2, 2.5, 1.5]]) + + expected = s.iloc[2:5] + tm.assert_series_equal(expected, s.loc[s >= 2]) + + def test_loc_with_slices(self): + + # loc with slices: + # - Interval objects: only works with exact matches + # - scalars: only works for non-overlapping, monotonic intervals, + # and start/stop select location based on the interval that + # contains them: + # (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop)) s = self.s # slice of interval - with pytest.raises(NotImplementedError): + + expected = s.iloc[:3] + result = s.loc[Interval(0, 1):Interval(2, 3)] + tm.assert_series_equal(expected, result) + result = s[Interval(0, 1):Interval(2, 3)] + tm.assert_series_equal(expected, result) + + expected = s.iloc[4:] + result = s.loc[Interval(3, 4):] + 
tm.assert_series_equal(expected, result) + result = s[Interval(3, 4):] + tm.assert_series_equal(expected, result) + + with pytest.raises(KeyError): s.loc[Interval(3, 6):] - with pytest.raises(NotImplementedError): + with pytest.raises(KeyError): s[Interval(3, 6):] - expected = s.iloc[3:5] - result = s[[Interval(3, 6)]] - tm.assert_series_equal(expected, result) + with pytest.raises(KeyError): + s.loc[Interval(3, 4, closed='left'):] + + with pytest.raises(KeyError): + s[Interval(3, 4, closed='left'):] + + # TODO with non-existing intervals ? + # s.loc[Interval(-1, 0):Interval(2, 3)] + + # slice of scalar + + expected = s.iloc[:3] + tm.assert_series_equal(expected, s.loc[:3]) + tm.assert_series_equal(expected, s.loc[:2.5]) + tm.assert_series_equal(expected, s.loc[0.1:2.5]) + + # TODO should this work? (-1 is not contained in any of the Intervals) + # tm.assert_series_equal(expected, s.loc[-1:3]) + + # TODO with __getitem__ same rules as loc, or positional ? + # tm.assert_series_equal(expected, s[:3]) + # tm.assert_series_equal(expected, s[:2.5]) + # tm.assert_series_equal(expected, s[0.1:2.5]) # slice of scalar with step != 1 - with pytest.raises(ValueError): + with pytest.raises(NotImplementedError): s[0:4:2] - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def test_with_overlaps(self): - - s = self.s - expected = s.iloc[[3, 4, 3, 4]] - result = s.loc[[Interval(3, 6), Interval(3, 6)]] - tm.assert_series_equal(expected, result) + def test_loc_with_overlap(self): idx = IntervalIndex.from_tuples([(1, 5), (3, 7)]) s = Series(range(len(idx)), index=idx) - result = s[4] + # scalar expected = s + result = s.loc[4] tm.assert_series_equal(expected, result) - result = s[[4]] - expected = s + result = s[4] tm.assert_series_equal(expected, result) result = s.loc[[4]] - expected = s tm.assert_series_equal(expected, result) - result = s[Interval(3, 5)] - expected = s + result = s[[4]] + tm.assert_series_equal(expected, result) + + # interval + expected = 
0 + result = s.loc[Interval(1, 5)] + tm.assert_series_equal(expected, result) + + result = s[Interval(1, 5)] tm.assert_series_equal(expected, result) - result = s.loc[Interval(3, 5)] expected = s + result = s.loc[[Interval(1, 5), Interval(3, 7)]] + tm.assert_series_equal(expected, result) + + result = s[[Interval(1, 5), Interval(3, 7)]] tm.assert_series_equal(expected, result) - # doesn't intersect unique set of intervals with pytest.raises(KeyError): - s[[Interval(3, 5)]] + s.loc[Interval(3, 5)] with pytest.raises(KeyError): s.loc[[Interval(3, 5)]] - # To be removed, replaced by test_interval_new.py (see #16316, #16386) + with pytest.raises(KeyError): + s[Interval(3, 5)] + + with pytest.raises(KeyError): + s[[Interval(3, 5)]] + + # slices with interval (only exact matches) + expected = s + result = s.loc[Interval(1, 5):Interval(3, 7)] + tm.assert_series_equal(expected, result) + + result = s[Interval(1, 5):Interval(3, 7)] + tm.assert_series_equal(expected, result) + + with pytest.raises(KeyError): + s.loc[Interval(1, 6):Interval(3, 8)] + + with pytest.raises(KeyError): + s[Interval(1, 6):Interval(3, 8)] + + # slices with scalar raise for overlapping intervals + # TODO KeyError is the appropriate error? 
+ with pytest.raises(KeyError): + s.loc[1:4] + def test_non_unique(self): idx = IntervalIndex.from_tuples([(1, 3), (3, 7)]) - s = Series(range(len(idx)), index=idx) result = s.loc[Interval(1, 3)] @@ -195,26 +281,26 @@ def test_non_unique(self): expected = s.iloc[0:1] tm.assert_series_equal(expected, result) - # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_non_unique_moar(self): idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)]) s = Series(range(len(idx)), index=idx) - result = s.loc[Interval(1, 3)] expected = s.iloc[[0, 1]] + result = s.loc[Interval(1, 3)] tm.assert_series_equal(expected, result) - # non-unique index and slices not allowed - with pytest.raises(ValueError): - s.loc[Interval(1, 3):] + expected = s + result = s.loc[Interval(1, 3):] + tm.assert_series_equal(expected, result) - with pytest.raises(ValueError): - s[Interval(1, 3):] + expected = s + result = s[Interval(1, 3):] + tm.assert_series_equal(expected, result) - # non-unique - with pytest.raises(ValueError): - s[[Interval(1, 3)]] + expected = s.iloc[[0, 1]] + result = s[[Interval(1, 3)]] + tm.assert_series_equal(expected, result) # TODO: check this behavior is consistent with test_interval_new.py def test_non_matching(self): diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py deleted file mode 100644 index 3eb5f38ba0c80..0000000000000 --- a/pandas/tests/indexing/interval/test_interval_new.py +++ /dev/null @@ -1,247 +0,0 @@ -import pytest -import numpy as np - -from pandas import Series, IntervalIndex, Interval -import pandas.util.testing as tm - - -pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316") - - -class TestIntervalIndex(object): - - def setup_method(self, method): - self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) - - def test_loc_with_interval(self): - - # loc with single label / list of labels: - # - Intervals: only exact matches - # - 
scalars: those that contain it - - s = self.s - - expected = 0 - result = s.loc[Interval(0, 1)] - assert result == expected - result = s[Interval(0, 1)] - assert result == expected - - expected = s.iloc[3:5] - result = s.loc[[Interval(3, 4), Interval(4, 5)]] - tm.assert_series_equal(expected, result) - result = s[[Interval(3, 4), Interval(4, 5)]] - tm.assert_series_equal(expected, result) - - # missing or not exact - with pytest.raises(KeyError): - s.loc[Interval(3, 5, closed='left')] - - with pytest.raises(KeyError): - s[Interval(3, 5, closed='left')] - - with pytest.raises(KeyError): - s[Interval(3, 5)] - - with pytest.raises(KeyError): - s.loc[Interval(3, 5)] - - with pytest.raises(KeyError): - s[Interval(3, 5)] - - with pytest.raises(KeyError): - s.loc[Interval(-2, 0)] - - with pytest.raises(KeyError): - s[Interval(-2, 0)] - - with pytest.raises(KeyError): - s.loc[Interval(5, 6)] - - with pytest.raises(KeyError): - s[Interval(5, 6)] - - def test_loc_with_scalar(self): - - # loc with single label / list of labels: - # - Intervals: only exact matches - # - scalars: those that contain it - - s = self.s - - assert s.loc[1] == 0 - assert s.loc[1.5] == 1 - assert s.loc[2] == 1 - - # TODO with __getitem__ same rules as loc, or positional ? 
- # assert s[1] == 0 - # assert s[1.5] == 1 - # assert s[2] == 1 - - expected = s.iloc[1:4] - tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]]) - tm.assert_series_equal(expected, s.loc[[2, 3, 4]]) - tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]]) - - expected = s.iloc[[1, 1, 2, 1]] - tm.assert_series_equal(expected, s.loc[[1.5, 2, 2.5, 1.5]]) - - expected = s.iloc[2:5] - tm.assert_series_equal(expected, s.loc[s >= 2]) - - def test_loc_with_slices(self): - - # loc with slices: - # - Interval objects: only works with exact matches - # - scalars: only works for non-overlapping, monotonic intervals, - # and start/stop select location based on the interval that - # contains them: - # (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop)) - - s = self.s - - # slice of interval - - expected = s.iloc[:3] - result = s.loc[Interval(0, 1):Interval(2, 3)] - tm.assert_series_equal(expected, result) - result = s[Interval(0, 1):Interval(2, 3)] - tm.assert_series_equal(expected, result) - - expected = s.iloc[4:] - result = s.loc[Interval(3, 4):] - tm.assert_series_equal(expected, result) - result = s[Interval(3, 4):] - tm.assert_series_equal(expected, result) - - with pytest.raises(KeyError): - s.loc[Interval(3, 6):] - - with pytest.raises(KeyError): - s[Interval(3, 6):] - - with pytest.raises(KeyError): - s.loc[Interval(3, 4, closed='left'):] - - with pytest.raises(KeyError): - s[Interval(3, 4, closed='left'):] - - # TODO with non-existing intervals ? - # s.loc[Interval(-1, 0):Interval(2, 3)] - - # slice of scalar - - expected = s.iloc[:3] - tm.assert_series_equal(expected, s.loc[:3]) - tm.assert_series_equal(expected, s.loc[:2.5]) - tm.assert_series_equal(expected, s.loc[0.1:2.5]) - - # TODO should this work? (-1 is not contained in any of the Intervals) - # tm.assert_series_equal(expected, s.loc[-1:3]) - - # TODO with __getitem__ same rules as loc, or positional ? 
- # tm.assert_series_equal(expected, s[:3]) - # tm.assert_series_equal(expected, s[:2.5]) - # tm.assert_series_equal(expected, s[0.1:2.5]) - - # slice of scalar with step != 1 - with pytest.raises(NotImplementedError): - s[0:4:2] - - def test_loc_with_overlap(self): - - idx = IntervalIndex.from_tuples([(1, 5), (3, 7)]) - s = Series(range(len(idx)), index=idx) - - # scalar - expected = s - result = s.loc[4] - tm.assert_series_equal(expected, result) - - result = s[4] - tm.assert_series_equal(expected, result) - - result = s.loc[[4]] - tm.assert_series_equal(expected, result) - - result = s[[4]] - tm.assert_series_equal(expected, result) - - # interval - expected = 0 - result = s.loc[Interval(1, 5)] - tm.assert_series_equal(expected, result) - - result = s[Interval(1, 5)] - tm.assert_series_equal(expected, result) - - expected = s - result = s.loc[[Interval(1, 5), Interval(3, 7)]] - tm.assert_series_equal(expected, result) - - result = s[[Interval(1, 5), Interval(3, 7)]] - tm.assert_series_equal(expected, result) - - with pytest.raises(KeyError): - s.loc[Interval(3, 5)] - - with pytest.raises(KeyError): - s.loc[[Interval(3, 5)]] - - with pytest.raises(KeyError): - s[Interval(3, 5)] - - with pytest.raises(KeyError): - s[[Interval(3, 5)]] - - # slices with interval (only exact matches) - expected = s - result = s.loc[Interval(1, 5):Interval(3, 7)] - tm.assert_series_equal(expected, result) - - result = s[Interval(1, 5):Interval(3, 7)] - tm.assert_series_equal(expected, result) - - with pytest.raises(KeyError): - s.loc[Interval(1, 6):Interval(3, 8)] - - with pytest.raises(KeyError): - s[Interval(1, 6):Interval(3, 8)] - - # slices with scalar raise for overlapping intervals - # TODO KeyError is the appropriate error? 
- with pytest.raises(KeyError): - s.loc[1:4] - - def test_non_unique(self): - - idx = IntervalIndex.from_tuples([(1, 3), (3, 7)]) - s = Series(range(len(idx)), index=idx) - - result = s.loc[Interval(1, 3)] - assert result == 0 - - result = s.loc[[Interval(1, 3)]] - expected = s.iloc[0:1] - tm.assert_series_equal(expected, result) - - def test_non_unique_moar(self): - - idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)]) - s = Series(range(len(idx)), index=idx) - - expected = s.iloc[[0, 1]] - result = s.loc[Interval(1, 3)] - tm.assert_series_equal(expected, result) - - expected = s - result = s.loc[Interval(1, 3):] - tm.assert_series_equal(expected, result) - - expected = s - result = s[Interval(1, 3):] - tm.assert_series_equal(expected, result) - - expected = s.iloc[[0, 1]] - result = s[[Interval(1, 3)]] - tm.assert_series_equal(expected, result)
- [x] More for #16316 - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20587
2018-04-02T21:32:03Z
2018-09-25T15:43:15Z
null
2018-09-25T15:43:15Z
API: rolling.apply will pass Series to function
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 18c4dca5b69da..110550d9f85cd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -65,6 +65,35 @@ The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtyp pd.get_dummies(df, columns=['c'], dtype=bool).dtypes +.. _whatsnew_0230.enhancements.window_raw: + +Rolling/Expanding.apply() accepts a ``raw`` keyword to pass a ``Series`` to the function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, +:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have gained a ``raw=None`` parameter. +This is similar to :func:`DataFame.apply`. This parameter, if ``True`` allows one to send a ``np.ndarray`` to the applied function. If ``False`` a ``Series`` will be passed. The +default is ``None``, which preserves backward compatibility, so this will default to ``True``, sending an ``np.ndarray``. +In a future version the default will be changed to ``False``, sending a ``Series``. (:issue:`5071`, :issue:`20584`) + +.. ipython:: python + + s = pd.Series(np.arange(5), np.arange(5) + 1) + s + +Pass a ``Series``: + +.. ipython:: python + + s.rolling(2, min_periods=1).apply(lambda x: x.iloc[-1], raw=False) + +Mimic the original behavior of passing a ndarray: + +.. ipython:: python + + s.rolling(2, min_periods=1).apply(lambda x: x[-1], raw=True) + + .. 
_whatsnew_0230.enhancements.merge_on_columns_and_levels: Merging on a combination of columns and index levels @@ -815,6 +844,7 @@ Other API Changes - :func:`DatetimeIndex.strftime` and :func:`PeriodIndex.strftime` now return an ``Index`` instead of a numpy array to be consistent with similar accessors (:issue:`20127`) - Constructing a Series from a list of length 1 no longer broadcasts this list when a longer index is specified (:issue:`19714`, :issue:`20391`). - :func:`DataFrame.to_dict` with ``orient='index'`` no longer casts int columns to float for a DataFrame with only int and float columns (:issue:`18580`) +- A user-defined-function that is passed to :func:`Series.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than an ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) .. _whatsnew_0230.deprecations: @@ -843,6 +873,8 @@ Deprecations - ``Index.summary()`` is deprecated and will be removed in a future version (:issue:`18217`) - ``NDFrame.get_ftype_counts()`` is deprecated and will be removed in a future version (:issue:`18243`) - The ``convert_datetime64`` parameter in :func:`DataFrame.to_records` has been deprecated and will be removed in a future version. The NumPy bug motivating this parameter has been resolved. The default value for this parameter has also changed from ``True`` to ``None`` (:issue:`18160`). 
+- :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, + :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have deprecated passing an ``np.array`` by default. One will need to pass the new ``raw`` parameter to be explicit about what is passed (:issue:`20584`) .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index aa13f03d8e9e4..e524f823605a4 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1432,30 +1432,35 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, return output -def roll_generic(ndarray[float64_t, cast=True] input, +def roll_generic(object obj, int64_t win, int64_t minp, object index, object closed, - int offset, object func, + int offset, object func, bint raw, object args, object kwargs): cdef: ndarray[double_t] output, counts, bufarr + ndarray[float64_t, cast=True] arr float64_t *buf float64_t *oldbuf int64_t nobs = 0, i, j, s, e, N bint is_variable ndarray[int64_t] start, end - if not input.flags.c_contiguous: - input = input.copy('C') - - n = len(input) + n = len(obj) if n == 0: - return input + return obj + + arr = np.asarray(obj) + + # ndarray input + if raw: + if not arr.flags.c_contiguous: + arr = arr.copy('C') - counts = roll_sum(np.concatenate([np.isfinite(input).astype(float), + counts = roll_sum(np.concatenate([np.isfinite(arr).astype(float), np.array([0.] 
* offset)]), win, minp, index, closed)[offset:] - start, end, N, win, minp, is_variable = get_window_indexer(input, win, + start, end, N, win, minp, is_variable = get_window_indexer(arr, win, minp, index, closed, floor=0) @@ -1463,8 +1468,8 @@ def roll_generic(ndarray[float64_t, cast=True] input, output = np.empty(N, dtype=float) if is_variable: + # variable window arr or series - # variable window if offset != 0: raise ValueError("unable to roll_generic with a non-zero offset") @@ -1473,7 +1478,20 @@ def roll_generic(ndarray[float64_t, cast=True] input, e = end[i] if counts[i] >= minp: - output[i] = func(input[s:e], *args, **kwargs) + if raw: + output[i] = func(arr[s:e], *args, **kwargs) + else: + output[i] = func(obj.iloc[s:e], *args, **kwargs) + else: + output[i] = NaN + + elif not raw: + # series + for i from 0 <= i < N: + if counts[i] >= minp: + sl = slice(int_max(i + offset - win + 1, 0), + int_min(i + offset + 1, N)) + output[i] = func(obj.iloc[sl], *args, **kwargs) else: output[i] = NaN @@ -1482,12 +1500,12 @@ def roll_generic(ndarray[float64_t, cast=True] input, # truncated windows at the beginning, through first full-length window for i from 0 <= i < (int_min(win, N) - offset): if counts[i] >= minp: - output[i] = func(input[0: (i + offset + 1)], *args, **kwargs) + output[i] = func(arr[0: (i + offset + 1)], *args, **kwargs) else: output[i] = NaN # remaining full-length windows - buf = <float64_t *> input.data + buf = <float64_t *> arr.data bufarr = np.empty(win, dtype=float) oldbuf = <float64_t *> bufarr.data for i from (win - offset) <= i < (N - offset): @@ -1502,7 +1520,7 @@ def roll_generic(ndarray[float64_t, cast=True] input, # truncated windows at the end for i from int_max(N - offset, 0) <= i < N: if counts[i] >= minp: - output[i] = func(input[int_max(i + offset - win + 1, 0): N], + output[i] = func(arr[int_max(i + offset - win + 1, 0): N], *args, **kwargs) else: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 
ae9d160db08e9..d3ab7afc025c9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4292,6 +4292,8 @@ def pipe(self, func, *args, **kwargs): Notes ----- `agg` is an alias for `aggregate`. Use the alias. + + A passed user-defined-function will be passed a Series for evaluation. """) _shared_docs['transform'] = (""" diff --git a/pandas/core/window.py b/pandas/core/window.py index 5cd4fffb5d7dd..f8b5aa292f309 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -314,7 +314,7 @@ def _center_window(self, result, window): def aggregate(self, arg, *args, **kwargs): result, how = self._aggregate(arg, *args, **kwargs) if result is None: - return self.apply(arg, args=args, kwargs=kwargs) + return self.apply(arg, raw=False, args=args, kwargs=kwargs) return result agg = aggregate @@ -954,23 +954,53 @@ def count(self): Parameters ---------- func : function - Must produce a single value from an ndarray input - \*args and \*\*kwargs are passed to the function""") + Must produce a single value from an ndarray input if ``raw=True`` + or a Series if ``raw=False`` + raw : bool, default None + * ``False`` : passes each row or column as a Series to the + function. + * ``True`` or ``None`` : the passed function will receive ndarray + objects instead. + If you are just applying a NumPy reduction function this will + achieve much better performance. + + The `raw` parameter is required and will show a FutureWarning if + not passed. In the future `raw` will default to False. + + .. versionadded:: 0.23.0 + + \*args and \*\*kwargs are passed to the function""") + + def apply(self, func, raw=None, args=(), kwargs={}): + from pandas import Series - def apply(self, func, args=(), kwargs={}): # TODO: _level is unused? 
_level = kwargs.pop('_level', None) # noqa window = self._get_window() offset = _offset(window, self.center) index, indexi = self._get_index() + # TODO: default is for backward compat + # change to False in the future + if raw is None: + warnings.warn( + "Currently, 'apply' passes the values as ndarrays to the " + "applied function. In the future, this will change to passing " + "it as Series objects. You need to specify 'raw=True' to keep " + "the current behaviour, and you can pass 'raw=False' to " + "silence this warning", FutureWarning, stacklevel=3) + raw = True + def f(arg, window, min_periods, closed): minp = _use_window(min_periods, window) - return _window.roll_generic(arg, window, minp, indexi, closed, - offset, func, args, kwargs) + if not raw: + arg = Series(arg, index=self.obj.index) + return _window.roll_generic( + arg, window, minp, indexi, + closed, offset, func, raw, args, kwargs) return self._apply(f, func, args=args, kwargs=kwargs, - center=False) + center=False, raw=raw) def sum(self, *args, **kwargs): nv.validate_window_func('sum', args, kwargs) @@ -1498,8 +1528,9 @@ def count(self): @Substitution(name='rolling') @Appender(_doc_template) @Appender(_shared_docs['apply']) - def apply(self, func, args=(), kwargs={}): - return super(Rolling, self).apply(func, args=args, kwargs=kwargs) + def apply(self, func, raw=None, args=(), kwargs={}): + return super(Rolling, self).apply( + func, raw=raw, args=args, kwargs=kwargs) @Substitution(name='rolling') @Appender(_shared_docs['sum']) @@ -1756,8 +1787,9 @@ def count(self, **kwargs): @Substitution(name='expanding') @Appender(_doc_template) @Appender(_shared_docs['apply']) - def apply(self, func, args=(), kwargs={}): - return super(Expanding, self).apply(func, args=args, kwargs=kwargs) + def apply(self, func, raw=None, args=(), kwargs={}): + return super(Expanding, self).apply( + func, raw=raw, args=args, kwargs=kwargs) @Substitution(name='expanding') @Appender(_shared_docs['sum']) diff --git 
a/pandas/tests/test_window.py b/pandas/tests/test_window.py index dabdb1e8e689c..605230390ff1d 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -29,6 +29,22 @@ def assert_equal(left, right): tm.assert_frame_equal(left, right) +@pytest.fixture(params=[True, False]) +def raw(request): + return request.param + + +@pytest.fixture(params=['triang', 'blackman', 'hamming', 'bartlett', 'bohman', + 'blackmanharris', 'nuttall', 'barthann']) +def win_types(request): + return request.param + + +@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian', 'slepian']) +def win_types_special(request): + return request.param + + class Base(object): _nan_locs = np.arange(20, 40) @@ -157,9 +173,16 @@ def test_agg(self): expected.columns = pd.MultiIndex.from_tuples(exp_cols) tm.assert_frame_equal(result, expected, check_like=True) + def test_agg_apply(self, raw): + # passed lambda + df = DataFrame({'A': range(5), 'B': range(0, 10, 2)}) + + r = df.rolling(window=3) + a_sum = r['A'].sum() + result = r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)}) - rcustom = r['B'].apply(lambda x: np.std(x, ddof=1)) + rcustom = r['B'].apply(lambda x: np.std(x, ddof=1), raw=raw) expected = concat([a_sum, rcustom], axis=1) tm.assert_frame_equal(result, expected, check_like=True) @@ -289,43 +312,51 @@ def setup_method(self, method): self._create_data() @td.skip_if_no_scipy - def test_constructor(self): + @pytest.mark.parametrize( + 'which', ['series', 'frame']) + def test_constructor(self, which): # GH 12669 - for o in [self.series, self.frame]: - c = o.rolling + o = getattr(self, which) + c = o.rolling - # valid - c(win_type='boxcar', window=2, min_periods=1) - c(win_type='boxcar', window=2, min_periods=1, center=True) - c(win_type='boxcar', window=2, min_periods=1, center=False) + # valid + c(win_type='boxcar', window=2, min_periods=1) + c(win_type='boxcar', window=2, min_periods=1, center=True) + c(win_type='boxcar', window=2, min_periods=1, center=False) - for 
wt in ['boxcar', 'triang', 'blackman', 'hamming', 'bartlett', - 'bohman', 'blackmanharris', 'nuttall', 'barthann']: - c(win_type=wt, window=2) + # not valid + for w in [2., 'foo', np.array([2])]: + with pytest.raises(ValueError): + c(win_type='boxcar', window=2, min_periods=w) + with pytest.raises(ValueError): + c(win_type='boxcar', window=2, min_periods=1, center=w) - # not valid - for w in [2., 'foo', np.array([2])]: - with pytest.raises(ValueError): - c(win_type='boxcar', window=2, min_periods=w) - with pytest.raises(ValueError): - c(win_type='boxcar', window=2, min_periods=1, center=w) + for wt in ['foobar', 1]: + with pytest.raises(ValueError): + c(win_type=wt, window=2) - for wt in ['foobar', 1]: - with pytest.raises(ValueError): - c(win_type=wt, window=2) + @td.skip_if_no_scipy + @pytest.mark.parametrize( + 'which', ['series', 'frame']) + def test_constructor_with_win_type(self, which, win_types): + # GH 12669 + o = getattr(self, which) + c = o.rolling + c(win_type=win_types, window=2) - def test_numpy_compat(self): + @pytest.mark.parametrize( + 'method', ['sum', 'mean']) + def test_numpy_compat(self, method): # see gh-12811 w = rwindow.Window(Series([2, 4, 6]), window=[0, 2]) msg = "numpy operations are not valid with window objects" - for func in ('sum', 'mean'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(w, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(w, func), dtype=np.float64) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(w, method), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(w, method), dtype=np.float64) class TestRolling(Base): @@ -340,59 +371,65 @@ def test_doc_string(self): df.rolling(2).sum() df.rolling(2, min_periods=1).sum() - def test_constructor(self): + @pytest.mark.parametrize( + 'which', ['series', 'frame']) + def test_constructor(self, which): # GH 12669 - for o in [self.series, self.frame]: - c = o.rolling + o = getattr(self, which) 
+ c = o.rolling - # valid - c(window=2) - c(window=2, min_periods=1) - c(window=2, min_periods=1, center=True) - c(window=2, min_periods=1, center=False) + # valid + c(window=2) + c(window=2, min_periods=1) + c(window=2, min_periods=1, center=True) + c(window=2, min_periods=1, center=False) - # GH 13383 - c(0) - with pytest.raises(ValueError): - c(-1) + # GH 13383 + c(0) + with pytest.raises(ValueError): + c(-1) - # not valid - for w in [2., 'foo', np.array([2])]: - with pytest.raises(ValueError): - c(window=w) - with pytest.raises(ValueError): - c(window=2, min_periods=w) - with pytest.raises(ValueError): - c(window=2, min_periods=1, center=w) + # not valid + for w in [2., 'foo', np.array([2])]: + with pytest.raises(ValueError): + c(window=w) + with pytest.raises(ValueError): + c(window=2, min_periods=w) + with pytest.raises(ValueError): + c(window=2, min_periods=1, center=w) @td.skip_if_no_scipy - def test_constructor_with_win_type(self): + @pytest.mark.parametrize( + 'which', ['series', 'frame']) + def test_constructor_with_win_type(self, which): # GH 13383 - for o in [self.series, self.frame]: - c = o.rolling - c(0, win_type='boxcar') - with pytest.raises(ValueError): - c(-1, win_type='boxcar') + o = getattr(self, which) + c = o.rolling + c(0, win_type='boxcar') + with pytest.raises(ValueError): + c(-1, win_type='boxcar') - def test_constructor_with_timedelta_window(self): + @pytest.mark.parametrize( + 'window', [timedelta(days=3), pd.Timedelta(days=3)]) + def test_constructor_with_timedelta_window(self, window): # GH 15440 n = 10 df = DataFrame({'value': np.arange(n)}, index=pd.date_range('2015-12-24', periods=n, freq="D")) expected_data = np.append([0., 1.], np.arange(3., 27., 3)) - for window in [timedelta(days=3), pd.Timedelta(days=3)]: - result = df.rolling(window=window).sum() - expected = DataFrame({'value': expected_data}, - index=pd.date_range('2015-12-24', periods=n, - freq="D")) - tm.assert_frame_equal(result, expected) - expected = 
df.rolling('3D').sum() - tm.assert_frame_equal(result, expected) + + result = df.rolling(window=window).sum() + expected = DataFrame({'value': expected_data}, + index=pd.date_range('2015-12-24', periods=n, + freq="D")) + tm.assert_frame_equal(result, expected) + expected = df.rolling('3D').sum() + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( 'window', [timedelta(days=3), pd.Timedelta(days=3), '3D']) - def test_constructor_with_timedelta_window_and_minperiods(self, window): + def test_constructor_timedelta_window_and_minperiods(self, window, raw): # GH 15305 n = 10 df = DataFrame({'value': np.arange(n)}, @@ -402,21 +439,22 @@ def test_constructor_with_timedelta_window_and_minperiods(self, window): index=pd.date_range('2017-08-08', periods=n, freq="D")) result_roll_sum = df.rolling(window=window, min_periods=2).sum() result_roll_generic = df.rolling(window=window, - min_periods=2).apply(sum) + min_periods=2).apply(sum, raw=raw) tm.assert_frame_equal(result_roll_sum, expected) tm.assert_frame_equal(result_roll_generic, expected) - def test_numpy_compat(self): + @pytest.mark.parametrize( + 'method', ['std', 'mean', 'sum', 'max', 'min', 'var']) + def test_numpy_compat(self, method): # see gh-12811 r = rwindow.Rolling(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" - for func in ('std', 'mean', 'sum', 'max', 'min', 'var'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(r, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(r, func), dtype=np.float64) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(r, method), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(r, method), dtype=np.float64) def test_closed(self): df = DataFrame({'A': [0, 1, 2, 3, 4]}) @@ -483,35 +521,38 @@ def test_doc_string(self): df df.expanding(2).sum() - def test_constructor(self): + @pytest.mark.parametrize( + 'which', ['series', 'frame']) + def 
test_constructor(self, which): # GH 12669 - for o in [self.series, self.frame]: - c = o.expanding + o = getattr(self, which) + c = o.expanding - # valid - c(min_periods=1) - c(min_periods=1, center=True) - c(min_periods=1, center=False) + # valid + c(min_periods=1) + c(min_periods=1, center=True) + c(min_periods=1, center=False) - # not valid - for w in [2., 'foo', np.array([2])]: - with pytest.raises(ValueError): - c(min_periods=w) - with pytest.raises(ValueError): - c(min_periods=1, center=w) + # not valid + for w in [2., 'foo', np.array([2])]: + with pytest.raises(ValueError): + c(min_periods=w) + with pytest.raises(ValueError): + c(min_periods=1, center=w) - def test_numpy_compat(self): + @pytest.mark.parametrize( + 'method', ['std', 'mean', 'sum', 'max', 'min', 'var']) + def test_numpy_compat(self, method): # see gh-12811 e = rwindow.Expanding(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" - for func in ('std', 'mean', 'sum', 'max', 'min', 'var'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), dtype=np.float64) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, method), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, method), dtype=np.float64) @pytest.mark.parametrize( 'expander', @@ -558,55 +599,58 @@ def test_doc_string(self): df df.ewm(com=0.5).mean() - def test_constructor(self): - for o in [self.series, self.frame]: - c = o.ewm - - # valid - c(com=0.5) - c(span=1.5) - c(alpha=0.5) - c(halflife=0.75) - c(com=0.5, span=None) - c(alpha=0.5, com=None) - c(halflife=0.75, alpha=None) + @pytest.mark.parametrize( + 'which', ['series', 'frame']) + def test_constructor(self, which): + o = getattr(self, which) + c = o.ewm + + # valid + c(com=0.5) + c(span=1.5) + c(alpha=0.5) + c(halflife=0.75) + c(com=0.5, span=None) + c(alpha=0.5, com=None) + c(halflife=0.75, 
alpha=None) + + # not valid: mutually exclusive + with pytest.raises(ValueError): + c(com=0.5, alpha=0.5) + with pytest.raises(ValueError): + c(span=1.5, halflife=0.75) + with pytest.raises(ValueError): + c(alpha=0.5, span=1.5) - # not valid: mutually exclusive - with pytest.raises(ValueError): - c(com=0.5, alpha=0.5) - with pytest.raises(ValueError): - c(span=1.5, halflife=0.75) - with pytest.raises(ValueError): - c(alpha=0.5, span=1.5) + # not valid: com < 0 + with pytest.raises(ValueError): + c(com=-0.5) - # not valid: com < 0 - with pytest.raises(ValueError): - c(com=-0.5) + # not valid: span < 1 + with pytest.raises(ValueError): + c(span=0.5) - # not valid: span < 1 - with pytest.raises(ValueError): - c(span=0.5) + # not valid: halflife <= 0 + with pytest.raises(ValueError): + c(halflife=0) - # not valid: halflife <= 0 + # not valid: alpha <= 0 or alpha > 1 + for alpha in (-0.5, 1.5): with pytest.raises(ValueError): - c(halflife=0) + c(alpha=alpha) - # not valid: alpha <= 0 or alpha > 1 - for alpha in (-0.5, 1.5): - with pytest.raises(ValueError): - c(alpha=alpha) - - def test_numpy_compat(self): + @pytest.mark.parametrize( + 'method', ['std', 'mean', 'var']) + def test_numpy_compat(self, method): # see gh-12811 e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) msg = "numpy operations are not valid with window objects" - for func in ('std', 'mean', 'var'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), dtype=np.float64) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, method), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, method), dtype=np.float64) # gh-12373 : rolling functions error on float32 data @@ -943,11 +987,8 @@ def test_cmov_window_na_min_periods(self): tm.assert_series_equal(xp, rs) @td.skip_if_no_scipy - def test_cmov_window_regular(self): + def test_cmov_window_regular(self, win_types): # GH 
8238 - win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman', - 'blackmanharris', 'nuttall', 'barthann'] - vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) xps = { @@ -969,33 +1010,25 @@ def test_cmov_window_regular(self): 14.0825, 11.5675, np.nan, np.nan] } - for wt in win_types: - xp = Series(xps[wt]) - rs = Series(vals).rolling(5, win_type=wt, center=True).mean() - tm.assert_series_equal(xp, rs) + xp = Series(xps[win_types]) + rs = Series(vals).rolling(5, win_type=win_types, center=True).mean() + tm.assert_series_equal(xp, rs) @td.skip_if_no_scipy - def test_cmov_window_regular_linear_range(self): + def test_cmov_window_regular_linear_range(self, win_types): # GH 8238 - win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman', - 'blackmanharris', 'nuttall', 'barthann'] - vals = np.array(range(10), dtype=np.float) xp = vals.copy() xp[:2] = np.nan xp[-2:] = np.nan xp = Series(xp) - for wt in win_types: - rs = Series(vals).rolling(5, win_type=wt, center=True).mean() - tm.assert_series_equal(xp, rs) + rs = Series(vals).rolling(5, win_type=win_types, center=True).mean() + tm.assert_series_equal(xp, rs) @td.skip_if_no_scipy - def test_cmov_window_regular_missing_data(self): + def test_cmov_window_regular_missing_data(self, win_types): # GH 8238 - win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman', - 'blackmanharris', 'nuttall', 'barthann'] - vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]) xps = { @@ -1017,17 +1050,18 @@ def test_cmov_window_regular_missing_data(self): 9.16438, 13.05052, 14.02175, 16.1098, 13.65509] } - for wt in win_types: - xp = Series(xps[wt]) - rs = Series(vals).rolling(5, win_type=wt, min_periods=3).mean() - tm.assert_series_equal(xp, rs) + xp = Series(xps[win_types]) + rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean() + tm.assert_series_equal(xp, rs) @td.skip_if_no_scipy - def test_cmov_window_special(self): + def 
test_cmov_window_special(self, win_types_special): # GH 8238 - win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian'] - kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., - 'width': 2.}, {'width': 0.5}] + kwds = { + 'kaiser': {'beta': 1.}, + 'gaussian': {'std': 1.}, + 'general_gaussian': {'power': 2., 'width': 2.}, + 'slepian': {'width': 0.5}} vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) @@ -1043,17 +1077,20 @@ def test_cmov_window_special(self): 12.90702, 12.83757, np.nan, np.nan] } - for wt, k in zip(win_types, kwds): - xp = Series(xps[wt]) - rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k) - tm.assert_series_equal(xp, rs) + xp = Series(xps[win_types_special]) + rs = Series(vals).rolling( + 5, win_type=win_types_special, center=True).mean( + **kwds[win_types_special]) + tm.assert_series_equal(xp, rs) @td.skip_if_no_scipy - def test_cmov_window_special_linear_range(self): + def test_cmov_window_special_linear_range(self, win_types_special): # GH 8238 - win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian'] - kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., - 'width': 2.}, {'width': 0.5}] + kwds = { + 'kaiser': {'beta': 1.}, + 'gaussian': {'std': 1.}, + 'general_gaussian': {'power': 2., 'width': 2.}, + 'slepian': {'width': 0.5}} vals = np.array(range(10), dtype=np.float) xp = vals.copy() @@ -1061,9 +1098,10 @@ def test_cmov_window_special_linear_range(self): xp[-2:] = np.nan xp = Series(xp) - for wt, k in zip(win_types, kwds): - rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k) - tm.assert_series_equal(xp, rs) + rs = Series(vals).rolling( + 5, win_type=win_types_special, center=True).mean( + **kwds[win_types_special]) + tm.assert_series_equal(xp, rs) def test_rolling_median(self): self._check_moment_func(np.median, name='median') @@ -1150,43 +1188,76 @@ def test_rolling_quantile_param(self): with pytest.raises(TypeError): ser.rolling(3).quantile('foo') - def 
test_rolling_apply(self): + def test_rolling_apply(self, raw): # suppress warnings about empty slices, as we are deliberately testing # with a 0-length Series + with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning) - ser = Series([]) - tm.assert_series_equal(ser, - ser.rolling(10).apply(lambda x: x.mean())) - def f(x): return x[np.isfinite(x)].mean() - self._check_moment_func(np.mean, name='apply', func=f) + self._check_moment_func(np.mean, name='apply', func=f, raw=raw) - # GH 8080 + expected = Series([]) + result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw) + tm.assert_series_equal(result, expected) + + # gh-8080 s = Series([None, None, None]) - result = s.rolling(2, min_periods=0).apply(lambda x: len(x)) + result = s.rolling(2, min_periods=0).apply(lambda x: len(x), raw=raw) expected = Series([1., 2., 2.]) tm.assert_series_equal(result, expected) - result = s.rolling(2, min_periods=0).apply(len) + result = s.rolling(2, min_periods=0).apply(len, raw=raw) tm.assert_series_equal(result, expected) - def test_rolling_apply_out_of_bounds(self): - # #1850 + @pytest.mark.parametrize('klass', [Series, DataFrame]) + @pytest.mark.parametrize( + 'method', [lambda x: x.rolling(window=2), lambda x: x.expanding()]) + def test_apply_future_warning(self, klass, method): + + # gh-5071 + s = klass(np.arange(3)) + + with tm.assert_produces_warning(FutureWarning): + method(s).apply(lambda x: len(x)) + + def test_rolling_apply_out_of_bounds(self, raw): + # gh-1850 vals = pd.Series([1, 2, 3, 4]) - result = vals.rolling(10).apply(np.sum) + result = vals.rolling(10).apply(np.sum, raw=raw) assert result.isna().all() - result = vals.rolling(10, min_periods=1).apply(np.sum) + result = vals.rolling(10, min_periods=1).apply(np.sum, raw=raw) expected = pd.Series([1, 3, 6, 10], dtype=float) tm.assert_almost_equal(result, expected) + @pytest.mark.parametrize('window', [2, '2s']) + def 
test_rolling_apply_with_pandas_objects(self, window): + # 5071 + df = pd.DataFrame({'A': np.random.randn(5), + 'B': np.random.randint(0, 10, size=5)}, + index=pd.date_range('20130101', periods=5, freq='s')) + + # we have an equal spaced timeseries index + # so simulate removing the first period + def f(x): + if x.index[0] == df.index[0]: + return np.nan + return x.iloc[-1] + + result = df.rolling(window).apply(f, raw=False) + expected = df.iloc[2:].reindex_like(df) + tm.assert_frame_equal(result, expected) + + with pytest.raises(AttributeError): + df.rolling(window).apply(f, raw=True) + def test_rolling_std(self): self._check_moment_func(lambda x: np.std(x, ddof=1), name='std') @@ -1256,10 +1327,10 @@ def get_result(obj, window, min_periods=None, center=False): frame_result = get_result(self.frame, window=50) assert isinstance(frame_result, DataFrame) - tm.assert_series_equal(frame_result.iloc[-1, :], - self.frame.iloc[-50:, :].apply(static_comp, - axis=0), - check_names=False) + tm.assert_series_equal( + frame_result.iloc[-1, :], + self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw), + check_names=False) # check time_rule works if has_time_rule: @@ -1287,7 +1358,7 @@ def get_result(obj, window, min_periods=None, center=False): static_comp(trunc_series)) tm.assert_series_equal(frame_result.xs(last_date), - trunc_frame.apply(static_comp), + trunc_frame.apply(static_comp, raw=raw), check_names=False) # excluding NaNs correctly @@ -1402,26 +1473,20 @@ def test_ewma(self): result = vals.ewm(span=100, adjust=False).mean().sum() assert np.abs(result - 1) < 1e-2 + @pytest.mark.parametrize('adjust', [True, False]) + @pytest.mark.parametrize('ignore_na', [True, False]) + def test_ewma_cases(self, adjust, ignore_na): + # try adjust/ignore_na args matrix + s = Series([1.0, 2.0, 4.0, 8.0]) - expected = Series([1.0, 1.6, 2.736842, 4.923077]) - for f in [lambda s: s.ewm(com=2.0, adjust=True).mean(), - lambda s: s.ewm(com=2.0, adjust=True, - ignore_na=False).mean(), - 
lambda s: s.ewm(com=2.0, adjust=True, ignore_na=True).mean(), - ]: - result = f(s) - tm.assert_series_equal(result, expected) + if adjust: + expected = Series([1.0, 1.6, 2.736842, 4.923077]) + else: + expected = Series([1.0, 1.333333, 2.222222, 4.148148]) - expected = Series([1.0, 1.333333, 2.222222, 4.148148]) - for f in [lambda s: s.ewm(com=2.0, adjust=False).mean(), - lambda s: s.ewm(com=2.0, adjust=False, - ignore_na=False).mean(), - lambda s: s.ewm(com=2.0, adjust=False, - ignore_na=True).mean(), - ]: - result = f(s) - tm.assert_series_equal(result, expected) + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + tm.assert_series_equal(result, expected) def test_ewma_nan_handling(self): s = Series([1.] + [np.nan] * 5 + [1.]) @@ -1555,14 +1620,13 @@ def test_ewm_domain_checks(self): s.ewm(alpha=1.0) pytest.raises(ValueError, s.ewm, alpha=1.1) - def test_ew_empty_series(self): + @pytest.mark.parametrize('method', ['mean', 'vol', 'var']) + def test_ew_empty_series(self, method): vals = pd.Series([], dtype=np.float64) ewm = vals.ewm(3) - funcs = ['mean', 'vol', 'var'] - for f in funcs: - result = getattr(ewm, f)() - tm.assert_almost_equal(result, vals) + result = getattr(ewm, method)() + tm.assert_almost_equal(result, vals) def _check_ew(self, name=None, preserve_nan=False): series_result = getattr(self.series.ewm(com=10), name)() @@ -2160,7 +2224,7 @@ def test_expanding_consistency(self, min_periods): if name == 'count': expanding_f_result = expanding_f() expanding_apply_f_result = x.expanding( - min_periods=0).apply(func=f) + min_periods=0).apply(func=f, raw=True) else: if name in ['cov', 'corr']: expanding_f_result = expanding_f( @@ -2168,7 +2232,7 @@ def test_expanding_consistency(self, min_periods): else: expanding_f_result = expanding_f() expanding_apply_f_result = x.expanding( - min_periods=min_periods).apply(func=f) + min_periods=min_periods).apply(func=f, raw=True) # GH 9422 if name in ['sum', 'prod']: @@ -2259,7 +2323,7 @@ def 
test_rolling_consistency(self, window, min_periods, center): rolling_f_result = rolling_f() rolling_apply_f_result = x.rolling( window=window, min_periods=0, - center=center).apply(func=f) + center=center).apply(func=f, raw=True) else: if name in ['cov', 'corr']: rolling_f_result = rolling_f( @@ -2268,7 +2332,7 @@ def test_rolling_consistency(self, window, min_periods, center): rolling_f_result = rolling_f() rolling_apply_f_result = x.rolling( window=window, min_periods=min_periods, - center=center).apply(func=f) + center=center).apply(func=f, raw=True) # GH 9422 if name in ['sum', 'prod']: @@ -2348,29 +2412,25 @@ def test_corr_sanity(self): except AssertionError: print(res) - def test_flex_binary_frame(self): - def _check(method): - series = self.frame[1] + @pytest.mark.parametrize('method', ['corr', 'cov']) + def test_flex_binary_frame(self, method): + series = self.frame[1] - res = getattr(series.rolling(window=10), method)(self.frame) - res2 = getattr(self.frame.rolling(window=10), method)(series) - exp = self.frame.apply(lambda x: getattr( - series.rolling(window=10), method)(x)) + res = getattr(series.rolling(window=10), method)(self.frame) + res2 = getattr(self.frame.rolling(window=10), method)(series) + exp = self.frame.apply(lambda x: getattr( + series.rolling(window=10), method)(x)) - tm.assert_frame_equal(res, exp) - tm.assert_frame_equal(res2, exp) + tm.assert_frame_equal(res, exp) + tm.assert_frame_equal(res2, exp) - frame2 = self.frame.copy() - frame2.values[:] = np.random.randn(*frame2.shape) + frame2 = self.frame.copy() + frame2.values[:] = np.random.randn(*frame2.shape) - res3 = getattr(self.frame.rolling(window=10), method)(frame2) - exp = DataFrame(dict((k, getattr(self.frame[k].rolling( - window=10), method)(frame2[k])) for k in self.frame)) - tm.assert_frame_equal(res3, exp) - - methods = ['corr', 'cov'] - for meth in methods: - _check(meth) + res3 = getattr(self.frame.rolling(window=10), method)(frame2) + exp = DataFrame(dict((k, 
getattr(self.frame[k].rolling( + window=10), method)(frame2[k])) for k in self.frame)) + tm.assert_frame_equal(res3, exp) def test_ewmcov(self): self._check_binary_ew('cov') @@ -2417,19 +2477,24 @@ def func(A, B, com, **kwargs): pytest.raises(Exception, func, A, randn(50), 20, min_periods=5) - def test_expanding_apply_args_kwargs(self): + def test_expanding_apply_args_kwargs(self, raw): + def mean_w_arg(x, const): return np.mean(x) + const df = DataFrame(np.random.rand(20, 3)) - expected = df.expanding().apply(np.mean) + 20. + expected = df.expanding().apply(np.mean, raw=raw) + 20. - tm.assert_frame_equal(df.expanding().apply(mean_w_arg, args=(20, )), - expected) - tm.assert_frame_equal(df.expanding().apply(mean_w_arg, - kwargs={'const': 20}), - expected) + result = df.expanding().apply(mean_w_arg, + raw=raw, + args=(20, )) + tm.assert_frame_equal(result, expected) + + result = df.expanding().apply(mean_w_arg, + raw=raw, + kwargs={'const': 20}) + tm.assert_frame_equal(result, expected) def test_expanding_corr(self): A = self.series.dropna() @@ -2539,42 +2604,47 @@ def test_rolling_corr_diff_length(self): result = s1.rolling(window=3, min_periods=2).corr(s2a) tm.assert_series_equal(result, expected) - def test_rolling_functions_window_non_shrinkage(self): + @pytest.mark.parametrize( + 'f', + [ + lambda x: (x.rolling(window=10, min_periods=5) + .cov(x, pairwise=False)), + lambda x: (x.rolling(window=10, min_periods=5) + .corr(x, pairwise=False)), + lambda x: x.rolling(window=10, min_periods=5).max(), + lambda x: x.rolling(window=10, min_periods=5).min(), + lambda x: x.rolling(window=10, min_periods=5).sum(), + lambda x: x.rolling(window=10, min_periods=5).mean(), + lambda x: x.rolling(window=10, min_periods=5).std(), + lambda x: x.rolling(window=10, min_periods=5).var(), + lambda x: x.rolling(window=10, min_periods=5).skew(), + lambda x: x.rolling(window=10, min_periods=5).kurt(), + lambda x: x.rolling( + window=10, min_periods=5).quantile(quantile=0.5), + lambda x: 
x.rolling(window=10, min_periods=5).median(), + lambda x: x.rolling(window=10, min_periods=5).apply( + sum, raw=False), + lambda x: x.rolling(window=10, min_periods=5).apply( + sum, raw=True), + lambda x: x.rolling(win_type='boxcar', + window=10, min_periods=5).mean()]) + def test_rolling_functions_window_non_shrinkage(self, f): # GH 7764 s = Series(range(4)) s_expected = Series(np.nan, index=s.index) df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B']) df_expected = DataFrame(np.nan, index=df.index, columns=df.columns) - functions = [lambda x: (x.rolling(window=10, min_periods=5) - .cov(x, pairwise=False)), - lambda x: (x.rolling(window=10, min_periods=5) - .corr(x, pairwise=False)), - lambda x: x.rolling(window=10, min_periods=5).max(), - lambda x: x.rolling(window=10, min_periods=5).min(), - lambda x: x.rolling(window=10, min_periods=5).sum(), - lambda x: x.rolling(window=10, min_periods=5).mean(), - lambda x: x.rolling(window=10, min_periods=5).std(), - lambda x: x.rolling(window=10, min_periods=5).var(), - lambda x: x.rolling(window=10, min_periods=5).skew(), - lambda x: x.rolling(window=10, min_periods=5).kurt(), - lambda x: x.rolling( - window=10, min_periods=5).quantile(quantile=0.5), - lambda x: x.rolling(window=10, min_periods=5).median(), - lambda x: x.rolling(window=10, min_periods=5).apply(sum), - lambda x: x.rolling(win_type='boxcar', - window=10, min_periods=5).mean()] - for f in functions: - try: - s_result = f(s) - tm.assert_series_equal(s_result, s_expected) + try: + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) - df_result = f(df) - tm.assert_frame_equal(df_result, df_expected) - except (ImportError): + df_result = f(df) + tm.assert_frame_equal(df_result, df_expected) + except (ImportError): - # scipy needed for rolling_window - continue + # scipy needed for rolling_window + pytest.skip("scipy not available") def test_rolling_functions_window_non_shrinkage_binary(self): @@ -2620,7 +2690,10 @@ def 
test_moment_functions_zero_length(self): lambda x: x.expanding(min_periods=5).kurt(), lambda x: x.expanding(min_periods=5).quantile(0.5), lambda x: x.expanding(min_periods=5).median(), - lambda x: x.expanding(min_periods=5).apply(sum), + lambda x: x.expanding(min_periods=5).apply( + sum, raw=False), + lambda x: x.expanding(min_periods=5).apply( + sum, raw=True), lambda x: x.rolling(window=10).count(), lambda x: x.rolling(window=10, min_periods=5).cov( x, pairwise=False), @@ -2637,7 +2710,10 @@ def test_moment_functions_zero_length(self): lambda x: x.rolling( window=10, min_periods=5).quantile(0.5), lambda x: x.rolling(window=10, min_periods=5).median(), - lambda x: x.rolling(window=10, min_periods=5).apply(sum), + lambda x: x.rolling(window=10, min_periods=5).apply( + sum, raw=False), + lambda x: x.rolling(window=10, min_periods=5).apply( + sum, raw=True), lambda x: x.rolling(win_type='boxcar', window=10, min_periods=5).mean(), ] @@ -2805,20 +2881,25 @@ def expanding_func(x, min_periods=1, center=False, axis=0): return getattr(exp, func)() self._check_expanding(expanding_func, static_comp, preserve_nan=False) - def test_expanding_apply(self): + def test_expanding_apply(self, raw): def expanding_mean(x, min_periods=1): + exp = x.expanding(min_periods=min_periods) - return exp.apply(lambda x: x.mean()) + result = exp.apply(lambda x: x.mean(), raw=raw) + return result - self._check_expanding(expanding_mean, np.mean) + # TODO(jreback), needed to add preserve_nan=False + # here to make this pass + self._check_expanding(expanding_mean, np.mean, preserve_nan=False) ser = Series([]) - tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean())) + tm.assert_series_equal(ser, ser.expanding().apply( + lambda x: x.mean(), raw=raw)) # GH 8080 s = Series([None, None, None]) - result = s.expanding(min_periods=0).apply(lambda x: len(x)) + result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw) expected = Series([1., 2., 3.]) tm.assert_series_equal(result, 
expected) @@ -3057,13 +3138,14 @@ def func(x): expected = g.apply(func) tm.assert_series_equal(result, expected) - def test_rolling_apply(self): + def test_rolling_apply(self, raw): g = self.frame.groupby('A') r = g.rolling(window=4) # reduction - result = r.apply(lambda x: x.sum()) - expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum())) + result = r.apply(lambda x: x.sum(), raw=raw) + expected = g.apply( + lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) def test_expanding(self): @@ -3104,13 +3186,14 @@ def func(x): expected = g.apply(func) tm.assert_series_equal(result, expected) - def test_expanding_apply(self): + def test_expanding_apply(self, raw): g = self.frame.groupby('A') r = g.expanding() # reduction - result = r.apply(lambda x: x.sum()) - expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum())) + result = r.apply(lambda x: x.sum(), raw=raw) + expected = g.apply( + lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) @@ -3624,22 +3707,22 @@ def test_ragged_max(self): expected['B'] = [0.0, 1, 2, 3, 4] tm.assert_frame_equal(result, expected) - def test_ragged_apply(self): + def test_ragged_apply(self, raw): df = self.ragged f = lambda x: 1 - result = df.rolling(window='1s', min_periods=1).apply(f) + result = df.rolling(window='1s', min_periods=1).apply(f, raw=raw) expected = df.copy() expected['B'] = 1. tm.assert_frame_equal(result, expected) - result = df.rolling(window='2s', min_periods=1).apply(f) + result = df.rolling(window='2s', min_periods=1).apply(f, raw=raw) expected = df.copy() expected['B'] = 1. tm.assert_frame_equal(result, expected) - result = df.rolling(window='5s', min_periods=1).apply(f) + result = df.rolling(window='5s', min_periods=1).apply(f, raw=raw) expected = df.copy() expected['B'] = 1. 
tm.assert_frame_equal(result, expected) @@ -3662,8 +3745,14 @@ def test_all(self): expected = er.quantile(0.5) tm.assert_frame_equal(result, expected) - result = r.apply(lambda x: 1) - expected = er.apply(lambda x: 1) + def test_all_apply(self, raw): + + df = self.regular * 2 + er = df.rolling(window=1) + r = df.rolling(window='1s') + + result = r.apply(lambda x: 1, raw=raw) + expected = er.apply(lambda x: 1, raw=raw) tm.assert_frame_equal(result, expected) def test_all2(self):
closes #5071
https://api.github.com/repos/pandas-dev/pandas/pulls/20584
2018-04-02T14:09:07Z
2018-04-16T14:54:04Z
2018-04-16T14:54:04Z
2018-08-02T14:16:32Z
API: categorical grouping will no longer return the cartesian product
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 407fad39ba232..3616a7e1b41d2 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -91,10 +91,10 @@ The mapping can be specified many different ways: - A Python function, to be called on each of the axis labels. - A list or NumPy array of the same length as the selected axis. - A dict or ``Series``, providing a ``label -> group name`` mapping. - - For ``DataFrame`` objects, a string indicating a column to be used to group. + - For ``DataFrame`` objects, a string indicating a column to be used to group. Of course ``df.groupby('A')`` is just syntactic sugar for ``df.groupby(df['A'])``, but it makes life simpler. - - For ``DataFrame`` objects, a string indicating an index level to be used to + - For ``DataFrame`` objects, a string indicating an index level to be used to group. - A list of any of the above things. @@ -120,7 +120,7 @@ consider the following ``DataFrame``: 'D' : np.random.randn(8)}) df -On a DataFrame, we obtain a GroupBy object by calling :meth:`~DataFrame.groupby`. +On a DataFrame, we obtain a GroupBy object by calling :meth:`~DataFrame.groupby`. We could naturally group by either the ``A`` or ``B`` columns, or both: .. ipython:: python @@ -360,8 +360,8 @@ Index level names may be specified as keys directly to ``groupby``. DataFrame column selection in GroupBy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once you have created the GroupBy object from a DataFrame, you might want to do -something different for each of the columns. Thus, using ``[]`` similar to +Once you have created the GroupBy object from a DataFrame, you might want to do +something different for each of the columns. Thus, using ``[]`` similar to getting a column from a DataFrame, you can do: .. ipython:: python @@ -421,7 +421,7 @@ statement if you wish: ``for (k1, k2), group in grouped:``. 
Selecting a group ----------------- -A single group can be selected using +A single group can be selected using :meth:`~pandas.core.groupby.DataFrameGroupBy.get_group`: .. ipython:: python @@ -444,8 +444,8 @@ perform a computation on the grouped data. These operations are similar to the :ref:`aggregating API <basics.aggregate>`, :ref:`window functions API <stats.aggregate>`, and :ref:`resample API <timeseries.aggregate>`. -An obvious one is aggregation via the -:meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` or equivalently +An obvious one is aggregation via the +:meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` or equivalently :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` method: .. ipython:: python @@ -517,12 +517,12 @@ Some common aggregating functions are tabulated below: :meth:`~pd.core.groupby.DataFrameGroupBy.nth`;Take nth value, or a subset if n is a list :meth:`~pd.core.groupby.DataFrameGroupBy.min`;Compute min of group values :meth:`~pd.core.groupby.DataFrameGroupBy.max`;Compute max of group values - -The aggregating functions above will exclude NA values. Any function which + +The aggregating functions above will exclude NA values. Any function which reduces a :class:`Series` to a scalar value is an aggregation function and will work, a trivial example is ``df.groupby('A').agg(lambda ser: 1)``. Note that -:meth:`~pd.core.groupby.DataFrameGroupBy.nth` can act as a reducer *or* a +:meth:`~pd.core.groupby.DataFrameGroupBy.nth` can act as a reducer *or* a filter, see :ref:`here <groupby.nth>`. .. _groupby.aggregate.multifunc: @@ -732,7 +732,7 @@ and that the transformed data contains no NAs. .. note:: Some functions will automatically transform the input when applied to a - GroupBy object, but returning an object of the same shape as the original. + GroupBy object, but returning an object of the same shape as the original. Passing ``as_index=False`` will not affect these transformation methods. For example: ``fillna, ffill, bfill, shift.``. 
@@ -926,7 +926,7 @@ The dimension of the returned result can also change: In [11]: grouped.apply(f) -``apply`` on a Series can operate on a returned value from the applied function, +``apply`` on a Series can operate on a returned value from the applied function, that is itself a series, and possibly upcast the result to a DataFrame: .. ipython:: python @@ -984,20 +984,48 @@ will be (silently) dropped. Thus, this does not pose any problems: df.groupby('A').std() -Note that ``df.groupby('A').colname.std().`` is more efficient than +Note that ``df.groupby('A').colname.std().`` is more efficient than ``df.groupby('A').std().colname``, so if the result of an aggregation function -is only interesting over one column (here ``colname``), it may be filtered +is only interesting over one column (here ``colname``), it may be filtered *before* applying the aggregation function. +.. _groupby.observed: + +Handling of (un)observed Categorical values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When using a ``Categorical`` grouper (as a single or as part of multiple groupers), the ``observed`` keyword +controls whether to return a cartesian product of all possible groupers values (``observed=False``) or only those +that are observed groupers (``observed=True``). + +Show all values: + +.. ipython:: python + + pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], categories=['a', 'b']), observed=False).count() + +Show only the observed values: + +.. ipython:: python + + pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], categories=['a', 'b']), observed=True).count() + +The returned dtype of the grouped will *always* include *all* of the categories that were grouped. + +.. ipython:: python + + s = pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], categories=['a', 'b']), observed=False).count() + s.index.dtype + .. 
_groupby.missing: NA and NaT group handling ~~~~~~~~~~~~~~~~~~~~~~~~~ -If there are any NaN or NaT values in the grouping key, these will be -automatically excluded. In other words, there will never be an "NA group" or -"NaT group". This was not the case in older versions of pandas, but users were -generally discarding the NA group anyway (and supporting it was an +If there are any NaN or NaT values in the grouping key, these will be +automatically excluded. In other words, there will never be an "NA group" or +"NaT group". This was not the case in older versions of pandas, but users were +generally discarding the NA group anyway (and supporting it was an implementation headache). Grouping with ordered factors @@ -1084,8 +1112,8 @@ This shows the first or last n rows from each group. Taking the nth row of each group ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To select from a DataFrame or Series the nth item, use -:meth:`~pd.core.groupby.DataFrameGroupBy.nth`. This is a reduction method, and +To select from a DataFrame or Series the nth item, use +:meth:`~pd.core.groupby.DataFrameGroupBy.nth`. This is a reduction method, and will return a single row (or no row) per group if you pass an int for n: .. ipython:: python @@ -1153,7 +1181,7 @@ Enumerate groups .. versionadded:: 0.20.2 To see the ordering of the groups (as opposed to the order of rows -within a group given by ``cumcount``) you can use +within a group given by ``cumcount``) you can use :meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`. 
@@ -1273,7 +1301,7 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on Multi-column factorization ~~~~~~~~~~~~~~~~~~~~~~~~~~ -By using :meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`, we can extract +By using :meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`, we can extract information about the groups in a way similar to :func:`factorize` (as described further in the :ref:`reshaping API <reshaping.factorize>`) but which applies naturally to multiple columns of mixed type and different diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 604b68b650201..5af703822829b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -396,6 +396,58 @@ documentation. If you build an extension array, publicize it on our .. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest/ +.. _whatsnew_0230.enhancements.categorical_grouping: + +Categorical Groupers has gained an observed keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In previous versions, grouping by 1 or more categorical columns would result in an index that was the cartesian product of all of the categories for +each grouper, not just the observed values.``.groupby()`` has gained the ``observed`` keyword to toggle this behavior. The default remains backward +compatible (generate a cartesian product). (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`) + + +.. ipython:: python + + cat1 = pd.Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = pd.Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = pd.DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df['C'] = ['foo', 'bar'] * 2 + df + +To show all values, the previous behavior: + +.. ipython:: python + + df.groupby(['A', 'B', 'C'], observed=False).count() + + +To show only observed values: + +.. 
ipython:: python + + df.groupby(['A', 'B', 'C'], observed=True).count() + +For pivoting operations, this behavior is *already* controlled by the ``dropna`` keyword: + +.. ipython:: python + + cat1 = pd.Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = pd.Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df + +.. ipython:: python + + pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=True) + pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=False) + + .. _whatsnew_0230.enhancements.other: Other Enhancements diff --git a/pandas/conftest.py b/pandas/conftest.py index 559b5e44631b6..c4aab1b632b00 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -66,6 +66,17 @@ def ip(): return InteractiveShell() +@pytest.fixture(params=[True, False, None]) +def observed(request): + """ pass in the observed keyword to groupby for [True, False] + This indicates whether categoricals should return values for + values which are not in the grouper [False / None], or only values which + appear in the grouper [True]. [None] is supported for future compatibility + if we decide to change the default (and would need to warn if this + parameter is not passed)""" + return request.param + + @pytest.fixture(params=[None, 'gzip', 'bz2', 'zip', pytest.param('xz', marks=td.skip_if_no_lzma)]) def compression(request): diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 517c21cc1bc3a..f91782459df67 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -647,8 +647,13 @@ def _set_categories(self, categories, fastpath=False): self._dtype = new_dtype - def _codes_for_groupby(self, sort): + def _codes_for_groupby(self, sort, observed): """ + Code the categories to ensure we can groupby for categoricals. 
+ + If observed=True, we return a new Categorical with the observed + categories only. + If sort=False, return a copy of self, coded with categories as returned by .unique(), followed by any categories not appearing in the data. If sort=True, return self. @@ -661,6 +666,8 @@ def _codes_for_groupby(self, sort): ---------- sort : boolean The value of the sort parameter groupby was called with. + observed : boolean + Account only for the observed values Returns ------- @@ -671,6 +678,26 @@ def _codes_for_groupby(self, sort): categories in the original order. """ + # we only care about observed values + if observed: + unique_codes = unique1d(self.codes) + cat = self.copy() + + take_codes = unique_codes[unique_codes != -1] + if self.ordered: + take_codes = np.sort(take_codes) + + # we recode according to the uniques + categories = self.categories.take(take_codes) + codes = _recode_for_categories(self.codes, + self.categories, + categories) + + # return a new categorical that maps our new codes + # and categories + dtype = CategoricalDtype(categories, ordered=self.ordered) + return type(self)(codes, dtype=dtype, fastpath=True) + # Already sorted according to self.categories; all is fine if sort: return self @@ -2161,7 +2188,7 @@ def unique(self): # exclude nan from indexer for categories take_codes = unique_codes[unique_codes != -1] if self.ordered: - take_codes = sorted(take_codes) + take_codes = np.sort(take_codes) return cat.set_categories(cat.categories.take(take_codes)) def _values_for_factorize(self): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index af19acbb416ee..e68662037b43d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6599,7 +6599,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): axis=axis, inplace=inplace) def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, - group_keys=True, squeeze=False, **kwargs): + group_keys=True, squeeze=False, observed=None, **kwargs): """ Group series using 
mapper (dict or key function, apply given function to group, return result as series) or by a series of columns. @@ -6632,6 +6632,13 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, squeeze : boolean, default False reduce the dimensionality of the return type if possible, otherwise return a consistent type + observed : boolean, default None + if True: only show observed values for categorical groupers. + if False: show all values for categorical groupers. + if None: if any categorical groupers, show a FutureWarning, + default to False. + + .. versionadded:: 0.23.0 Returns ------- @@ -6665,7 +6672,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, axis = self._get_axis_number(axis) return groupby(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, - **kwargs) + observed=observed, **kwargs) def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 8c20d62117e25..8613ab4d8c59d 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -556,7 +556,8 @@ class _GroupBy(PandasObject, SelectionMixin): def __init__(self, obj, keys=None, axis=0, level=None, grouper=None, exclusions=None, selection=None, as_index=True, - sort=True, group_keys=True, squeeze=False, **kwargs): + sort=True, group_keys=True, squeeze=False, + observed=None, **kwargs): self._selection = selection @@ -576,6 +577,7 @@ def __init__(self, obj, keys=None, axis=0, level=None, self.sort = sort self.group_keys = group_keys self.squeeze = squeeze + self.observed = observed self.mutated = kwargs.pop('mutated', False) if grouper is None: @@ -583,6 +585,7 @@ def __init__(self, obj, keys=None, axis=0, level=None, axis=axis, level=level, sort=sort, + observed=observed, mutated=self.mutated) self.obj = obj @@ -1661,10 +1664,11 @@ def nth(self, n, dropna=None): if dropna not in 
['any', 'all']: if isinstance(self._selected_obj, Series) and dropna is True: - warnings.warn("the dropna='%s' keyword is deprecated," + warnings.warn("the dropna={dropna} keyword is deprecated," "use dropna='all' instead. " "For a Series groupby, dropna must be " - "either None, 'any' or 'all'." % (dropna), + "either None, 'any' or 'all'.".format( + dropna=dropna), FutureWarning, stacklevel=2) dropna = 'all' @@ -2331,27 +2335,30 @@ def ngroups(self): def recons_labels(self): comp_ids, obs_ids, _ = self.group_info labels = (ping.labels for ping in self.groupings) - return decons_obs_group_ids(comp_ids, - obs_ids, self.shape, labels, xnull=True) + return decons_obs_group_ids( + comp_ids, obs_ids, self.shape, labels, xnull=True) @cache_readonly def result_index(self): if not self.compressed and len(self.groupings) == 1: - return self.groupings[0].group_index.rename(self.names[0]) - - return MultiIndex(levels=[ping.group_index for ping in self.groupings], - labels=self.recons_labels, - verify_integrity=False, - names=self.names) + return self.groupings[0].result_index.rename(self.names[0]) + + labels = self.recons_labels + levels = [ping.result_index for ping in self.groupings] + result = MultiIndex(levels=levels, + labels=labels, + verify_integrity=False, + names=self.names) + return result def get_group_levels(self): if not self.compressed and len(self.groupings) == 1: - return [self.groupings[0].group_index] + return [self.groupings[0].result_index] name_list = [] for ping, labels in zip(self.groupings, self.recons_labels): labels = _ensure_platform_int(labels) - levels = ping.group_index.take(labels) + levels = ping.result_index.take(labels) name_list.append(levels) @@ -2883,6 +2890,8 @@ class Grouping(object): obj : name : level : + observed : boolean, default False + If we are a Categorical, use the observed values in_axis : if the Grouping is a column in self.obj and hence among Groupby.exclusions list @@ -2898,14 +2907,16 @@ class Grouping(object): """ def 
__init__(self, index, grouper=None, obj=None, name=None, level=None, - sort=True, in_axis=False): + sort=True, observed=None, in_axis=False): self.name = name self.level = level self.grouper = _convert_grouper(index, grouper) + self.all_grouper = None self.index = index self.sort = sort self.obj = obj + self.observed = observed self.in_axis = in_axis # right place for this? @@ -2953,17 +2964,30 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # a passed Categorical elif is_categorical_dtype(self.grouper): - self.grouper = self.grouper._codes_for_groupby(self.sort) + # observed can be True/False/None + # we treat None as False. If in the future + # we need to warn if observed is not passed + # then we have this option + # gh-20583 + + self.all_grouper = self.grouper + self.grouper = self.grouper._codes_for_groupby( + self.sort, observed) + categories = self.grouper.categories # we make a CategoricalIndex out of the cat grouper # preserving the categories / ordered attributes self._labels = self.grouper.codes + if observed: + codes = algorithms.unique1d(self.grouper.codes) + else: + codes = np.arange(len(categories)) - c = self.grouper.categories self._group_index = CategoricalIndex( - Categorical.from_codes(np.arange(len(c)), - categories=c, - ordered=self.grouper.ordered)) + Categorical.from_codes( + codes=codes, + categories=categories, + ordered=self.grouper.ordered)) # we are done if isinstance(self.grouper, Grouping): @@ -3022,6 +3046,22 @@ def labels(self): self._make_labels() return self._labels + @cache_readonly + def result_index(self): + if self.all_grouper is not None: + all_categories = self.all_grouper.categories + + # we re-order to the original category orderings + if self.sort: + return self.group_index.set_categories(all_categories) + + # we are not sorting, so add unobserved to the end + categories = self.group_index.categories + return self.group_index.add_categories( + all_categories[~all_categories.isin(categories)]) + 
+ return self.group_index + @property def group_index(self): if self._group_index is None: @@ -3048,7 +3088,7 @@ def groups(self): def _get_grouper(obj, key=None, axis=0, level=None, sort=True, - mutated=False, validate=True): + observed=None, mutated=False, validate=True): """ create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. @@ -3065,6 +3105,9 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, are and then creates a Grouping for each one, combined into a BaseGrouper. + If observed & we have a categorical grouper, only show the observed + values + If validate, then check for key/level overlaps """ @@ -3243,6 +3286,7 @@ def is_in_obj(gpr): name=name, level=level, sort=sort, + observed=observed, in_axis=in_axis) \ if not isinstance(gpr, Grouping) else gpr @@ -4154,7 +4198,7 @@ def first_not_none(values): not_indexed_same=not_indexed_same) elif self.grouper.groupings is not None: if len(self.grouper.groupings) > 1: - key_index = MultiIndex.from_tuples(keys, names=key_names) + key_index = self.grouper.result_index else: ping = self.grouper.groupings[0] @@ -4244,8 +4288,9 @@ def first_not_none(values): # normally use vstack as its faster than concat # and if we have mi-columns - if isinstance(v.index, - MultiIndex) or key_index is None: + if (isinstance(v.index, MultiIndex) or + key_index is None or + isinstance(key_index, MultiIndex)): stacked_values = np.vstack(map(np.asarray, values)) result = DataFrame(stacked_values, index=key_index, columns=index) @@ -4696,6 +4741,14 @@ def _reindex_output(self, result): This can re-expand the output space """ + + # TODO(jreback): remove completely + # when observed parameter is defaulted to True + # gh-20583 + + if self.observed: + return result + groupings = self.grouper.groupings if groupings is None: return result diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 71caa098c7a28..3ffef5804acf7 100644 --- 
a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -782,9 +782,9 @@ def _concat_same_dtype(self, to_concat, name): result.name = name return result - def _codes_for_groupby(self, sort): + def _codes_for_groupby(self, sort, observed): """ Return a Categorical adjusted for groupby """ - return self.values._codes_for_groupby(sort) + return self.values._codes_for_groupby(sort, observed) @classmethod def _add_comparison_methods(cls): diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 74a9b59d3194a..39fb57e68c9c0 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -79,7 +79,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', pass values = list(values) - grouped = data.groupby(keys) + grouped = data.groupby(keys, observed=dropna) agged = grouped.agg(aggfunc) table = agged @@ -120,6 +120,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', data = data[data.notna().all(axis=1)] table = _add_margins(table, data, values, rows=index, cols=columns, aggfunc=aggfunc, + observed=dropna, margins_name=margins_name, fill_value=fill_value) # discard the top level @@ -138,7 +139,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', def _add_margins(table, data, values, rows, cols, aggfunc, - margins_name='All', fill_value=None): + observed=None, margins_name='All', fill_value=None): if not isinstance(margins_name, compat.string_types): raise ValueError('margins_name argument must be a string') @@ -168,6 +169,7 @@ def _add_margins(table, data, values, rows, cols, aggfunc, if values: marginal_result_set = _generate_marginal_results(table, data, values, rows, cols, aggfunc, + observed, grand_margin, margins_name) if not isinstance(marginal_result_set, tuple): @@ -175,7 +177,7 @@ def _add_margins(table, data, values, rows, cols, aggfunc, result, margin_keys, row_margin = marginal_result_set else: marginal_result_set = 
_generate_marginal_results_without_values( - table, data, rows, cols, aggfunc, margins_name) + table, data, rows, cols, aggfunc, observed, margins_name) if not isinstance(marginal_result_set, tuple): return marginal_result_set result, margin_keys, row_margin = marginal_result_set @@ -230,6 +232,7 @@ def _compute_grand_margin(data, values, aggfunc, def _generate_marginal_results(table, data, values, rows, cols, aggfunc, + observed, grand_margin, margins_name='All'): if len(cols) > 0: @@ -241,10 +244,13 @@ def _all_key(key): return (key, margins_name) + ('',) * (len(cols) - 1) if len(rows) > 0: - margin = data[rows + values].groupby(rows).agg(aggfunc) + margin = data[rows + values].groupby( + rows, observed=observed).agg(aggfunc) cat_axis = 1 - for key, piece in table.groupby(level=0, axis=cat_axis): + for key, piece in table.groupby(level=0, + axis=cat_axis, + observed=observed): all_key = _all_key(key) # we are going to mutate this, so need to copy! @@ -264,7 +270,9 @@ def _all_key(key): else: margin = grand_margin cat_axis = 0 - for key, piece in table.groupby(level=0, axis=cat_axis): + for key, piece in table.groupby(level=0, + axis=cat_axis, + observed=observed): all_key = _all_key(key) table_pieces.append(piece) table_pieces.append(Series(margin[key], index=[all_key])) @@ -279,7 +287,8 @@ def _all_key(key): margin_keys = table.columns if len(cols) > 0: - row_margin = data[cols + values].groupby(cols).agg(aggfunc) + row_margin = data[cols + values].groupby( + cols, observed=observed).agg(aggfunc) row_margin = row_margin.stack() # slight hack @@ -293,7 +302,7 @@ def _all_key(key): def _generate_marginal_results_without_values( table, data, rows, cols, aggfunc, - margins_name='All'): + observed, margins_name='All'): if len(cols) > 0: # need to "interleave" the margins margin_keys = [] @@ -304,14 +313,17 @@ def _all_key(): return (margins_name, ) + ('', ) * (len(cols) - 1) if len(rows) > 0: - margin = data[rows].groupby(rows).apply(aggfunc) + margin = 
data[rows].groupby(rows, + observed=observed).apply(aggfunc) all_key = _all_key() table[all_key] = margin result = table margin_keys.append(all_key) else: - margin = data.groupby(level=0, axis=0).apply(aggfunc) + margin = data.groupby(level=0, + axis=0, + observed=observed).apply(aggfunc) all_key = _all_key() table[all_key] = margin result = table @@ -322,7 +334,7 @@ def _all_key(): margin_keys = table.columns if len(cols): - row_margin = data[cols].groupby(cols).apply(aggfunc) + row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc) else: row_margin = Series(np.nan, index=result.columns) diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index 5bd239f8a3034..b60eb89e87da5 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -573,7 +573,7 @@ def test_sort_index_intervalindex(self): bins=[-3, -0.5, 0, 0.5, 3]) model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2']) - result = model.groupby(['X1', 'X2']).mean().unstack() + result = model.groupby(['X1', 'X2'], observed=True).mean().unstack() expected = IntervalIndex.from_tuples( [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 80383c895a5e5..48a45e93e1e8e 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -158,35 +158,46 @@ def test__cython_agg_general(op, targop): ('min', np.min), ('max', np.max), ] ) -def test_cython_agg_empty_buckets(op, targop): +def test_cython_agg_empty_buckets(op, targop, observed): df = pd.DataFrame([11, 12, 13]) grps = range(0, 55, 5) # calling _cython_agg_general directly, instead of via the user API # which sets different values for min_count, so do that here. 
- result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) - expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) + g = df.groupby(pd.cut(df[0], grps), observed=observed) + result = g._cython_agg_general(op) + + g = df.groupby(pd.cut(df[0], grps), observed=observed) + expected = g.agg(lambda x: targop(x)) tm.assert_frame_equal(result, expected) -def test_cython_agg_empty_buckets_nanops(): +def test_cython_agg_empty_buckets_nanops(observed): # GH-18869 can't call nanops on empty groups, so hardcode expected # for these df = pd.DataFrame([11, 12, 13], columns=['a']) grps = range(0, 25, 5) # add / sum - result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + result = df.groupby(pd.cut(df['a'], grps), + observed=observed)._cython_agg_general('add') intervals = pd.interval_range(0, 20, freq=5) expected = pd.DataFrame( {"a": [0, 0, 36, 0]}, index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + if observed: + expected = expected[expected.a != 0] + tm.assert_frame_equal(result, expected) # prod - result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + result = df.groupby(pd.cut(df['a'], grps), + observed=observed)._cython_agg_general('prod') expected = pd.DataFrame( {"a": [1, 1, 1716, 1]}, index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + if observed: + expected = expected[expected.a != 1] + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index a10f7f6e46210..34489051efc18 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -488,15 +488,17 @@ def test_agg_structs_series(structure, expected): @pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") -def test_agg_category_nansum(): +def test_agg_category_nansum(observed): categories = ['a', 'b', 'c'] df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], 
categories=categories), 'B': [1, 2, 3]}) - result = df.groupby("A").B.agg(np.nansum) + result = df.groupby("A", observed=observed).B.agg(np.nansum) expected = pd.Series([3, 3, 0], index=pd.CategoricalIndex(['a', 'b', 'c'], categories=categories, name='A'), name='B') + if observed: + expected = expected[expected != 0] tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 160b60e69f39d..e0793b8e1bd64 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -5,16 +5,43 @@ import pytest import numpy as np -from numpy import nan - import pandas as pd from pandas import (Index, MultiIndex, CategoricalIndex, - DataFrame, Categorical, Series, Interval, qcut) + DataFrame, Categorical, Series, qcut) from pandas.util.testing import assert_frame_equal, assert_series_equal import pandas.util.testing as tm -def test_groupby(): +def cartesian_product_for_groupers(result, args, names): + """ Reindex to a cartesian production for the groupers, + preserving the nature (Categorical) of each grouper """ + + def f(a): + if isinstance(a, (CategoricalIndex, Categorical)): + categories = a.categories + a = Categorical.from_codes(np.arange(len(categories)), + categories=categories, + ordered=a.ordered) + return a + + index = pd.MultiIndex.from_product(map(f, args), names=names) + return result.reindex(index).sort_index() + + +def test_apply_use_categorical_name(df): + cats = qcut(df.C, 4) + + def get_stats(group): + return {'min': group.min(), + 'max': group.max(), + 'count': group.count(), + 'mean': group.mean()} + + result = df.groupby(cats, observed=False).D.apply(get_stats) + assert result.index.names[0] == 'C' + + +def test_basic(): cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c", "d"], ordered=True) @@ -22,56 +49,29 @@ def test_groupby(): exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True) 
expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) - result = data.groupby("b").mean() + result = data.groupby("b", observed=False).mean() tm.assert_frame_equal(result, expected) - raw_cat1 = Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - raw_cat2 = Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) + cat1 = Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) # single grouper - gb = df.groupby("A") + gb = df.groupby("A", observed=False) exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)}) result = gb.sum() tm.assert_frame_equal(result, expected) - # multiple groupers - gb = df.groupby(['A', 'B']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True)], - names=['A', 'B']) - expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, - np.nan, np.nan, np.nan]}, - index=exp_index) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers with a non-cat - df = df.copy() - df['C'] = ['foo', 'bar'] * 2 - gb = df.groupby(['A', 'B', 'C']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True), - ['foo', 'bar']], - names=['A', 'B', 'C']) - expected = DataFrame({'values': Series( - np.nan, index=exp_index)}).sort_index() - expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4] - result = gb.sum() - tm.assert_frame_equal(result, expected) - # GH 8623 x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], [1, 'John P. 
Doe']], columns=['person_id', 'person_name']) x['person_name'] = Categorical(x.person_name) - g = x.groupby(['person_id']) + g = x.groupby(['person_id'], observed=False) result = g.transform(lambda x: x) tm.assert_frame_equal(result, x[['person_name']]) @@ -93,36 +93,48 @@ def f(x): df = DataFrame({"a": [5, 15, 25]}) c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) - result = df.a.groupby(c).transform(sum) + result = df.a.groupby(c, observed=False).transform(sum) tm.assert_series_equal(result, df['a']) tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), + df['a']) + tm.assert_frame_equal( + df.groupby(c, observed=False).transform(sum), + df[['a']]) tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) + df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), + df[['a']]) # Filter - tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) - tm.assert_frame_equal(df.groupby(c).filter(np.all), df) + tm.assert_series_equal( + df.a.groupby(c, observed=False).filter(np.all), + df['a']) + tm.assert_frame_equal( + df.groupby(c, observed=False).filter(np.all), + df) # Non-monotonic df = DataFrame({"a": [5, 15, 25, -5]}) c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) - result = df.a.groupby(c).transform(sum) + result = df.a.groupby(c, observed=False).transform(sum) tm.assert_series_equal(result, df['a']) tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), + df['a']) tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) + df.groupby(c, observed=False).transform(sum), + df[['a']]) + tm.assert_frame_equal( + df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), + df[['a']]) 
# GH 9603 df = DataFrame({'a': [1, 0, 0, 0]}) c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd'))) - result = df.groupby(c).apply(len) + result = df.groupby(c, observed=False).apply(len) exp_index = CategoricalIndex( c.values.categories, ordered=c.values.ordered) @@ -130,36 +142,56 @@ def f(x): expected.index.name = 'a' tm.assert_series_equal(result, expected) + # more basic + levels = ['foo', 'bar', 'baz', 'qux'] + codes = np.random.randint(0, 4, size=100) -def test_groupby_sort(): + cats = Categorical.from_codes(codes, levels, ordered=True) - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby - # This should result in a properly sorted Series so that the plot - # has a sorted x axis - # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + data = DataFrame(np.random.randn(100, 4)) - df = DataFrame({'value': np.random.randint(0, 10000, 100)}) - labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] - cat_labels = Categorical(labels, labels) + result = data.groupby(cats, observed=False).mean() - df = df.sort_values(by=['value'], ascending=True) - df['value_group'] = pd.cut(df.value, range(0, 10500, 500), - right=False, labels=cat_labels) + expected = data.groupby(np.asarray(cats), observed=False).mean() + exp_idx = CategoricalIndex(levels, categories=cats.categories, + ordered=True) + expected = expected.reindex(exp_idx) - res = df.groupby(['value_group'])['value_group'].count() - exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] - exp.index = CategoricalIndex(exp.index, name=exp.index.name) - tm.assert_series_equal(res, exp) + assert_frame_equal(result, expected) + grouped = data.groupby(cats, observed=False) + desc_result = grouped.describe() -def test_level_groupby_get_group(): + idx = cats.codes.argsort() + ord_labels = np.asarray(cats).take(idx) + ord_data = data.take(idx) + + exp_cats = Categorical(ord_labels, ordered=True, + categories=['foo', 
'bar', 'baz', 'qux']) + expected = ord_data.groupby( + exp_cats, sort=False, observed=False).describe() + assert_frame_equal(desc_result, expected) + + # GH 10460 + expc = Categorical.from_codes(np.arange(4).repeat(8), + levels, ordered=True) + exp = CategoricalIndex(expc) + tm.assert_index_equal((desc_result.stack().index + .get_level_values(0)), exp) + exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', + '75%', 'max'] * 4) + tm.assert_index_equal((desc_result.stack().index + .get_level_values(1)), exp) + + +def test_level_get_group(observed): # GH15155 df = DataFrame(data=np.arange(2, 22, 2), index=MultiIndex( levels=[pd.CategoricalIndex(["a", "b"]), range(10)], labels=[[0] * 5 + [1] * 5, range(10)], names=["Index1", "Index2"])) - g = df.groupby(level=["Index1"]) + g = df.groupby(level=["Index1"], observed=observed) # expected should equal test.loc[["a"]] # GH15166 @@ -173,94 +205,217 @@ def test_level_groupby_get_group(): assert_frame_equal(result, expected) -def test_apply_use_categorical_name(df): - cats = qcut(df.C, 4) +@pytest.mark.parametrize('ordered', [True, False]) +def test_apply(ordered): + # GH 10138 - def get_stats(group): - return {'min': group.min(), - 'max': group.max(), - 'count': group.count(), - 'mean': group.mean()} + dense = Categorical(list('abc'), ordered=ordered) + + # 'b' is in the categories but not in the list + missing = Categorical( + list('aaa'), categories=['a', 'b'], ordered=ordered) + values = np.arange(len(dense)) + df = DataFrame({'missing': missing, + 'dense': dense, + 'values': values}) + grouped = df.groupby(['missing', 'dense'], observed=True) + + # missing category 'b' should still exist in the output index + idx = MultiIndex.from_arrays( + [missing, dense], names=['missing', 'dense']) + expected = DataFrame([0, 1, 2.], + index=idx, + columns=['values']) + + result = grouped.apply(lambda x: np.mean(x)) + assert_frame_equal(result, expected) - result = df.groupby(cats).D.apply(get_stats) - assert result.index.names[0] 
== 'C' + # we coerce back to ints + expected = expected.astype('int') + result = grouped.mean() + assert_frame_equal(result, expected) + result = grouped.agg(np.mean) + assert_frame_equal(result, expected) -def test_apply_categorical_data(): - # GH 10138 - for ordered in [True, False]: - dense = Categorical(list('abc'), ordered=ordered) - # 'b' is in the categories but not in the list - missing = Categorical( - list('aaa'), categories=['a', 'b'], ordered=ordered) - values = np.arange(len(dense)) - df = DataFrame({'missing': missing, - 'dense': dense, - 'values': values}) - grouped = df.groupby(['missing', 'dense']) - - # missing category 'b' should still exist in the output index - idx = MultiIndex.from_product( - [Categorical(['a', 'b'], ordered=ordered), - Categorical(['a', 'b', 'c'], ordered=ordered)], - names=['missing', 'dense']) - expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan], - index=idx, - columns=['values']) - - assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected) - assert_frame_equal(grouped.mean(), expected) - assert_frame_equal(grouped.agg(np.mean), expected) - - # but for transform we should still get back the original index - idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']], - names=['missing', 'dense']) - expected = Series(1, index=idx) - assert_series_equal(grouped.apply(lambda x: 1), expected) - - -def test_groupby_categorical(): - levels = ['foo', 'bar', 'baz', 'qux'] - codes = np.random.randint(0, 4, size=100) + # but for transform we should still get back the original index + idx = MultiIndex.from_arrays([missing, dense], + names=['missing', 'dense']) + expected = Series(1, index=idx) + result = grouped.apply(lambda x: 1) + assert_series_equal(result, expected) + + +def test_observed(observed): + # multiple groupers, don't re-expand the output space + # of the grouper + # gh-14942 (implement) + # gh-10132 (back-compat) + # gh-8138 (back-compat) + # gh-8869 + + cat1 = Categorical(["a", "a", "b", "b"], + 
categories=["a", "b", "z"], ordered=True) + cat2 = Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df['C'] = ['foo', 'bar'] * 2 - cats = Categorical.from_codes(codes, levels, ordered=True) + # multiple groupers with a non-cat + gb = df.groupby(['A', 'B', 'C'], observed=observed) + exp_index = pd.MultiIndex.from_arrays( + [cat1, cat2, ['foo', 'bar'] * 2], + names=['A', 'B', 'C']) + expected = DataFrame({'values': Series( + [1, 2, 3, 4], index=exp_index)}).sort_index() + result = gb.sum() + if not observed: + expected = cartesian_product_for_groupers( + expected, + [cat1, cat2, ['foo', 'bar']], + list('ABC')) - data = DataFrame(np.random.randn(100, 4)) + tm.assert_frame_equal(result, expected) - result = data.groupby(cats).mean() + gb = df.groupby(['A', 'B'], observed=observed) + exp_index = pd.MultiIndex.from_arrays( + [cat1, cat2], + names=['A', 'B']) + expected = DataFrame({'values': [1, 2, 3, 4]}, + index=exp_index) + result = gb.sum() + if not observed: + expected = cartesian_product_for_groupers( + expected, + [cat1, cat2], + list('AB')) - expected = data.groupby(np.asarray(cats)).mean() - exp_idx = CategoricalIndex(levels, categories=cats.categories, - ordered=True) - expected = expected.reindex(exp_idx) + tm.assert_frame_equal(result, expected) - assert_frame_equal(result, expected) + # https://github.com/pandas-dev/pandas/issues/8138 + d = {'cat': + pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"], + ordered=True), + 'ints': [1, 1, 2, 2], + 'val': [10, 20, 30, 40]} + df = pd.DataFrame(d) - grouped = data.groupby(cats) - desc_result = grouped.describe() + # Grouping on a single column + groups_single_key = df.groupby("cat", observed=observed) + result = groups_single_key.mean() - idx = cats.codes.argsort() - ord_labels = np.asarray(cats).take(idx) - ord_data = data.take(idx) + exp_index = pd.CategoricalIndex(list('ab'), name="cat", + 
categories=list('abc'), + ordered=True) + expected = DataFrame({"ints": [1.5, 1.5], "val": [20., 30]}, + index=exp_index) + if not observed: + index = pd.CategoricalIndex(list('abc'), name="cat", + categories=list('abc'), + ordered=True) + expected = expected.reindex(index) - exp_cats = Categorical(ord_labels, ordered=True, - categories=['foo', 'bar', 'baz', 'qux']) - expected = ord_data.groupby(exp_cats, sort=False).describe() - assert_frame_equal(desc_result, expected) + tm.assert_frame_equal(result, expected) - # GH 10460 - expc = Categorical.from_codes(np.arange(4).repeat(8), - levels, ordered=True) - exp = CategoricalIndex(expc) - tm.assert_index_equal((desc_result.stack().index - .get_level_values(0)), exp) - exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', - '75%', 'max'] * 4) - tm.assert_index_equal((desc_result.stack().index - .get_level_values(1)), exp) + # Grouping on two columns + groups_double_key = df.groupby(["cat", "ints"], observed=observed) + result = groups_double_key.agg('mean') + expected = DataFrame( + {"val": [10, 30, 20, 40], + "cat": pd.Categorical(['a', 'a', 'b', 'b'], + categories=['a', 'b', 'c'], + ordered=True), + "ints": [1, 2, 1, 2]}).set_index(["cat", "ints"]) + if not observed: + expected = cartesian_product_for_groupers( + expected, + [df.cat.values, [1, 2]], + ['cat', 'ints']) + + tm.assert_frame_equal(result, expected) + # GH 10132 + for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]: + c, i = key + result = groups_double_key.get_group(key) + expected = df[(df.cat == c) & (df.ints == i)] + assert_frame_equal(result, expected) + + # gh-8869 + # with as_index + d = {'foo': [10, 8, 4, 8, 4, 1, 1], 'bar': [10, 20, 30, 40, 50, 60, 70], + 'baz': ['d', 'c', 'e', 'a', 'a', 'd', 'c']} + df = pd.DataFrame(d) + cat = pd.cut(df['foo'], np.linspace(0, 10, 3)) + df['range'] = cat + groups = df.groupby(['range', 'baz'], as_index=False, observed=observed) + result = groups.agg('mean') + + groups2 = df.groupby(['range', 'baz'], 
as_index=True, observed=observed) + expected = groups2.agg('mean').reset_index() + tm.assert_frame_equal(result, expected) + + +def test_observed_codes_remap(observed): + d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]} + df = pd.DataFrame(d) + values = pd.cut(df['C1'], [1, 2, 3, 6]) + values.name = "cat" + groups_double_key = df.groupby([values, 'C2'], observed=observed) + + idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], + names=["cat", "C2"]) + expected = DataFrame({"C1": [3, 3, 4, 5], + "C3": [10, 100, 200, 34]}, index=idx) + if not observed: + expected = cartesian_product_for_groupers( + expected, + [values.values, [1, 2, 3, 4]], + ['cat', 'C2']) + + result = groups_double_key.agg('mean') + tm.assert_frame_equal(result, expected) + + +def test_observed_perf(): + # we create a cartesian product, so this is + # non-performant if we don't use observed values + # gh-14942 + df = DataFrame({ + 'cat': np.random.randint(0, 255, size=30000), + 'int_id': np.random.randint(0, 255, size=30000), + 'other_id': np.random.randint(0, 10000, size=30000), + 'foo': 0}) + df['cat'] = df.cat.astype(str).astype('category') -def test_groupby_datetime_categorical(): + grouped = df.groupby(['cat', 'int_id', 'other_id'], observed=True) + result = grouped.count() + assert result.index.levels[0].nunique() == df.cat.nunique() + assert result.index.levels[1].nunique() == df.int_id.nunique() + assert result.index.levels[2].nunique() == df.other_id.nunique() + + +def test_observed_groups(observed): + # gh-20583 + # test that we have the appropriate groups + + cat = pd.Categorical(['a', 'c', 'a'], categories=['a', 'b', 'c']) + df = pd.DataFrame({'cat': cat, 'vals': [1, 2, 3]}) + g = df.groupby('cat', observed=observed) + + result = g.groups + if observed: + expected = {'a': Index([0, 2], dtype='int64'), + 'c': Index([1], dtype='int64')} + else: + expected = {'a': Index([0, 2], dtype='int64'), + 'b': Index([], dtype='int64'), + 'c': Index([1], dtype='int64')} + + 
tm.assert_dict_equal(result, expected) + + +def test_datetime(): # GH9049: ensure backward compatibility levels = pd.date_range('2014-01-01', periods=4) codes = np.random.randint(0, 4, size=100) @@ -268,9 +423,9 @@ def test_groupby_datetime_categorical(): cats = Categorical.from_codes(codes, levels, ordered=True) data = DataFrame(np.random.randn(100, 4)) - result = data.groupby(cats).mean() + result = data.groupby(cats, observed=False).mean() - expected = data.groupby(np.asarray(cats)).mean() + expected = data.groupby(np.asarray(cats), observed=False).mean() expected = expected.reindex(levels) expected.index = CategoricalIndex(expected.index, categories=expected.index, @@ -278,13 +433,13 @@ def test_groupby_datetime_categorical(): assert_frame_equal(result, expected) - grouped = data.groupby(cats) + grouped = data.groupby(cats, observed=False) desc_result = grouped.describe() idx = cats.codes.argsort() ord_labels = cats.take_nd(idx) ord_data = data.take(idx) - expected = ord_data.groupby(ord_labels).describe() + expected = ord_data.groupby(ord_labels, observed=False).describe() assert_frame_equal(desc_result, expected) tm.assert_index_equal(desc_result.index, expected.index) tm.assert_index_equal( @@ -303,7 +458,7 @@ def test_groupby_datetime_categorical(): .get_level_values(1)), exp) -def test_groupby_categorical_index(): +def test_categorical_index(): s = np.random.RandomState(12345) levels = ['foo', 'bar', 'baz', 'qux'] @@ -315,23 +470,23 @@ def test_groupby_categorical_index(): df['cats'] = cats # with a cat index - result = df.set_index('cats').groupby(level=0).sum() - expected = df[list('abcd')].groupby(cats.codes).sum() + result = df.set_index('cats').groupby(level=0, observed=False).sum() + expected = df[list('abcd')].groupby(cats.codes, observed=False).sum() expected.index = CategoricalIndex( Categorical.from_codes( [0, 1, 2, 3], levels, ordered=True), name='cats') assert_frame_equal(result, expected) # with a cat column, should produce a cat index - 
result = df.groupby('cats').sum() - expected = df[list('abcd')].groupby(cats.codes).sum() + result = df.groupby('cats', observed=False).sum() + expected = df[list('abcd')].groupby(cats.codes, observed=False).sum() expected.index = CategoricalIndex( Categorical.from_codes( [0, 1, 2, 3], levels, ordered=True), name='cats') assert_frame_equal(result, expected) -def test_groupby_describe_categorical_columns(): +def test_describe_categorical_columns(): # GH 11558 cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'], categories=['foo', 'bar', 'baz', 'qux'], @@ -343,14 +498,15 @@ def test_groupby_describe_categorical_columns(): tm.assert_categorical_equal(result.stack().columns.values, cats.values) -def test_groupby_unstack_categorical(): +def test_unstack_categorical(): # GH11558 (example is taken from the original issue) df = pd.DataFrame({'a': range(10), 'medium': ['A', 'B'] * 5, 'artist': list('XYXXY') * 2}) df['medium'] = df['medium'].astype('category') - gcat = df.groupby(['artist', 'medium'])['a'].count().unstack() + gcat = df.groupby( + ['artist', 'medium'], observed=False)['a'].count().unstack() result = gcat.describe() exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False, @@ -363,7 +519,7 @@ def test_groupby_unstack_categorical(): tm.assert_series_equal(result, expected) -def test_groupby_bins_unequal_len(): +def test_bins_unequal_len(): # GH3011 series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4]) bins = pd.cut(series.dropna().values, 4) @@ -374,47 +530,45 @@ def f(): pytest.raises(ValueError, f) -def test_groupby_multi_categorical_as_index(): +def test_as_index(): # GH13204 df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]), 'A': [10, 11, 11], 'B': [101, 102, 103]}) - result = df.groupby(['cat', 'A'], as_index=False).sum() - expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]), - 'A': [10, 11, 10, 11, 10, 11], - 'B': [101.0, nan, nan, 205.0, nan, nan]}, - columns=['cat', 'A', 'B']) + result = df.groupby(['cat', 'A'], 
as_index=False, observed=True).sum() + expected = DataFrame( + {'cat': Categorical([1, 2], categories=df.cat.cat.categories), + 'A': [10, 11], + 'B': [101, 205]}, + columns=['cat', 'A', 'B']) tm.assert_frame_equal(result, expected) # function grouper f = lambda r: df.loc[r, 'A'] - result = df.groupby(['cat', f], as_index=False).sum() - expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]), - 'A': [10.0, nan, nan, 22.0, nan, nan], - 'B': [101.0, nan, nan, 205.0, nan, nan]}, - columns=['cat', 'A', 'B']) + result = df.groupby(['cat', f], as_index=False, observed=True).sum() + expected = DataFrame( + {'cat': Categorical([1, 2], categories=df.cat.cat.categories), + 'A': [10, 22], + 'B': [101, 205]}, + columns=['cat', 'A', 'B']) tm.assert_frame_equal(result, expected) # another not in-axis grouper s = Series(['a', 'b', 'b'], name='cat2') - result = df.groupby(['cat', s], as_index=False).sum() - expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]), - 'A': [10.0, nan, nan, 22.0, nan, nan], - 'B': [101.0, nan, nan, 205.0, nan, nan]}, - columns=['cat', 'A', 'B']) + result = df.groupby(['cat', s], as_index=False, observed=True).sum() tm.assert_frame_equal(result, expected) # GH18872: conflicting names in desired index - pytest.raises(ValueError, lambda: df.groupby(['cat', - s.rename('cat')]).sum()) + with pytest.raises(ValueError): + df.groupby(['cat', s.rename('cat')], observed=True).sum() # is original index dropped? 
- expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]), - 'A': [10, 11, 10, 11, 10, 11], - 'B': [101.0, nan, nan, 205.0, nan, nan]}, - columns=['cat', 'A', 'B']) - group_columns = ['cat', 'A'] + expected = DataFrame( + {'cat': Categorical([1, 2], categories=df.cat.cat.categories), + 'A': [10, 11], + 'B': [101, 205]}, + columns=['cat', 'A', 'B']) for name in [None, 'X', 'B', 'cat']: df.index = Index(list("abc"), name=name) @@ -422,15 +576,17 @@ def test_groupby_multi_categorical_as_index(): if name in group_columns and name in df.index.names: with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df.groupby(group_columns, as_index=False).sum() + result = df.groupby( + group_columns, as_index=False, observed=True).sum() else: - result = df.groupby(group_columns, as_index=False).sum() + result = df.groupby( + group_columns, as_index=False, observed=True).sum() - tm.assert_frame_equal(result, expected, check_index_type=True) + tm.assert_frame_equal(result, expected) -def test_groupby_preserve_categories(): +def test_preserve_categories(): # GH-13179 categories = list('abc') @@ -439,8 +595,10 @@ def test_groupby_preserve_categories(): categories=categories, ordered=True)}) index = pd.CategoricalIndex(categories, categories, ordered=True) - tm.assert_index_equal(df.groupby('A', sort=True).first().index, index) - tm.assert_index_equal(df.groupby('A', sort=False).first().index, index) + tm.assert_index_equal( + df.groupby('A', sort=True, observed=False).first().index, index) + tm.assert_index_equal( + df.groupby('A', sort=False, observed=False).first().index, index) # ordered=False df = DataFrame({'A': pd.Categorical(list('ba'), @@ -449,13 +607,15 @@ def test_groupby_preserve_categories(): sort_index = pd.CategoricalIndex(categories, categories, ordered=False) nosort_index = pd.CategoricalIndex(list('bac'), list('bac'), ordered=False) - tm.assert_index_equal(df.groupby('A', sort=True).first().index, - sort_index) - 
tm.assert_index_equal(df.groupby('A', sort=False).first().index, - nosort_index) + tm.assert_index_equal( + df.groupby('A', sort=True, observed=False).first().index, + sort_index) + tm.assert_index_equal( + df.groupby('A', sort=False, observed=False).first().index, + nosort_index) -def test_groupby_preserve_categorical_dtype(): +def test_preserve_categorical_dtype(): # GH13743, GH13854 df = DataFrame({'A': [1, 2, 1, 1, 2], 'B': [10, 16, 22, 28, 34], @@ -475,38 +635,22 @@ def test_groupby_preserve_categorical_dtype(): categories=list("bac"), ordered=True)}) for col in ['C1', 'C2']: - result1 = df.groupby(by=col, as_index=False).mean() - result2 = df.groupby(by=col, as_index=True).mean().reset_index() - expected = exp_full.reindex(columns=result1.columns) - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - - # multiple grouper - exp_full = DataFrame({'A': [1, 1, 1, 2, 2, 2], - 'B': [np.nan, 20.0, np.nan, 25.0, np.nan, - np.nan], - 'C1': Categorical(list("bacbac"), - categories=list("bac"), - ordered=False), - 'C2': Categorical(list("bacbac"), - categories=list("bac"), - ordered=True)}) - for cols in [['A', 'C1'], ['A', 'C2']]: - result1 = df.groupby(by=cols, as_index=False).mean() - result2 = df.groupby(by=cols, as_index=True).mean().reset_index() + result1 = df.groupby(by=col, as_index=False, observed=False).mean() + result2 = df.groupby( + by=col, as_index=True, observed=False).mean().reset_index() expected = exp_full.reindex(columns=result1.columns) tm.assert_frame_equal(result1, expected) tm.assert_frame_equal(result2, expected) -def test_groupby_categorical_no_compress(): +def test_categorical_no_compress(): data = Series(np.random.randn(9)) codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True) - result = data.groupby(cats).mean() - exp = data.groupby(codes).mean() + result = data.groupby(cats, observed=False).mean() + exp = data.groupby(codes, observed=False).mean() 
exp.index = CategoricalIndex(exp.index, categories=cats.categories, ordered=cats.ordered) @@ -515,8 +659,8 @@ def test_groupby_categorical_no_compress(): codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3]) cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True) - result = data.groupby(cats).mean() - exp = data.groupby(codes).mean().reindex(cats.categories) + result = data.groupby(cats, observed=False).mean() + exp = data.groupby(codes, observed=False).mean().reindex(cats.categories) exp.index = CategoricalIndex(exp.index, categories=cats.categories, ordered=cats.ordered) assert_series_equal(result, exp) @@ -525,13 +669,34 @@ def test_groupby_categorical_no_compress(): categories=["a", "b", "c", "d"], ordered=True) data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) - result = data.groupby("b").mean() + result = data.groupby("b", observed=False).mean() result = result["a"].values exp = np.array([1, 2, 4, np.nan]) tm.assert_numpy_array_equal(result, exp) -def test_groupby_sort_categorical(): +def test_sort(): + + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8 + # This should result in a properly sorted Series so that the plot + # has a sorted x axis + # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + + df = DataFrame({'value': np.random.randint(0, 10000, 100)}) + labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=['value'], ascending=True) + df['value_group'] = pd.cut(df.value, range(0, 10500, 500), + right=False, labels=cat_labels) + + res = df.groupby(['value_group'], observed=False)['value_group'].count() + exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] + exp.index = CategoricalIndex(exp.index, name=exp.index.name) + tm.assert_series_equal(res, exp) + + +def test_sort2(): # dataframe groupby sort was being ignored # GH 8868 df = DataFrame([['(7.5, 10]', 
10, 10], ['(7.5, 10]', 8, 20], @@ -543,35 +708,43 @@ def test_groupby_sort_categorical(): df['range'] = Categorical(df['range'], ordered=True) index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name='range', ordered=True) - result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], - columns=['foo', 'bar'], index=index) + expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], + columns=['foo', 'bar'], index=index) col = 'range' - assert_frame_equal(result_sort, df.groupby(col, sort=True).first()) + result_sort = df.groupby(col, sort=True, observed=False).first() + assert_frame_equal(result_sort, expected_sort) + # when categories is ordered, group is ordered by category's order - assert_frame_equal(result_sort, df.groupby(col, sort=False).first()) + expected_sort = result_sort + result_sort = df.groupby(col, sort=False, observed=False).first() + assert_frame_equal(result_sort, expected_sort) df['range'] = Categorical(df['range'], ordered=False) index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name='range') - result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], - columns=['foo', 'bar'], index=index) + expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], + columns=['foo', 'bar'], index=index) index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]'], categories=['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]'], name='range') - result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]], - index=index, columns=['foo', 'bar']) + expected_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]], + index=index, columns=['foo', 'bar']) col = 'range' + # this is an unordered categorical, but we allow this #### - assert_frame_equal(result_sort, df.groupby(col, sort=True).first()) - assert_frame_equal(result_nosort, df.groupby(col, sort=False).first()) + result_sort = df.groupby(col, sort=True, observed=False).first() + assert_frame_equal(result_sort, expected_sort) + 
+ result_nosort = df.groupby(col, sort=False, observed=False).first() + assert_frame_equal(result_nosort, expected_nosort) -def test_groupby_sort_categorical_datetimelike(): +def test_sort_datetimelike(): # GH10505 # use same data as test_groupby_sort_categorical, which category is @@ -600,9 +773,12 @@ def test_groupby_sort_categorical_datetimelike(): name='dt', ordered=True) col = 'dt' - assert_frame_equal(result_sort, df.groupby(col, sort=True).first()) + assert_frame_equal( + result_sort, df.groupby(col, sort=True, observed=False).first()) + # when categories is ordered, group is ordered by category's order - assert_frame_equal(result_sort, df.groupby(col, sort=False).first()) + assert_frame_equal( + result_sort, df.groupby(col, sort=False, observed=False).first()) # ordered = False df['dt'] = Categorical(df['dt'], ordered=False) @@ -620,65 +796,10 @@ def test_groupby_sort_categorical_datetimelike(): name='dt') col = 'dt' - assert_frame_equal(result_sort, df.groupby(col, sort=True).first()) - assert_frame_equal(result_nosort, df.groupby(col, sort=False).first()) - - -def test_groupby_categorical_two_columns(): - - # https://github.com/pandas-dev/pandas/issues/8138 - d = {'cat': - pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"], - ordered=True), - 'ints': [1, 1, 2, 2], - 'val': [10, 20, 30, 40]} - test = pd.DataFrame(d) - - # Grouping on a single column - groups_single_key = test.groupby("cat") - res = groups_single_key.agg('mean') - - exp_index = pd.CategoricalIndex(["a", "b", "c"], name="cat", - ordered=True) - exp = DataFrame({"ints": [1.5, 1.5, np.nan], "val": [20, 30, np.nan]}, - index=exp_index) - tm.assert_frame_equal(res, exp) - - # Grouping on two columns - groups_double_key = test.groupby(["cat", "ints"]) - res = groups_double_key.agg('mean') - exp = DataFrame({"val": [10, 30, 20, 40, np.nan, np.nan], - "cat": pd.Categorical(["a", "a", "b", "b", "c", "c"], - ordered=True), - "ints": [1, 2, 1, 2, 1, 2]}).set_index(["cat", "ints" - ]) - 
tm.assert_frame_equal(res, exp) - - # GH 10132 - for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]: - c, i = key - result = groups_double_key.get_group(key) - expected = test[(test.cat == c) & (test.ints == i)] - assert_frame_equal(result, expected) - - d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]} - test = pd.DataFrame(d) - values = pd.cut(test['C1'], [1, 2, 3, 6]) - values.name = "cat" - groups_double_key = test.groupby([values, 'C2']) - - res = groups_double_key.agg('mean') - nan = np.nan - idx = MultiIndex.from_product( - [Categorical([Interval(1, 2), Interval(2, 3), - Interval(3, 6)], ordered=True), - [1, 2, 3, 4]], - names=["cat", "C2"]) - exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3, - nan, nan, nan, nan, 4, 5], - "C3": [nan, nan, nan, nan, 10, 100, - nan, nan, nan, nan, 200, 34]}, index=idx) - tm.assert_frame_equal(res, exp) + assert_frame_equal( + result_sort, df.groupby(col, sort=True, observed=False).first()) + assert_frame_equal( + result_nosort, df.groupby(col, sort=False, observed=False).first()) def test_empty_sum(): @@ -689,22 +810,22 @@ def test_empty_sum(): expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') # 0 by default - result = df.groupby("A").B.sum() + result = df.groupby("A", observed=False).B.sum() expected = pd.Series([3, 1, 0], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count=0 - result = df.groupby("A").B.sum(min_count=0) + result = df.groupby("A", observed=False).B.sum(min_count=0) expected = pd.Series([3, 1, 0], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count=1 - result = df.groupby("A").B.sum(min_count=1) + result = df.groupby("A", observed=False).B.sum(min_count=1) expected = pd.Series([3, 1, np.nan], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count>1 - result = df.groupby("A").B.sum(min_count=2) + result = df.groupby("A", observed=False).B.sum(min_count=2) expected = pd.Series([3, np.nan, np.nan], 
expected_idx, name='B') tm.assert_series_equal(result, expected) @@ -718,16 +839,16 @@ def test_empty_prod(): expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') # 1 by default - result = df.groupby("A").B.prod() + result = df.groupby("A", observed=False).B.prod() expected = pd.Series([2, 1, 1], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count=0 - result = df.groupby("A").B.prod(min_count=0) + result = df.groupby("A", observed=False).B.prod(min_count=0) expected = pd.Series([2, 1, 1], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count=1 - result = df.groupby("A").B.prod(min_count=1) + result = df.groupby("A", observed=False).B.prod(min_count=1) expected = pd.Series([2, 1, np.nan], expected_idx, name='B') tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index ba1371fe9f931..f1d678db4ff7f 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -313,14 +313,14 @@ def test_cython_median(): tm.assert_frame_equal(rs, xp) -def test_median_empty_bins(): +def test_median_empty_bins(observed): df = pd.DataFrame(np.random.randint(0, 44, 500)) grps = range(0, 55, 5) bins = pd.cut(df[0], grps) - result = df.groupby(bins).median() - expected = df.groupby(bins).agg(lambda x: x.median()) + result = df.groupby(bins, observed=observed).median() + expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 743237f5b386c..c0f5c43b2fd35 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -251,7 +251,7 @@ def test_groupby_levels_and_columns(self): by_columns.columns = pd.Index(by_columns.columns, dtype=np.int64) tm.assert_frame_equal(by_levels, by_columns) - def 
test_groupby_categorical_index_and_columns(self): + def test_groupby_categorical_index_and_columns(self, observed): # GH18432 columns = ['A', 'B', 'A', 'B'] categories = ['B', 'A'] @@ -260,17 +260,26 @@ def test_groupby_categorical_index_and_columns(self): categories=categories, ordered=True) df = DataFrame(data=data, columns=cat_columns) - result = df.groupby(axis=1, level=0).sum() + result = df.groupby(axis=1, level=0, observed=observed).sum() expected_data = 2 * np.ones((5, 2), int) - expected_columns = CategoricalIndex(categories, - categories=categories, - ordered=True) + + if observed: + # if we are not-observed we undergo a reindex + # so need to adjust the output as our expected sets us up + # to be non-observed + expected_columns = CategoricalIndex(['A', 'B'], + categories=categories, + ordered=True) + else: + expected_columns = CategoricalIndex(categories, + categories=categories, + ordered=True) expected = DataFrame(data=expected_data, columns=expected_columns) assert_frame_equal(result, expected) # test transposed version df = DataFrame(data.T, index=cat_columns) - result = df.groupby(axis=0, level=0).sum() + result = df.groupby(axis=0, level=0, observed=observed).sum() expected = DataFrame(data=expected_data.T, index=expected_columns) assert_frame_equal(result, expected) @@ -572,11 +581,11 @@ def test_get_group(self): pytest.raises(ValueError, lambda: g.get_group(('foo', 'bar', 'baz'))) - def test_get_group_empty_bins(self): + def test_get_group_empty_bins(self, observed): d = pd.DataFrame([3, 1, 7, 6]) bins = [0, 5, 10, 15] - g = d.groupby(pd.cut(d[0], bins)) + g = d.groupby(pd.cut(d[0], bins), observed=observed) # TODO: should prob allow a str of Interval work as well # IOW '(0, 5]' diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 1004b40bfb4c1..76cdc1d2a195d 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -93,23 +93,24 @@ def test_pivot_table_dropna(self): def 
test_pivot_table_categorical(self): - raw_cat1 = Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - raw_cat2 = Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) - result = pd.pivot_table(df, values='values', index=['A', 'B']) - - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True)], + cat1 = Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + result = pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=True) + + exp_index = pd.MultiIndex.from_arrays( + [cat1, cat2], names=['A', 'B']) expected = DataFrame( - {'values': [1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan]}, + {'values': [1, 2, 3, 4]}, index=exp_index) tm.assert_frame_equal(result, expected) - def test_pivot_table_dropna_categoricals(self): + @pytest.mark.parametrize('dropna', [True, False]) + def test_pivot_table_dropna_categoricals(self, dropna): # GH 15193 categories = ['a', 'b', 'c', 'd'] @@ -118,30 +119,23 @@ def test_pivot_table_dropna_categoricals(self): 'C': range(0, 9)}) df['A'] = df['A'].astype(CDT(categories, ordered=False)) - result_true = df.pivot_table(index='B', columns='A', values='C', - dropna=True) + result = df.pivot_table(index='B', columns='A', values='C', + dropna=dropna) expected_columns = Series(['a', 'b', 'c'], name='A') expected_columns = expected_columns.astype( CDT(categories, ordered=False)) expected_index = Series([1, 2, 3], name='B') - expected_true = DataFrame([[0.0, 3.0, 6.0], - [1.0, 4.0, 7.0], - [2.0, 5.0, 8.0]], - index=expected_index, - columns=expected_columns,) - tm.assert_frame_equal(expected_true, result_true) - - result_false = df.pivot_table(index='B', columns='A', 
values='C', - dropna=False) - expected_columns = ( - Series(['a', 'b', 'c', 'd'], name='A').astype('category') - ) - expected_false = DataFrame([[0.0, 3.0, 6.0, np.NaN], - [1.0, 4.0, 7.0, np.NaN], - [2.0, 5.0, 8.0, np.NaN]], - index=expected_index, - columns=expected_columns,) - tm.assert_frame_equal(expected_false, result_false) + expected = DataFrame([[0, 3, 6], + [1, 4, 7], + [2, 5, 8]], + index=expected_index, + columns=expected_columns,) + if not dropna: + # add back the non observed to compare + expected = expected.reindex( + columns=Categorical(categories)).astype('float') + + tm.assert_frame_equal(result, expected) def test_pass_array(self): result = self.data.pivot_table( @@ -1068,7 +1062,7 @@ def test_pivot_table_margins_name_with_aggfunc_list(self): @pytest.mark.xfail(reason='GH 17035 (np.mean of ints is casted back to ' 'ints)') - def test_categorical_margins(self): + def test_categorical_margins(self, observed): # GH 10989 df = pd.DataFrame({'x': np.arange(8), 'y': np.arange(8) // 4, @@ -1078,12 +1072,12 @@ def test_categorical_margins(self): expected.index = Index([0, 1, 'All'], name='y') expected.columns = Index([0, 1, 'All'], name='z') - table = df.pivot_table('x', 'y', 'z', margins=True) + table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True) tm.assert_frame_equal(table, expected) @pytest.mark.xfail(reason='GH 17035 (np.mean of ints is casted back to ' 'ints)') - def test_categorical_margins_category(self): + def test_categorical_margins_category(self, observed): df = pd.DataFrame({'x': np.arange(8), 'y': np.arange(8) // 4, 'z': np.arange(8) % 2}) @@ -1094,16 +1088,17 @@ def test_categorical_margins_category(self): df.y = df.y.astype('category') df.z = df.z.astype('category') - table = df.pivot_table('x', 'y', 'z', margins=True) + table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True) tm.assert_frame_equal(table, expected) - def test_categorical_aggfunc(self): + def test_categorical_aggfunc(self, observed): # GH 9534 df 
= pd.DataFrame({"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]}) df["C1"] = df["C1"].astype("category") - result = df.pivot_table("V", index="C1", columns="C2", aggfunc="count") + result = df.pivot_table("V", index="C1", columns="C2", + dropna=observed, aggfunc="count") expected_index = pd.CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'], @@ -1118,7 +1113,7 @@ def test_categorical_aggfunc(self): columns=expected_columns) tm.assert_frame_equal(result, expected) - def test_categorical_pivot_index_ordering(self): + def test_categorical_pivot_index_ordering(self, observed): # GH 8731 df = pd.DataFrame({'Sales': [100, 120, 220], 'Month': ['January', 'January', 'January'], @@ -1130,18 +1125,19 @@ def test_categorical_pivot_index_ordering(self): result = df.pivot_table(values='Sales', index='Month', columns='Year', + dropna=observed, aggfunc='sum') expected_columns = pd.Int64Index([2013, 2014], name='Year') - expected_index = pd.CategoricalIndex(months, + expected_index = pd.CategoricalIndex(['January'], categories=months, ordered=False, name='Month') - expected_data = np.empty((12, 2)) - expected_data.fill(np.nan) - expected_data[0, :] = [320., 120.] - expected = pd.DataFrame(expected_data, + expected = pd.DataFrame([[320, 120]], index=expected_index, columns=expected_columns) + if not observed: + result = result.dropna().astype(np.int64) + tm.assert_frame_equal(result, expected) def test_pivot_table_not_series(self):
closes #14942 closes #15217 closes #17594 closes #8869 xref #8138
https://api.github.com/repos/pandas-dev/pandas/pulls/20583
2018-04-02T14:06:17Z
2018-05-01T15:09:11Z
2018-05-01T15:09:10Z
2019-01-30T17:44:14Z
TST: add tests for take() on empty arrays
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index c281bd80cb274..d49a0d799526a 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -458,11 +458,23 @@ def take(self, indexer, allow_fill=True, fill_value=None): Fill value to replace -1 values with. If applicable, this should use the sentinel missing value for this type. + Returns + ------- + ExtensionArray + + Raises + ------ + IndexError + When the indexer is out of bounds for the array. + Notes ----- This should follow pandas' semantics where -1 indicates missing values. Positions where indexer is ``-1`` should be filled with the missing value for this type. + This gives rise to the special case of a take on an empty + ExtensionArray that does not raises an IndexError straight away + when the `indexer` is all ``-1``. This is called by ``Series.__getitem__``, ``.loc``, ``iloc``, when the indexer is a sequence of values. @@ -477,6 +489,12 @@ def take(self, indexer, allow_fill=True, fill_value=None): def take(self, indexer, allow_fill=True, fill_value=None): indexer = np.asarray(indexer) mask = indexer == -1 + + # take on empty array not handled as desired by numpy + # in case of -1 (all missing take) + if not len(self) and mask.all(): + return type(self)([np.nan] * len(indexer)) + result = self.data.take(indexer) result[mask] = np.nan # NA for this type return type(self)(result) diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index 566ba1721d13c..4e2a65eba06dc 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -1,6 +1,8 @@ +import pytest import numpy as np import pandas as pd +import pandas.util.testing as tm from .base import BaseExtensionTests @@ -120,3 +122,48 @@ def test_take_sequence(self, data): assert result.iloc[0] == data[0] assert result.iloc[1] == data[1] assert result.iloc[2] == data[3] + + def test_take(self, data, na_value, na_cmp): + result = data.take([0, -1]) 
+ assert result.dtype == data.dtype + assert result[0] == data[0] + na_cmp(result[1], na_value) + + with tm.assert_raises_regex(IndexError, "out of bounds"): + data.take([len(data) + 1]) + + def test_take_empty(self, data, na_value, na_cmp): + empty = data[:0] + result = empty.take([-1]) + na_cmp(result[0], na_value) + + with tm.assert_raises_regex(IndexError, "cannot do a non-empty take"): + empty.take([0, 1]) + + @pytest.mark.xfail(reason="Series.take with extension array buggy for -1") + def test_take_series(self, data): + s = pd.Series(data) + result = s.take([0, -1]) + expected = pd.Series( + data._constructor_from_sequence([data[0], data[len(data) - 1]]), + index=[0, len(data) - 1]) + self.assert_series_equal(result, expected) + + def test_reindex(self, data, na_value): + s = pd.Series(data) + result = s.reindex([0, 1, 3]) + expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3]) + self.assert_series_equal(result, expected) + + n = len(data) + result = s.reindex([-1, 0, n]) + expected = pd.Series( + data._constructor_from_sequence([na_value, data[0], na_value]), + index=[-1, 0, n]) + self.assert_series_equal(result, expected) + + result = s.reindex([n, n + 1]) + expected = pd.Series( + data._constructor_from_sequence([na_value, na_value]), + index=[n, n + 1]) + self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py index 6abf1f7f9a65a..27c156c15203f 100644 --- a/pandas/tests/extension/category/test_categorical.py +++ b/pandas/tests/extension/category/test_categorical.py @@ -84,6 +84,19 @@ def test_getitem_scalar(self): # to break things by changing. 
pass + @pytest.mark.xfail(reason="Categorical.take buggy") + def test_take(self): + # TODO remove this once Categorical.take is fixed + pass + + @pytest.mark.xfail(reason="Categorical.take buggy") + def test_take_empty(self): + pass + + @pytest.mark.xfail(reason="test not written correctly for categorical") + def test_reindex(self): + pass + class TestSetitem(base.BaseSetitemTests): pass diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index f93d11f579f11..a8e88365b5648 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -81,6 +81,10 @@ def take(self, indexer, allow_fill=True, fill_value=None): indexer = np.asarray(indexer) mask = indexer == -1 + # take on empty array not handled as desired by numpy in case of -1 + if not len(self) and mask.all(): + return type(self)([self._na_value] * len(indexer)) + indexer = _ensure_platform_int(indexer) out = self.values.take(indexer) out[mask] = self._na_value diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index d9ae49d87804a..33843492cb706 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -89,8 +89,12 @@ def isna(self): return np.array([x == self._na_value for x in self.data]) def take(self, indexer, allow_fill=True, fill_value=None): - output = [self.data[loc] if loc != -1 else self._na_value - for loc in indexer] + try: + output = [self.data[loc] if loc != -1 else self._na_value + for loc in indexer] + except IndexError: + raise IndexError("Index is out of bounds or cannot do a " + "non-empty take from an empty array.") return self._constructor_from_sequence(output) def copy(self, deep=False):
Another noticed during geopandas testing: `ExtensionArray.take` needs to be able to handle the case where it is empty. Added a example implementation for Decimal/JSONArray. And apparently, this was also failing for Categorical, so fixed that as well.
https://api.github.com/repos/pandas-dev/pandas/pulls/20582
2018-04-02T12:48:48Z
2018-04-17T07:53:32Z
2018-04-17T07:53:32Z
2018-04-17T07:53:36Z
BUG: Series[EA].astype(str) works
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index b0a6086c450ef..e8fab3748bacf 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -655,7 +655,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, # astype formatting else: - values = self.values + values = self.get_values() else: values = self.get_values(dtype=dtype) diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py index 74fe8f196a089..7146443bf8de5 100644 --- a/pandas/tests/extension/base/casting.py +++ b/pandas/tests/extension/base/casting.py @@ -16,3 +16,8 @@ def test_tolist(self, data): result = pd.Series(data).tolist() expected = list(data) assert result == expected + + def test_astype_str(self, data): + result = pd.Series(data[:5]).astype(str) + expected = pd.Series(data[:5].astype(str)) + self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 5e9639c487c37..87668cc1196b6 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -127,7 +127,12 @@ def test_sort_values_missing(self, data_missing_for_sorting, ascending): class TestCasting(base.BaseCastingTests): - pass + @pytest.mark.xfail + def test_astype_str(self): + """This currently fails in NumPy on np.array(self, dtype=str) with + + *** ValueError: setting an array element with a sequence + """ class TestGroupby(base.BaseGroupbyTests):
Closes https://github.com/pandas-dev/pandas/issues/20578 cc @jorisvandenbossche
https://api.github.com/repos/pandas-dev/pandas/pulls/20581
2018-04-02T11:37:09Z
2018-04-03T06:32:08Z
2018-04-03T06:32:07Z
2018-04-03T06:32:14Z
BUG: Fixed Series.align(frame) with ExtensionArray
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index d19f19b7224a7..75434fcc2b40d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -12,6 +12,7 @@ is_complex, is_datetimetz, is_categorical_dtype, is_datetimelike, is_extension_type, + is_extension_array_dtype, is_object_dtype, is_datetime64tz_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, @@ -329,7 +330,7 @@ def maybe_promote(dtype, fill_value=np.nan): dtype = np.object_ # in case we have a string that looked like a number - if is_categorical_dtype(dtype): + if is_extension_array_dtype(dtype): pass elif is_datetimetz(dtype): pass diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index 6f4d5b40515be..efc22c19a3eef 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -1,4 +1,5 @@ import pytest +import numpy as np import pandas as pd from pandas.core.internals import ExtensionBlock @@ -64,6 +65,19 @@ def test_align_frame(self, data, na_value): self.assert_frame_equal(r1, e1) self.assert_frame_equal(r2, e2) + def test_align_series_frame(self, data, na_value): + # https://github.com/pandas-dev/pandas/issues/20576 + ser = pd.Series(data, name='a') + df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) + r1, r2 = ser.align(df) + + e1 = pd.Series( + data._constructor_from_sequence(list(data) + [na_value]), + name=ser.name) + + self.assert_series_equal(r1, e1) + self.assert_frame_equal(r2, df) + def test_set_frame_expand_regular_with_extension(self, data): df = pd.DataFrame({"A": [1] * len(data)}) df['B'] = data
Closes https://github.com/pandas-dev/pandas/issues/20576 cc @jorisvandenbossche
https://api.github.com/repos/pandas-dev/pandas/pulls/20580
2018-04-02T11:11:59Z
2018-04-03T06:33:35Z
2018-04-03T06:33:35Z
2018-04-03T06:33:35Z
TST: remove skip for values/index length mismatch in ExtensionArray tests
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py index 4ac04d71338fd..5ac3a84517fe9 100644 --- a/pandas/tests/extension/base/constructors.py +++ b/pandas/tests/extension/base/constructors.py @@ -41,10 +41,7 @@ def test_dataframe_from_series(self, data): assert result.shape == (len(data), 1) assert isinstance(result._data.blocks[0], ExtensionBlock) - @pytest.mark.xfail(reason="GH-19342") def test_series_given_mismatched_index_raises(self, data): - msg = 'Wrong number of items passed 3, placement implies 4' - with tm.assert_raises_regex(ValueError, None) as m: + msg = 'Length of passed values is 3, index implies 5' + with tm.assert_raises_regex(ValueError, msg): pd.Series(data[:3], index=[0, 1, 2, 3, 4]) - - assert m.match(msg)
https://github.com/pandas-dev/pandas/issues/19342 is fixed in the meantime
https://api.github.com/repos/pandas-dev/pandas/pulls/20577
2018-04-02T09:54:01Z
2018-04-02T13:35:29Z
2018-04-02T13:35:29Z
2018-04-03T21:02:15Z
Adding test_map_missing_mixed to test_apply.py in pandas test suite series
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 0780c846a6c19..b28b9f342695f 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -576,3 +576,14 @@ def f(x): result = s.map(f) exp = pd.Series(['Asia/Tokyo'] * 25, name='XX') tm.assert_series_equal(result, exp) + + @pytest.mark.parametrize("vals,mapping,exp", [ + (list('abc'), {np.nan: 'not NaN'}, [np.nan] * 3 + ['not NaN']), + (list('abc'), {'a': 'a letter'}, ['a letter'] + [np.nan] * 3), + (list(range(3)), {0: 42}, [42] + [np.nan] * 3)]) + def test_map_missing_mixed(self, vals, mapping, exp): + # GH20495 + s = pd.Series(vals + [np.nan]) + result = s.map(mapping) + + tm.assert_series_equal(result, pd.Series(exp))
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #20495 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry (n/a)
https://api.github.com/repos/pandas-dev/pandas/pulls/20574
2018-04-02T04:22:32Z
2018-04-03T19:22:12Z
2018-04-03T19:22:11Z
2018-04-03T19:22:17Z
Feat/scatter by size
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 6c3d07124215b..3108aa74ef913 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -19,7 +19,9 @@ is_integer, is_number, is_hashable, - is_iterator) + is_iterator, + is_numeric_dtype, + is_categorical_dtype) from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.core.generic import _shared_docs, _shared_doc_kwargs @@ -829,11 +831,30 @@ def _post_plot_logic(self, ax, data): class ScatterPlot(PlanePlot): _kind = 'scatter' - def __init__(self, data, x, y, s=None, c=None, **kwargs): + def __init__(self, data, x, y, s=None, c=None, size_factor=1, **kwargs): if s is None: - # hide the matplotlib default for size, in case we want to change - # the handling of this argument later + # Set default size if no argument is given. s = 20 + elif is_hashable(s) and s in data.columns: + # Handle the case where s is a label of a column of the df. + # The data is normalized to 200 * size_factor. + size_data = data[s] + if is_categorical_dtype(size_data): + if size_data.cat.ordered: + size_data = size_data.cat.codes + 1 + else: + raise TypeError( + "'s' must be numeric or ordered categorical dtype") + if is_numeric_dtype(size_data): + self.size_title = s + self.s_data_max = size_data.max() + self.size_factor = size_factor + self.bubble_points = 200 + s = self.bubble_points * size_factor * size_data / \ + self.s_data_max + else: + raise TypeError("'s' must be numeric or " + "ordered categorical dtype") super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs) if is_integer(c) and not self.data.columns.holds_integer(): c = self.data.columns[c] @@ -842,7 +863,6 @@ def __init__(self, data, x, y, s=None, c=None, **kwargs): def _make_plot(self): x, y, c, data = self.x, self.y, self.c, self.data ax = self.axes[0] - c_is_column = is_hashable(c) and c in self.data.columns # plot a colorbar only if a colormap is provided or necessary @@ -889,6 +909,66 @@ def _make_plot(self): 
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds) + def _sci_notation(self, num): + """ + Returns mantissa and exponent of the number passed in argument. + Example: + >>> _sci_notation(89278.8924) + (8.9, 4.0) + """ + scientific_notation = '{:e}'.format(num) + regexp = re.compile(r'^([+-]?\d\.\d).*e([+-]\d*)$') + mantis, expnt = regexp.search(scientific_notation).groups() + return float(mantis), float(expnt) + + def _legend_bubbles(self, s_data_max, size_factor, bubble_points): + """ + Computes and returns appropriate bubble sizes and labels for the + legend of a bubble plot. Creates 4 bubbles with round values for the + labels, the largest of which is close to the maximum of the data. + """ + coef, expnt = self._sci_notation(s_data_max) + labels_catalog = { + (9, 10): [10, 5, 2.5, 1], + (7, 9): [8, 4, 2, 0.5], + (5.5, 7): [6, 3, 1.5, 0.5], + (4.5, 5.5): [5, 2, 1, 0.2], + (3.5, 4.5): [4, 2, 1, 0.2], + (2.5, 3.5): [3, 1, 0.5, 0.2], + (1.5, 2.5): [2, 1, 0.5, 0.2], + (0, 1.5): [1, 0.5, 0.25, 0.1] + } + for lower_bound, upper_bound in labels_catalog: + if (coef >= lower_bound) and (coef < upper_bound): + labels = 10**expnt * np.array(labels_catalog[lower_bound, + upper_bound]) + sizes = list(bubble_points * size_factor * labels / s_data_max) + labels = ['{:g}'.format(l) for l in labels] + return (sizes, labels) + + def _make_legend(self): + if hasattr(self, "size_title"): + ax = self.axes[0] + import matplotlib.legend as legend + from matplotlib.collections import CircleCollection + sizes, labels = self._legend_bubbles(self.s_data_max, + self.size_factor, + self.bubble_points) + color = self.plt.rcParams['axes.facecolor'], + edgecolor = self.plt.rcParams['axes.edgecolor'] + bubbles = [] + for size in sizes: + bubbles.append(CircleCollection(sizes=[size], + color=color, + edgecolor=edgecolor)) + bubble_legend = legend.Legend(ax, + handles=bubbles, + labels=labels, + loc='lower right') + bubble_legend.set_title(self.size_title) + 
ax.add_artist(bubble_legend) + super()._make_legend() + class HexBinPlot(PlanePlot): _kind = 'hexbin' @@ -3254,7 +3334,7 @@ def pie(self, y=None, **kwds): """ return self(kind='pie', y=y, **kwds) - def scatter(self, x, y, s=None, c=None, **kwds): + def scatter(self, x, y, s=None, c=None, size_factor=1, **kwds): """ Create a scatter plot with varying marker point size and color. @@ -3273,9 +3353,12 @@ def scatter(self, x, y, s=None, c=None, **kwds): y : int or str The column name or column position to be used as vertical coordinates for each point. - s : scalar or array_like, optional + s : int, str, scalar or array_like, optional The size of each point. Possible values are: + - The column name or column position to be used as bubble size for + each point. + - A single scalar so all points have the same size. - A sequence of scalars, which will be used for each point's size @@ -3296,6 +3379,9 @@ def scatter(self, x, y, s=None, c=None, **kwds): - A column name or position whose values will be used to color the marker points according to a colormap. + size_factor: scalar, optional + A multiplication factor to change the size of bubbles + **kwds Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. @@ -3333,7 +3419,7 @@ def scatter(self, x, y, s=None, c=None, **kwds): ... c='species', ... 
colormap='viridis') """ - return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds) + return self(kind='scatter', x=x, y=y, c=c, s=s, size_factor=size_factor, **kwds) def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwds): diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index ac02f5f4e4283..9069d6071ef79 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1118,6 +1118,40 @@ def test_scatter_colors(self): tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0], np.array([1, 1, 1, 1], dtype=np.float64)) + @pytest.mark.slow + def test_plot_scatter_with_s(self): + data = np.array([[3.1, 4.2, 1.9], + [1.9, 2.8, 3.1], + [5.4, 4.32, 2.0], + [0.4, 3.4, 0.46], + [4.4, 4.9, 0.8], + [2.7, 6.2, 1.49]]) + df = DataFrame(data, + columns=['x', 'y', 'z']) + ax = df.plot.scatter(x='x', y='y', s='z', size_factor=4) + bubbles = ax.collections[0] + bubble_sizes = bubbles.get_sizes() + max_data = df['z'].max() + expected_sizes = 200 * 4 * df['z'].values / max_data + tm.assert_numpy_array_equal(bubble_sizes, expected_sizes) + + @pytest.mark.slow + def test_plot_scatter_with_categorical_s(self): + data = np.array([[3.1, 4.2], + [1.9, 2.8], + [5.4, 4.32], + [0.4, 3.4], + [4.4, 4.9], + [2.7, 6.2]]) + df = DataFrame(data, columns=['x', 'y']) + df['z'] = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True) + ax = df.plot.scatter(x='x', y='y', s='z', size_factor=4) + bubbles = ax.collections[0] + bubble_sizes = bubbles.get_sizes() + max_data = df['z'].cat.codes.max() + 1 + expected_sizes = 200 * 4 * (df['z'].cat.codes.values + 1) / max_data + tm.assert_numpy_array_equal(bubble_sizes, expected_sizes) + @pytest.mark.slow def test_plot_bar(self): df = DataFrame(randn(6, 4),
- [x] closes part of #16827 : makes bubble plots easy with df.plot.scatter(x='col1', y='col2', s='col3') with nice automatic bubble sizing and bubble size legend - [x] 2 tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/20572
2018-04-01T16:58:22Z
2018-08-18T23:14:15Z
null
2018-08-18T23:14:15Z
BUG: .unique() on MultiIndex: preserve names
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ce63cb2473bc4..1f477c4f18811 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1069,6 +1069,8 @@ MultiIndex - Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`) - Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... ] (:issue:`19057`) - Bug in :func:`MultiIndex.get_level_values` which would return an invalid index on level of ints with missing values (:issue:`17924`) +- Bug in :func:`MultiIndex.unique` when called on empty :class:`MultiIndex` (:issue:`20568`) +- Bug in :func:`MultiIndex.unique` which would not preserve level names (:issue:`20570`) - Bug in :func:`MultiIndex.remove_unused_levels` which would fill nan values (:issue:`18417`) - Bug in :func:`MultiIndex.from_tuples` which would fail to take zipped tuples in python3 (:issue:`18434`) - Bug in :func:`MultiIndex.get_loc` which would fail to automatically cast values between float and int (:issue:`18818`, :issue:`15994`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 8226c4bcac494..d4b9545999bc7 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -553,11 +553,10 @@ def __contains__(self, key): @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: - if 'name' in kwargs: - kwargs['names'] = kwargs.pop('name', None) + names = kwargs.pop('names', kwargs.pop('name', self.names)) # discards freq kwargs.pop('freq', None) - return MultiIndex.from_tuples(values, **kwargs) + return MultiIndex.from_tuples(values, names=names, **kwargs) return self.view() @cache_readonly diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 
34abf7052da8c..984f37042d600 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -2452,23 +2452,33 @@ def test_get_unique_index(self): assert result.unique tm.assert_index_equal(result, expected) - def test_unique(self): - mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]]) + @pytest.mark.parametrize('names', [None, ['first', 'second']]) + def test_unique(self, names): + mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], + names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]]) + exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) tm.assert_index_equal(res, exp) - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')]) + mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')], + names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')]) + exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')], + names=mi.names) tm.assert_index_equal(res, exp) - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')]) + mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')], + names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([['a'], ['a']]) + exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names) tm.assert_index_equal(res, exp) + # GH #20568 - empty MI + mi = pd.MultiIndex.from_arrays([[], []], names=names) + res = mi.unique() + tm.assert_index_equal(mi, res) + @pytest.mark.parametrize('level', [0, 'first', 1, 'second']) def test_unique_level(self, level): # GH #17896 - with level= argument @@ -2483,6 +2493,11 @@ def test_unique_level(self, level): expected = mi.get_level_values(level) tm.assert_index_equal(result, expected) + # With empty MI + mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second']) + result = mi.unique(level=level) + expected = mi.get_level_values(level) + def test_unique_datetimelike(self): idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', 
'2015-01-01', 'NaT', 'NaT'])
- [x] closes #20308 - [x] closes #20570 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20571
2018-03-31T22:25:04Z
2018-04-01T15:49:57Z
2018-04-01T15:49:56Z
2018-04-01T15:50:27Z
BUG: Fix first_last_valid_index, now preserves the frequency.
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ce63cb2473bc4..1f5948649c5e2 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1061,7 +1061,7 @@ Indexing - Bug in :meth:`DataFrame.drop_duplicates` where no ``KeyError`` is raised when passing in columns that don't exist on the ``DataFrame`` (issue:`19726`) - Bug in ``Index`` subclasses constructors that ignore unexpected keyword arguments (:issue:`19348`) - Bug in :meth:`Index.difference` when taking difference of an ``Index`` with itself (:issue:`20040`) - +- Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` in presence of entire rows of NaNs in the middle of values (:issue:`20499`). MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9626079660771..35f3a7c20e270 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5015,31 +5015,6 @@ def update(self, other, join='left', overwrite=True, filter_func=None, self[col] = expressions.where(mask, this, that) - # ---------------------------------------------------------------------- - # Misc methods - - def _get_valid_indices(self): - is_valid = self.count(1) > 0 - return self.index[is_valid] - - @Appender(_shared_docs['valid_index'] % { - 'position': 'first', 'klass': 'DataFrame'}) - def first_valid_index(self): - if len(self) == 0: - return None - - valid_indices = self._get_valid_indices() - return valid_indices[0] if len(valid_indices) else None - - @Appender(_shared_docs['valid_index'] % { - 'position': 'last', 'klass': 'DataFrame'}) - def last_valid_index(self): - if len(self) == 0: - return None - - valid_indices = self._get_valid_indices() - return valid_indices[-1] if len(valid_indices) else None - # ---------------------------------------------------------------------- # Data reshaping diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d5cd22732f0a9..1931875799c73 100644 --- a/pandas/core/generic.py 
+++ b/pandas/core/generic.py @@ -8763,6 +8763,51 @@ def transform(self, func, *args, **kwargs): scalar : type of index """ + def _find_valid_index(self, how): + """Retrieves the index of the first valid value. + + Parameters + ---------- + how : {'first', 'last'} + Use this parameter to change between the first or last valid index. + + Returns + ------- + idx_first_valid : type of index + """ + assert how in ['first', 'last'] + + if len(self) == 0: # early stop + return None + is_valid = ~self.isna() + + if self.ndim == 2: + is_valid = is_valid.any(1) # reduce axis 1 + + if how == 'first': + # First valid value case + i = is_valid.idxmax() + if not is_valid[i]: + return None + return i + + elif how == 'last': + # Last valid value case + i = is_valid.values[::-1].argmax() + if not is_valid.iat[len(self) - i - 1]: + return None + return self.index[len(self) - i - 1] + + @Appender(_shared_docs['valid_index'] % {'position': 'first', + 'klass': 'NDFrame'}) + def first_valid_index(self): + return self._find_valid_index('first') + + @Appender(_shared_docs['valid_index'] % {'position': 'last', + 'klass': 'NDFrame'}) + def last_valid_index(self): + return self._find_valid_index('last') + def _doc_parms(cls): """Return a tuple of the doc parms.""" diff --git a/pandas/core/series.py b/pandas/core/series.py index f3630dc43fbd1..808ac5e721fc8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3887,32 +3887,6 @@ def valid(self, inplace=False, **kwargs): "Use .dropna instead.", FutureWarning, stacklevel=2) return self.dropna(inplace=inplace, **kwargs) - @Appender(generic._shared_docs['valid_index'] % { - 'position': 'first', 'klass': 'Series'}) - def first_valid_index(self): - if len(self) == 0: - return None - - mask = isna(self._values) - i = mask.argmin() - if mask[i]: - return None - else: - return self.index[i] - - @Appender(generic._shared_docs['valid_index'] % { - 'position': 'last', 'klass': 'Series'}) - def last_valid_index(self): - if len(self) == 0: - 
return None - - mask = isna(self._values[::-1]) - i = mask.argmin() - if mask[i]: - return None - else: - return self.index[len(self) - i - 1] - # ---------------------------------------------------------------------- # Time series-oriented methods diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index ceb6c942c81b1..277c3c9bc5c23 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -530,6 +530,15 @@ def test_first_last_valid(self): assert frame.last_valid_index() is None assert frame.first_valid_index() is None + # GH20499: its preserves freq with holes + frame.index = date_range("20110101", periods=N, freq="B") + frame.iloc[1] = 1 + frame.iloc[-2] = 1 + assert frame.first_valid_index() == frame.index[1] + assert frame.last_valid_index() == frame.index[-2] + assert frame.first_valid_index().freq == frame.index.freq + assert frame.last_valid_index().freq == frame.index.freq + def test_at_time_frame(self): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index baf2619c7b022..8e537b137baaf 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -432,6 +432,15 @@ def test_first_last_valid(self): assert empty.last_valid_index() is None assert empty.first_valid_index() is None + # GH20499: its preserves freq with holes + ts.index = date_range("20110101", periods=len(ts), freq="B") + ts.iloc[1] = 1 + ts.iloc[-2] = 1 + assert ts.first_valid_index() == ts.index[1] + assert ts.last_valid_index() == ts.index[-2] + assert ts.first_valid_index().freq == ts.index.freq + assert ts.last_valid_index().freq == ts.index.freq + def test_mpl_compat_hack(self): result = self.ts[:, np.newaxis] expected = self.ts.values[:, np.newaxis]
Checklist - [X] closes #20499 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I'm not sure if I should add a test somewhere else. Notice that I've added a new function [`_find_first_valid`](https://github.com/mmngreco/pandas/blob/ff8d3964cba04ed08f9f3b0dee3ee5c7514b5ed7/pandas/core/generic.py#L8766-L8785).
https://api.github.com/repos/pandas-dev/pandas/pulls/20569
2018-03-31T16:35:04Z
2018-04-01T13:40:21Z
2018-04-01T13:40:20Z
2018-04-18T07:20:58Z
TST: xfail matmul under numpy < 1.12
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index f9f079cb21858..430c571aab0a4 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -90,7 +90,7 @@ def test_factorize(self, data_for_grouping, na_sentinel): na_sentinel=na_sentinel) expected_labels = np.array([0, 0, na_sentinel, na_sentinel, 1, 1, 0, 2], - dtype='int64') + dtype=np.intp) expected_uniques = data_for_grouping.take([0, 4, 7]) tm.assert_numpy_array_equal(labels, expected_labels) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 7949636fcafbb..2763fcc2183d2 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -17,7 +17,7 @@ from pandas.compat import lrange, product, PY35 from pandas import (compat, isna, notna, DataFrame, Series, MultiIndex, date_range, Timestamp, Categorical, - _np_version_under1p15) + _np_version_under1p12, _np_version_under1p15) import pandas as pd import pandas.core.nanops as nanops import pandas.core.algorithms as algorithms @@ -2146,6 +2146,9 @@ def test_dot(self): @pytest.mark.skipif(not PY35, reason='matmul supported for Python>=3.5') + @pytest.mark.xfail( + _np_version_under1p12, + reason="unpredictable return types under numpy < 1.12") def test_matmul(self): # matmul test is for GH #10259 a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
https://api.github.com/repos/pandas-dev/pandas/pulls/20567
2018-03-31T15:43:48Z
2018-03-31T16:37:32Z
2018-03-31T16:37:32Z
2018-03-31T16:37:32Z
ENH/DOC: update pandas-gbq signature and docstring
diff --git a/doc/source/conf.py b/doc/source/conf.py index 43c7c23c5e20d..965b537c15ce5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -350,6 +350,7 @@ intersphinx_mapping = { 'statsmodels': ('http://www.statsmodels.org/devel/', None), 'matplotlib': ('http://matplotlib.org/', None), + 'pandas-gbq': ('https://pandas-gbq.readthedocs.io/en/latest/', None), 'python': ('https://docs.python.org/3/', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ce63cb2473bc4..4f05a6f108add 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -404,7 +404,10 @@ Other Enhancements - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) - :class:`DataFrame` and :class:`Series` now support matrix multiplication (```@```) operator (:issue:`10259`) for Python>=3.5 - +- Updated ``to_gbq`` and ``read_gbq`` signature and documentation to reflect changes from + the Pandas-GBQ library version 0.4.0. Adds intersphinx mapping to Pandas-GBQ + library. (:issue:`20564`) + .. 
_whatsnew_0230.api_breaking: Backwards incompatible API changes diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9626079660771..af6b64057e358 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1116,60 +1116,90 @@ def to_dict(self, orient='dict', into=dict): else: raise ValueError("orient '%s' not understood" % orient) - def to_gbq(self, destination_table, project_id, chunksize=10000, - verbose=True, reauth=False, if_exists='fail', private_key=None): - """Write a DataFrame to a Google BigQuery table. - - The main method a user calls to export pandas DataFrame contents to - Google BigQuery table. + def to_gbq(self, destination_table, project_id, chunksize=None, + verbose=None, reauth=False, if_exists='fail', private_key=None, + auth_local_webserver=False, table_schema=None): + """ + Write a DataFrame to a Google BigQuery table. - Google BigQuery API Client Library v2 for Python is used. - Documentation is available `here - <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__ + This function requires the `pandas-gbq package + <https://pandas-gbq.readthedocs.io>`__. Authentication to the Google BigQuery service is via OAuth 2.0. - - If "private_key" is not provided: + - If ``private_key`` is provided, the library loads the JSON service + account credentials and uses those to authenticate. - By default "application default credentials" are used. + - If no ``private_key`` is provided, the library tries `application + default credentials`_. - If default application credentials are not found or are restrictive, - user account credentials are used. In this case, you will be asked to - grant permissions for product name 'pandas GBQ'. + .. _application default credentials: + https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application - - If "private_key" is provided: - - Service account credentials will be used to authenticate. 
+ - If application default credentials are not found or cannot be used + with BigQuery, the library authenticates with user account + credentials. In this case, you will be asked to grant permissions + for product name 'pandas GBQ'. Parameters ---------- - dataframe : DataFrame - DataFrame to be written - destination_table : string - Name of table to be written, in the form 'dataset.tablename' + destination_table : str + Name of table to be written, in the form 'dataset.tablename'. project_id : str Google BigQuery Account project ID. - chunksize : int (default 10000) + chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. - verbose : boolean (default True) - Show percentage complete - reauth : boolean (default False) + Set to ``None`` to load the whole dataframe at once. + reauth : bool, default False Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. - if_exists : {'fail', 'replace', 'append'}, default 'fail' - 'fail': If table exists, do nothing. - 'replace': If table exists, drop it, recreate it, and insert data. - 'append': If table exists, insert data. Create if does not exist. - private_key : str (optional) + if_exists : str, default 'fail' + Behavior when the destination table exists. Value can be one of: + + ``'fail'`` + If table exists, do nothing. + ``'replace'`` + If table exists, drop it, recreate it, and insert data. + ``'append'`` + If table exists, insert data. Create if does not exist. + private_key : str, optional Service account private key in JSON format. Can be file path or string contents. This is useful for remote server - authentication (eg. Jupyter/IPython notebook on remote host) - """ + authentication (eg. Jupyter/IPython notebook on remote host). + auth_local_webserver : bool, default False + Use the `local webserver flow`_ instead of the `console flow`_ + when getting user credentials. + + .. 
_local webserver flow: + http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server + .. _console flow: + http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console + + *New in version 0.2.0 of pandas-gbq*. + table_schema : list of dicts, optional + List of BigQuery table fields to which according DataFrame + columns conform to, e.g. ``[{'name': 'col1', 'type': + 'STRING'},...]``. If schema is not provided, it will be + generated according to dtypes of DataFrame columns. See + BigQuery API documentation on available names of a field. + + *New in version 0.3.1 of pandas-gbq*. + verbose : boolean, deprecated + *Deprecated in Pandas-GBQ 0.4.0.* Use the `logging module + to adjust verbosity instead + <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. + See Also + -------- + pandas_gbq.to_gbq : This function in the pandas-gbq library. + pandas.read_gbq : Read a DataFrame from Google BigQuery. 
+ """ from pandas.io import gbq - return gbq.to_gbq(self, destination_table, project_id=project_id, - chunksize=chunksize, verbose=verbose, reauth=reauth, - if_exists=if_exists, private_key=private_key) + return gbq.to_gbq( + self, destination_table, project_id, chunksize=chunksize, + verbose=verbose, reauth=reauth, if_exists=if_exists, + private_key=private_key, auth_local_webserver=auth_local_webserver, + table_schema=table_schema) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index f9bc6ae1a5451..236d70609e76c 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -22,12 +22,10 @@ def _try_import(): def read_gbq(query, project_id=None, index_col=None, col_order=None, - reauth=False, verbose=True, private_key=None, dialect='legacy', + reauth=False, verbose=None, private_key=None, dialect='legacy', **kwargs): - r"""Load data from Google BigQuery. - - The main method a user calls to execute a Query in Google BigQuery - and read results into a pandas DataFrame. + """ + Load data from Google BigQuery. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. @@ -49,32 +47,39 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, Parameters ---------- query : str - SQL-Like Query to return data values + SQL-Like Query to return data values. project_id : str Google BigQuery Account project ID. - index_col : str (optional) - Name of result column to use for index in results DataFrame - col_order : list(str) (optional) + index_col : str, optional + Name of result column to use for index in results DataFrame. + col_order : list(str), optional List of BigQuery column names in the desired order for results - DataFrame - reauth : boolean (default False) + DataFrame. + reauth : boolean, default False Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. 
- verbose : boolean (default True) - Verbose output - private_key : str (optional) + private_key : str, optional Service account private key in JSON format. Can be file path or string contents. This is useful for remote server - authentication (eg. Jupyter/IPython notebook on remote host) - - dialect : {'legacy', 'standard'}, default 'legacy' - 'legacy' : Use BigQuery's legacy SQL dialect. - 'standard' : Use BigQuery's standard SQL, which is - compliant with the SQL 2011 standard. For more information - see `BigQuery SQL Reference - <https://cloud.google.com/bigquery/sql-reference/>`__ - - `**kwargs` : Arbitrary keyword arguments + authentication (eg. Jupyter/IPython notebook on remote host). + dialect : str, default 'legacy' + SQL syntax dialect to use. Value can be one of: + + ``'legacy'`` + Use BigQuery's legacy SQL dialect. For more information see + `BigQuery Legacy SQL Reference + <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__. + ``'standard'`` + Use BigQuery's standard SQL, which is + compliant with the SQL 2011 standard. For more information + see `BigQuery Standard SQL Reference + <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__. + verbose : boolean, deprecated + *Deprecated in Pandas-GBQ 0.4.0.* Use the `logging module + to adjust verbosity instead + <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. + kwargs : dict + Arbitrary keyword arguments. configuration (dict): query config parameters for job processing. For example: @@ -86,8 +91,12 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, Returns ------- df: DataFrame - DataFrame representing results of query + DataFrame representing results of query. + See Also + -------- + pandas_gbq.read_gbq : This function in the pandas-gbq library. + pandas.DataFrame.to_gbq : Write a DataFrame to Google BigQuery. 
""" pandas_gbq = _try_import() return pandas_gbq.read_gbq( @@ -99,10 +108,12 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, **kwargs) -def to_gbq(dataframe, destination_table, project_id, chunksize=10000, - verbose=True, reauth=False, if_exists='fail', private_key=None): +def to_gbq(dataframe, destination_table, project_id, chunksize=None, + verbose=None, reauth=False, if_exists='fail', private_key=None, + auth_local_webserver=False, table_schema=None): pandas_gbq = _try_import() - pandas_gbq.to_gbq(dataframe, destination_table, project_id, - chunksize=chunksize, - verbose=verbose, reauth=reauth, - if_exists=if_exists, private_key=private_key) + return pandas_gbq.to_gbq( + dataframe, destination_table, project_id, chunksize=chunksize, + verbose=verbose, reauth=reauth, if_exists=if_exists, + private_key=private_key, auth_local_webserver=auth_local_webserver, + table_schema=table_schema)
Delegates more of the behavior and documentation for `to_gbq` and `read_gbq` methods to the `pandas-gbq` library. This duplicate documentation was getting out of sync. Please include the output of the validation script below between the "```" ticks: ``` $ scripts/validate_docstrings.py pandas.DataFrame.to_gbq ################################################################################ ##################### Docstring (pandas.DataFrame.to_gbq) ##################### ################################################################################ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See: :meth:`pandas_gbq.to_gbq` ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameters {'kwargs', 'args'} not documented No returns section found See Also section not found No examples section found (3.6.2/envs/pandas-dev) # swast @ swast-macbookpro2 in ~/src/pandas/pandas on git:master o [16:56:32] C:5 $ scripts/validate_docstrings.py pandas.io.gbq.read_gbq ################################################################################ ###################### Docstring (pandas.io.gbq.read_gbq) ###################### ################################################################################ Load data from Google BigQuery. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See :meth:`pandas_gbq.read_gbq`. 
Returns ------- df: DataFrame DataFrame representing results of query ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameters {'kwargs', 'args'} not documented See Also section not found No examples section found (3.6.2/envs/pandas-dev) # swast @ swast-macbookpro2 in ~/src/pandas/pandas on git:master o [16:56:39] C:4 $ scripts/validate_docstrings.py pandas.DataFrame.to_gbq ################################################################################ ##################### Docstring (pandas.DataFrame.to_gbq) ##################### ################################################################################ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See: :meth:`pandas_gbq.to_gbq` ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameters {'args', 'kwargs'} not documented No returns section found See Also section not found No examples section found ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. **Validation script gives errors** This PR removes documentation which is duplicated in the `pandas-gbq` docs and add intersphinx links to the relevant methods. 
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20564
2018-03-30T23:58:01Z
2018-04-09T10:02:24Z
2018-04-09T10:02:24Z
2019-12-11T20:30:19Z
CLN: Use new-style classes instead of old-style classes
diff --git a/ci/lint.sh b/ci/lint.sh index 545ac9c90c5c1..2cbf6f7ae52a9 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -165,6 +165,14 @@ if [ "$LINT" ]; then RET=1 fi echo "Check for deprecated messages without sphinx directive DONE" + + echo "Check for old-style classes" + grep -R --include="*.py" -E "class\s\S*[^)]:" pandas scripts + + if [ $? = "0" ]; then + RET=1 + fi + echo "Check for old-style classes DONE" else echo "NOT Linting" diff --git a/pandas/_version.py b/pandas/_version.py index 624c7b5cd63a1..919db956b8f99 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -25,7 +25,7 @@ def get_keywords(): return keywords -class VersioneerConfig: +class VersioneerConfig(object): pass diff --git a/pandas/io/common.py b/pandas/io/common.py index 4769edd157b94..0827216975f15 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -534,7 +534,7 @@ def __next__(self): row = next(self.reader) return [compat.text_type(s, "utf-8") for s in row] - class UnicodeWriter: + class UnicodeWriter(object): """ A CSV writer which will write rows to CSV file "f", diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 806cbddaa2ee2..4d187a8282859 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -321,7 +321,7 @@ def _get_subheader_index(self, signature, compression, ptype): (compression == 0)) f2 = (ptype == const.compressed_subheader_type) if (self.compression != "") and f1 and f2: - index = const.index.dataSubheaderIndex + index = const.SASIndex.data_subheader_index else: self.close() raise ValueError("Unknown subheader signature") @@ -360,23 +360,23 @@ def _process_subheader(self, subheader_index, pointer): offset = pointer.offset length = pointer.length - if subheader_index == const.index.rowSizeIndex: + if subheader_index == const.SASIndex.row_size_index: processor = self._process_rowsize_subheader - elif subheader_index == const.index.columnSizeIndex: + elif subheader_index == const.SASIndex.column_size_index: processor = 
self._process_columnsize_subheader - elif subheader_index == const.index.columnTextIndex: + elif subheader_index == const.SASIndex.column_text_index: processor = self._process_columntext_subheader - elif subheader_index == const.index.columnNameIndex: + elif subheader_index == const.SASIndex.column_name_index: processor = self._process_columnname_subheader - elif subheader_index == const.index.columnAttributesIndex: + elif subheader_index == const.SASIndex.column_attributes_index: processor = self._process_columnattributes_subheader - elif subheader_index == const.index.formatAndLabelIndex: + elif subheader_index == const.SASIndex.format_and_label_index: processor = self._process_format_subheader - elif subheader_index == const.index.columnListIndex: + elif subheader_index == const.SASIndex.column_list_index: processor = self._process_columnlist_subheader - elif subheader_index == const.index.subheaderCountsIndex: + elif subheader_index == const.SASIndex.subheader_counts_index: processor = self._process_subheader_counts - elif subheader_index == const.index.dataSubheaderIndex: + elif subheader_index == const.SASIndex.data_subheader_index: self._current_page_data_subheader_pointers.append(pointer) return else: diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py index c4b3588164305..98502d32d39e8 100644 --- a/pandas/io/sas/sas_constants.py +++ b/pandas/io/sas/sas_constants.py @@ -102,49 +102,49 @@ 61: "wcyrillic", 62: "wlatin1", 90: "ebcdic870"} -class index: - rowSizeIndex = 0 - columnSizeIndex = 1 - subheaderCountsIndex = 2 - columnTextIndex = 3 - columnNameIndex = 4 - columnAttributesIndex = 5 - formatAndLabelIndex = 6 - columnListIndex = 7 - dataSubheaderIndex = 8 +class SASIndex(object): + row_size_index = 0 + column_size_index = 1 + subheader_counts_index = 2 + column_text_index = 3 + column_name_index = 4 + column_attributes_index = 5 + format_and_label_index = 6 + column_list_index = 7 + data_subheader_index = 8 
subheader_signature_to_index = { - b"\xF7\xF7\xF7\xF7": index.rowSizeIndex, - b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": index.rowSizeIndex, - b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": index.rowSizeIndex, - b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": index.rowSizeIndex, - b"\xF6\xF6\xF6\xF6": index.columnSizeIndex, - b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": index.columnSizeIndex, - b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": index.columnSizeIndex, - b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": index.columnSizeIndex, - b"\x00\xFC\xFF\xFF": index.subheaderCountsIndex, - b"\xFF\xFF\xFC\x00": index.subheaderCountsIndex, - b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": index.subheaderCountsIndex, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": index.subheaderCountsIndex, - b"\xFD\xFF\xFF\xFF": index.columnTextIndex, - b"\xFF\xFF\xFF\xFD": index.columnTextIndex, - b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnTextIndex, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": index.columnTextIndex, - b"\xFF\xFF\xFF\xFF": index.columnNameIndex, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnNameIndex, - b"\xFC\xFF\xFF\xFF": index.columnAttributesIndex, - b"\xFF\xFF\xFF\xFC": index.columnAttributesIndex, - b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnAttributesIndex, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": index.columnAttributesIndex, - b"\xFE\xFB\xFF\xFF": index.formatAndLabelIndex, - b"\xFF\xFF\xFB\xFE": index.formatAndLabelIndex, - b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": index.formatAndLabelIndex, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": index.formatAndLabelIndex, - b"\xFE\xFF\xFF\xFF": index.columnListIndex, - b"\xFF\xFF\xFF\xFE": index.columnListIndex, - b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnListIndex, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": index.columnListIndex} + b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index, + b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index, + b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index, + b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index, + b"\xF6\xF6\xF6\xF6": 
SASIndex.column_size_index, + b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index, + b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index, + b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index, + b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index, + b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, + b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, + b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index, + b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index, + b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index, + b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, + b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, + b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index, + b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, + b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, + b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index, + b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index, + b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index} # List of frequently used SAS date and datetime formats diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 7949636fcafbb..0d3add4c4ca11 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1215,7 +1215,7 @@ def wrapper(x): getattr(mixed, name)(axis=0) getattr(mixed, name)(axis=1) - class NonzeroFail: + class 
NonzeroFail(object): def __nonzero__(self): raise ValueError diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py index c4f7bd28e4d90..81265c9f2941d 100644 --- a/pandas/tests/indexing/test_panel.py +++ b/pandas/tests/indexing/test_panel.py @@ -149,7 +149,7 @@ def test_panel_getitem(self): # with an object-like # GH 9140 - class TestObject: + class TestObject(object): def __str__(self): return "TestObject" diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index ab9f61cffc16b..0aab30e299c86 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1621,7 +1621,7 @@ def test_pprint_pathological_object(self): If the test fails, it at least won't hang. """ - class A: + class A(object): def __getitem__(self, key): return 3 # obviously simplified diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index e949772981eb7..89acbfdc9a746 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -460,11 +460,11 @@ def test_decodeFromUnicode(self): def test_encodeRecursionMax(self): # 8 is the max recursion depth - class O2: + class O2(object): member = 0 pass - class O1: + class O1(object): member = 0 pass @@ -772,14 +772,14 @@ def test_dumpToFile(self): assert "[1,2,3]" == f.getvalue() def test_dumpToFileLikeObject(self): - class filelike: + class FileLike(object): def __init__(self): self.bytes = '' def write(self, bytes): self.bytes += bytes - f = filelike() + f = FileLike() ujson.dump([1, 2, 3], f) assert "[1,2,3]" == f.bytes @@ -800,7 +800,7 @@ def test_loadFile(self): np.array([1, 2, 3, 4]), ujson.load(f, numpy=True)) def test_loadFileLikeObject(self): - class filelike: + class FileLike(object): def read(self): try: @@ -808,10 +808,10 @@ def read(self): except AttributeError: self.end = True return "[1,2,3,4]" - f = filelike() + f = FileLike() assert [1, 2, 3, 4] == ujson.load(f) - f = 
filelike() + f = FileLike() tm.assert_numpy_array_equal( np.array([1, 2, 3, 4]), ujson.load(f, numpy=True)) @@ -837,7 +837,7 @@ def test_encodeNumericOverflow(self): def test_encodeNumericOverflowNested(self): for n in range(0, 100): - class Nested: + class Nested(object): x = 12839128391289382193812939 nested = Nested() @@ -886,7 +886,7 @@ def test_decodeBigEscape(self): def test_toDict(self): d = {u("key"): 31337} - class DictTest: + class DictTest(object): def toDict(self): return d diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index a80c5d6611b8a..ab2bf92a26826 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -37,7 +37,7 @@ def test_ops_error_str(self): assert left != right def test_ops_notimplemented(self): - class Other: + class Other(object): pass other = Other() diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 1062de3119efc..540933cb90be2 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -227,7 +227,7 @@ def test_constructor_from_dense_series(self): def test_constructor_from_unknown_type(self): # GH 19393 - class Unknown: + class Unknown(object): pass with pytest.raises(TypeError, message='SparseDataFrame called with unknown type ' diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index e2a142366a89e..7f9cddf9859a5 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -54,7 +54,7 @@ def test_error_rename(): pass -class Foo: +class Foo(object): @classmethod def classmethod(cls): raise AbstractMethodError(cls, methodtype='classmethod') diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 23cc18de34778..2264508fa9d91 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -1082,7 +1082,7 @@ def test_resample_how_callables(self): 
def fn(x, a=1): return str(type(x)) - class fn_class: + class FnClass(object): def __call__(self, x): return str(type(x)) @@ -1091,7 +1091,7 @@ def __call__(self, x): df_lambda = df.resample("M").apply(lambda x: str(type(x))) df_partial = df.resample("M").apply(partial(fn)) df_partial2 = df.resample("M").apply(partial(fn, a=2)) - df_class = df.resample("M").apply(fn_class()) + df_class = df.resample("M").apply(FnClass()) assert_frame_equal(df_standard, df_lambda) assert_frame_equal(df_standard, df_partial) diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index a7e121f9069fc..3863451757709 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -94,7 +94,7 @@ def _output_header(title, width=80, char='#'): full_line=full_line, title_line=title_line) -class Docstring: +class Docstring(object): def __init__(self, method_name, method_obj): self.method_name = method_name self.method_obj = method_obj diff --git a/versioneer.py b/versioneer.py index b0ae4fa2dc8e8..5dae3723ecf9c 100644 --- a/versioneer.py +++ b/versioneer.py @@ -352,7 +352,7 @@ import sys -class VersioneerConfig: +class VersioneerConfig(object): pass
Noticed some [lgtm.com alerts](https://lgtm.com/projects/g/pydata/pandas/snapshot/f85a69a8fdaed49747c4db61f5177a296cf290eb/files/scripts/validate_docstrings.py?sort=name&dir=ASC&mode=heatmap&showExcluded=false#L108) about the [`@property` decorator not working in old-style classes](https://lgtm.com/rules/10030086/) in `scripts/validate_docstrings.py`. I don't think this actually causes any undesired behavior in that script though. Figured this would be a good time to enforce new-style classes throughout the codebase: - Converted old-style classes to new-style classes - Basically just inserted `(object)`. - Added a check to `lint.sh` for old-style classes, since `flake8` doesn't look to be catching it. - Maybe there's a way to enforce this in `flake8` instead? - Didn't hit the Cython code; not exactly sure what the new-style vs. old-style conventions are there.
https://api.github.com/repos/pandas-dev/pandas/pulls/20563
2018-03-30T23:44:01Z
2018-04-03T07:00:44Z
2018-04-03T07:00:44Z
2018-09-24T17:24:43Z
[WIP] Complete offset prefix mapping
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 466c48b780861..58d3cb1091f97 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1155,6 +1155,8 @@ frequencies. We will refer to these aliases as *offset aliases*. "L, ms", "milliseconds" "U, us", "microseconds" "N", "nanoseconds" + "WOM, "x-th day of the y-th week of each month frequency" + "LWOM", "x-th day of the last week of each month frequency" Combining Aliases ~~~~~~~~~~~~~~~~~ diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 2e4be7fbdeebf..7758d48009436 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -2451,23 +2451,29 @@ def generate_range(start=None, end=None, periods=None, prefix_mapping = dict((offset._prefix, offset) for offset in [ - YearBegin, # 'AS' YearEnd, # 'A' - BYearBegin, # 'BAS' - BYearEnd, # 'BA' + YearBegin, # 'AS' BusinessDay, # 'B' + BYearEnd, # 'BA' + BYearBegin, # 'BAS' + BusinessHour, # 'BH' BusinessMonthBegin, # 'BMS' BusinessMonthEnd, # 'BM' BQuarterEnd, # 'BQ' BQuarterBegin, # 'BQS' - BusinessHour, # 'BH' CustomBusinessDay, # 'C' + CustomBusinessHour, # 'CBH' CustomBusinessMonthEnd, # 'CBM' CustomBusinessMonthBegin, # 'CBMS' - CustomBusinessHour, # 'CBH' + Day, # 'D' + Hour, # 'H' + Milli, # 'L' + LastWeekOfMonth, # 'LWOM' MonthEnd, # 'M' MonthBegin, # 'MS' Nano, # 'N' + FY5253, # 'RE' + FY5253Quarter, # 'REQ' SemiMonthEnd, # 'SM' SemiMonthBegin, # 'SMS' Week, # 'W' @@ -2476,10 +2482,5 @@ def generate_range(start=None, end=None, periods=None, Micro, # 'U' QuarterEnd, # 'Q' QuarterBegin, # 'QS' - Milli, # 'L' - Hour, # 'H' - Day, # 'D' WeekOfMonth, # 'WOM' - FY5253, - FY5253Quarter, ])
Checklist for PRs : - [x] closes #7985 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20562
2018-03-30T23:26:28Z
2018-11-04T16:13:15Z
null
2018-11-04T16:13:15Z
CI: move 2.7_SLOW -> 3.6_SLOW
diff --git a/.travis.yml b/.travis.yml index 22ef6c819c6d4..e4dab4eb53afb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -56,7 +56,7 @@ matrix: # In allow_failures - dist: trusty env: - - JOB="2.7_SLOW" SLOW=true + - JOB="3.6_SLOW" SLOW=true # In allow_failures - dist: trusty env: @@ -72,7 +72,7 @@ matrix: allow_failures: - dist: trusty env: - - JOB="2.7_SLOW" SLOW=true + - JOB="3.6_SLOW" SLOW=true - dist: trusty env: - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" diff --git a/ci/requirements-2.7_SLOW.build b/ci/requirements-3.6_SLOW.build similarity index 53% rename from ci/requirements-2.7_SLOW.build rename to ci/requirements-3.6_SLOW.build index a665ab9edd585..bdcfe28105866 100644 --- a/ci/requirements-2.7_SLOW.build +++ b/ci/requirements-3.6_SLOW.build @@ -1,5 +1,5 @@ -python=2.7* +python=3.6* python-dateutil pytz -numpy=1.10* +numpy cython diff --git a/ci/requirements-2.7_SLOW.pip b/ci/requirements-3.6_SLOW.pip similarity index 100% rename from ci/requirements-2.7_SLOW.pip rename to ci/requirements-3.6_SLOW.pip diff --git a/ci/requirements-2.7_SLOW.run b/ci/requirements-3.6_SLOW.run similarity index 83% rename from ci/requirements-2.7_SLOW.run rename to ci/requirements-3.6_SLOW.run index db95a6ccb2314..ab5253ad99e51 100644 --- a/ci/requirements-2.7_SLOW.run +++ b/ci/requirements-3.6_SLOW.run @@ -1,7 +1,7 @@ python-dateutil pytz -numpy=1.10* -matplotlib=1.4.3 +numpy +matplotlib scipy patsy xlwt
https://api.github.com/repos/pandas-dev/pandas/pulls/20559
2018-03-30T21:08:51Z
2018-03-31T15:27:26Z
2018-03-31T15:27:26Z
2018-03-31T15:28:00Z
BUG: usecols kwarg accepts string when it should only allow list-like or callable.
diff --git a/doc/source/io.rst b/doc/source/io.rst index ff505f525fc22..fd998d32cfbfb 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -130,11 +130,11 @@ index_col : int or sequence or ``False``, default ``None`` MultiIndex is used. If you have a malformed file with delimiters at the end of each line, you might consider ``index_col=False`` to force pandas to *not* use the first column as the index (row names). -usecols : array-like or callable, default ``None`` - Return a subset of the columns. If array-like, all elements must either +usecols : list-like or callable, default ``None`` + Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or - inferred from the document header row(s). For example, a valid array-like + inferred from the document header row(s). For example, a valid list-like `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e83f149db1f18..6524012d27fc9 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1077,6 +1077,7 @@ I/O - Bug in :meth:`pandas.io.stata.StataReader.value_labels` raising an ``AttributeError`` when called on very old files. Now returns an empty dict (:issue:`19417`) - Bug in :func:`read_pickle` when unpickling objects with :class:`TimedeltaIndex` or :class:`Float64Index` created with pandas prior to version 0.20 (:issue:`19939`) - Bug in :meth:`pandas.io.json.json_normalize` where subrecords are not properly normalized if any subrecords values are NoneType (:issue:`20030`) +- Bug in ``usecols`` parameter in :func:`pandas.io.read_csv` and :func:`pandas.io.read_table` where error is not raised correctly when passing a string. 
(:issue:`20529`) Plotting ^^^^^^^^ diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 52ca3d1226f79..a24e2cdd99f6f 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -445,10 +445,9 @@ cdef class TextReader: # suboptimal if usecols is not None: self.has_usecols = 1 - if callable(usecols): - self.usecols = usecols - else: - self.usecols = set(usecols) + # GH-20558, validate usecols at higher level and only pass clean + # usecols into TextReader. + self.usecols = usecols # XXX if skipfooter > 0: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 469cd6d82e4b4..780aa5d02f598 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -97,11 +97,11 @@ MultiIndex is used. If you have a malformed file with delimiters at the end of each line, you might consider index_col=False to force pandas to _not_ use the first column as the index (row names) -usecols : array-like or callable, default None - Return a subset of the columns. If array-like, all elements must either +usecols : list-like or callable, default None + Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or - inferred from the document header row(s). For example, a valid array-like + inferred from the document header row(s). For example, a valid list-like `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To instantiate a DataFrame from ``data`` with element order preserved use @@ -1177,7 +1177,7 @@ def _validate_usecols_arg(usecols): Parameters ---------- - usecols : array-like, callable, or None + usecols : list-like, callable, or None List of columns to use when parsing or a callable that can be used to filter a list of table columns. 
@@ -1192,17 +1192,19 @@ def _validate_usecols_arg(usecols): 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like is passed in or None if a callable or None is passed in. """ - msg = ("'usecols' must either be all strings, all unicode, " - "all integers or a callable") - + msg = ("'usecols' must either be list-like of all strings, all unicode, " + "all integers or a callable.") if usecols is not None: if callable(usecols): return usecols, None - usecols_dtype = lib.infer_dtype(usecols) - if usecols_dtype not in ('empty', 'integer', - 'string', 'unicode'): + # GH20529, ensure is iterable container but not string. + elif not is_list_like(usecols): raise ValueError(msg) - + else: + usecols_dtype = lib.infer_dtype(usecols) + if usecols_dtype not in ('empty', 'integer', + 'string', 'unicode'): + raise ValueError(msg) return set(usecols), usecols_dtype return usecols, None @@ -1697,11 +1699,12 @@ def __init__(self, src, **kwds): # #2442 kwds['allow_leading_cols'] = self.index_col is not False - self._reader = parsers.TextReader(src, **kwds) - - # XXX + # GH20529, validate usecol arg before TextReader self.usecols, self.usecols_dtype = _validate_usecols_arg( - self._reader.usecols) + kwds['usecols']) + kwds['usecols'] = self.usecols + + self._reader = parsers.TextReader(src, **kwds) passed_names = self.names is None diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py index 195fb4cba2aed..584711528e9cb 100644 --- a/pandas/tests/io/parser/usecols.py +++ b/pandas/tests/io/parser/usecols.py @@ -16,6 +16,11 @@ class UsecolsTests(object): + msg_validate_usecols_arg = ("'usecols' must either be list-like of all " + "strings, all unicode, all integers or a " + "callable.") + msg_validate_usecols_names = ("Usecols do not match columns, columns " + "expected but not found: {0}") def test_raise_on_mixed_dtype_usecols(self): # See gh-12678 @@ -24,11 +29,9 @@ def test_raise_on_mixed_dtype_usecols(self): 4000,5000,6000 """ - msg = 
("'usecols' must either be all strings, all unicode, " - "all integers or a callable") usecols = [0, 'b', 2] - with tm.assert_raises_regex(ValueError, msg): + with tm.assert_raises_regex(ValueError, self.msg_validate_usecols_arg): self.read_csv(StringIO(data), usecols=usecols) def test_usecols(self): @@ -85,6 +88,18 @@ def test_usecols(self): pytest.raises(ValueError, self.read_csv, StringIO(data), names=['a', 'b'], usecols=[1], header=None) + def test_usecols_single_string(self): + # GH 20558 + data = """foo, bar, baz + 1000, 2000, 3000 + 4000, 5000, 6000 + """ + + usecols = 'foo' + + with tm.assert_raises_regex(ValueError, self.msg_validate_usecols_arg): + self.read_csv(StringIO(data), usecols=usecols) + def test_usecols_index_col_False(self): # see gh-9082 s = "a,b,c,d\n1,2,3,4\n5,6,7,8" @@ -348,13 +363,10 @@ def test_usecols_with_mixed_encoding_strings(self): 3.568935038,7,False,a ''' - msg = ("'usecols' must either be all strings, all unicode, " - "all integers or a callable") - - with tm.assert_raises_regex(ValueError, msg): + with tm.assert_raises_regex(ValueError, self.msg_validate_usecols_arg): self.read_csv(StringIO(s), usecols=[u'AAA', b'BBB']) - with tm.assert_raises_regex(ValueError, msg): + with tm.assert_raises_regex(ValueError, self.msg_validate_usecols_arg): self.read_csv(StringIO(s), usecols=[b'AAA', u'BBB']) def test_usecols_with_multibyte_characters(self): @@ -480,11 +492,6 @@ def test_raise_on_usecols_names_mismatch(self): # GH 14671 data = 'a,b,c,d\n1,2,3,4\n5,6,7,8' - msg = ( - "Usecols do not match columns, " - "columns expected but not found: {missing}" - ) - usecols = ['a', 'b', 'c', 'd'] df = self.read_csv(StringIO(data), usecols=usecols) expected = DataFrame({'a': [1, 5], 'b': [2, 6], 'c': [3, 7], @@ -492,18 +499,21 @@ def test_raise_on_usecols_names_mismatch(self): tm.assert_frame_equal(df, expected) usecols = ['a', 'b', 'c', 'f'] - with tm.assert_raises_regex( - ValueError, msg.format(missing=r"\['f'\]")): + with 
tm.assert_raises_regex(ValueError, + self.msg_validate_usecols_names.format( + r"\['f'\]")): self.read_csv(StringIO(data), usecols=usecols) usecols = ['a', 'b', 'f'] - with tm.assert_raises_regex( - ValueError, msg.format(missing=r"\['f'\]")): + with tm.assert_raises_regex(ValueError, + self.msg_validate_usecols_names.format( + r"\['f'\]")): self.read_csv(StringIO(data), usecols=usecols) usecols = ['a', 'b', 'f', 'g'] - with tm.assert_raises_regex( - ValueError, msg.format(missing=r"\[('f', 'g'|'g', 'f')\]")): + with tm.assert_raises_regex(ValueError, + self.msg_validate_usecols_names.format( + r"\[('f', 'g'|'g', 'f')\]")): self.read_csv(StringIO(data), usecols=usecols) names = ['A', 'B', 'C', 'D'] @@ -527,11 +537,13 @@ def test_raise_on_usecols_names_mismatch(self): # tm.assert_frame_equal(df, expected) usecols = ['A', 'B', 'C', 'f'] - with tm.assert_raises_regex( - ValueError, msg.format(missing=r"\['f'\]")): + with tm.assert_raises_regex(ValueError, + self.msg_validate_usecols_names.format( + r"\['f'\]")): self.read_csv(StringIO(data), header=0, names=names, usecols=usecols) usecols = ['A', 'B', 'f'] - with tm.assert_raises_regex( - ValueError, msg.format(missing=r"\['f'\]")): + with tm.assert_raises_regex(ValueError, + self.msg_validate_usecols_names.format( + r"\['f'\]")): self.read_csv(StringIO(data), names=names, usecols=usecols)
- [x] closes #20529 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Since string is iterable, when passed into usecols in read_csv or read_table, it is currently treated as array of characters instead of being caught properly. For example, when usecols='bar', it is interpreted as ['b', 'a' ,'r'] in TextReader, and raising a column not found error in _validate_usecols_names when it really should raise ValueError as invalid value from _validate_usecols_arg, before the param being passed into TextReader. It's a bug in _validate_usecols_arg and TextReader which lack handling of string-type iterable. ``` >>> import os >>> from itertools import repeat >>> from pandas import * >>> >>> dummy = DataFrame({"foo": range(0, 5), ... "bar": range(10, 15), ... "b": [1, 2, 3, 5, 5], ... "a": list(repeat(3, 5)), ... "r": list(repeat(8, 5))}) >>> >>> dummy.to_csv('dummy.csv', index=False) >>> read_csv('dummy.csv', usecols=['foo']) foo 0 0 1 1 2 2 3 3 4 4 >>> read_csv('dummy.csv', usecols='bar') b a r 0 1 3 8 1 2 3 8 2 3 3 8 3 5 3 8 4 5 3 8 >>> >>> os.remove('dummy.csv') ``` Added is_list_like check in _validate_usecols_arg to ensure passed value is list-like but not string. Also, array_like is defined as list_like with dtype attribute so update docstring to list-like from array-like.
https://api.github.com/repos/pandas-dev/pandas/pulls/20558
2018-03-30T20:50:12Z
2018-04-01T13:42:26Z
2018-04-01T13:42:26Z
2018-04-01T15:22:40Z
BUG: Allow overwriting object columns with EAs
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a0e122d390240..f5956aacf8646 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2379,7 +2379,10 @@ def should_store(self, value): return not (issubclass(value.dtype.type, (np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_)) or - is_extension_type(value)) + # TODO(ExtensionArray): remove is_extension_type + # when all extension arrays have been ported. + is_extension_type(value) or + is_extension_array_dtype(value)) def replace(self, to_replace, value, inplace=False, filter=None, regex=False, convert=True, mgr=None): diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index 9b9a614889bef..6f4d5b40515be 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -75,3 +75,9 @@ def test_set_frame_expand_extension_with_regular(self, data): df['B'] = [1] * len(data) expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) self.assert_frame_equal(df, expected) + + def test_set_frame_overwrite_object(self, data): + # https://github.com/pandas-dev/pandas/issues/20555 + df = pd.DataFrame({"A": [1] * len(data)}, dtype=object) + df['A'] = data + assert df.dtypes['A'] == data.dtype
Closes https://github.com/pandas-dev/pandas/issues/20555
https://api.github.com/repos/pandas-dev/pandas/pulls/20556
2018-03-30T18:59:37Z
2018-03-31T16:03:00Z
2018-03-31T16:03:00Z
2018-03-31T16:03:05Z
COMPAT: Remove use of private re attribute
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index aefa1ddd6cf0b..8885064b22ea8 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -26,6 +26,7 @@ # pylint disable=W0611 # flake8: noqa +import re import functools import itertools from distutils.version import LooseVersion @@ -136,7 +137,6 @@ def lfilter(*args, **kwargs): else: # Python 2 - import re _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") FileNotFoundError = IOError @@ -423,6 +423,14 @@ def raise_with_traceback(exc, traceback=Ellipsis): parse_date = _date_parser.parse +# In Python 3.7, the private re._pattern_type is removed. +# Python 3.5+ have typing.re.Pattern +if PY35: + import typing + re_type = typing.re.Pattern +else: + re_type = type(re.compile('')) + # https://github.com/pandas-dev/pandas/pull/9123 def is_platform_little_endian(): """ am I little endian """ diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index a02f0c5b2a4d6..d747e69d1ff39 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -6,7 +6,7 @@ from collections import Iterable from numbers import Number from pandas.compat import (PY2, string_types, text_type, - string_and_binary_types) + string_and_binary_types, re_type) from pandas._libs import lib is_bool = lib.is_bool @@ -216,7 +216,7 @@ def is_re(obj): False """ - return isinstance(obj, re._pattern_type) + return isinstance(obj, re_type) def is_re_compilable(obj):
Closes https://github.com/pandas-dev/pandas/issues/20551
https://api.github.com/repos/pandas-dev/pandas/pulls/20553
2018-03-30T16:58:42Z
2018-03-31T16:04:13Z
2018-03-31T16:04:12Z
2018-05-16T13:30:13Z
TST: tests for inconsistent indexing with datetimes
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 6d74ce54faa94..7149c9e27408f 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -1548,6 +1548,25 @@ def test_setitem_single_column_mixed_datetime(self): # pytest.raises( # Exception, df.loc.__setitem__, ('d', 'timestamp'), [nan]) + def test_setitem_mixed_datetime(self): + # GH 9336 + expected = DataFrame({'a': [0, 0, 0, 0, 13, 14], + 'b': [pd.datetime(2012, 1, 1), + 1, + 'x', + 'y', + pd.datetime(2013, 1, 1), + pd.datetime(2014, 1, 1)]}) + df = pd.DataFrame(0, columns=list('ab'), index=range(6)) + df['b'] = pd.NaT + df.loc[0, 'b'] = pd.datetime(2012, 1, 1) + df.loc[1, 'b'] = 1 + df.loc[[2, 3], 'b'] = 'x', 'y' + A = np.array([[13, np.datetime64('2013-01-01T00:00:00')], + [14, np.datetime64('2014-01-01T00:00:00')]]) + df.loc[[4, 5], ['a', 'b']] = A + assert_frame_equal(df, expected) + def test_setitem_frame(self): piece = self.frame.loc[self.frame.index[:2], ['A', 'B']] self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
- [X] closes #9336 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20550
2018-03-30T14:18:18Z
2018-05-29T01:34:34Z
2018-05-29T01:34:34Z
2018-05-29T01:34:39Z
Fixed WOM offset when n=0
diff --git a/doc/source/api.rst b/doc/source/api.rst index e224e9927f55c..e43632ea46bfb 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2106,6 +2106,7 @@ Standard moving window functions Rolling.skew Rolling.kurt Rolling.apply + Rolling.aggregate Rolling.quantile Window.mean Window.sum @@ -2133,6 +2134,7 @@ Standard expanding window functions Expanding.skew Expanding.kurt Expanding.apply + Expanding.aggregate Expanding.quantile Exponentially-weighted moving window functions diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1c9849730edd6..e340acc17fe9f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -438,6 +438,7 @@ Other Enhancements ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) +- :class:`WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). - :class:`DataFrame` and :class:`Series` now support matrix multiplication (```@```) operator (:issue:`10259`) for Python>=3.5 - Updated ``to_gbq`` and ``read_gbq`` signature and documentation to reflect changes from the Pandas-GBQ library version 0.4.0. 
Adds intersphinx mapping to Pandas-GBQ @@ -847,7 +848,7 @@ Other API Changes - :func:`DatetimeIndex.strftime` and :func:`PeriodIndex.strftime` now return an ``Index`` instead of a numpy array to be consistent with similar accessors (:issue:`20127`) - Constructing a Series from a list of length 1 no longer broadcasts this list when a longer index is specified (:issue:`19714`, :issue:`20391`). - :func:`DataFrame.to_dict` with ``orient='index'`` no longer casts int columns to float for a DataFrame with only int and float columns (:issue:`18580`) -- A user-defined-function that is passed to :func:`Series.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than an ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) +- A user-defined-function that is passed to :func:`Series.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than a ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) .. 
_whatsnew_0230.deprecations: diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 2dfd4ae3e6e3a..e5291ed52a86c 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -236,6 +236,12 @@ def test_catch_infinite_loop(self): pytest.raises(Exception, date_range, datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset) + @pytest.mark.parametrize('periods', (1, 2)) + def test_wom_len(self, periods): + # https://github.com/pandas-dev/pandas/issues/20517 + res = date_range(start='20110101', periods=periods, freq='WOM-1MON') + assert len(res) == periods + class TestGenRangeGeneration(object): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index d96ebab615d12..5369b1a94a956 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -2228,8 +2228,6 @@ class TestWeekOfMonth(Base): _offset = WeekOfMonth def test_constructor(self): - tm.assert_raises_regex(ValueError, "^N cannot be 0", - WeekOfMonth, n=0, week=1, weekday=1) tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth, n=1, week=4, weekday=0) tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth, @@ -2261,6 +2259,19 @@ def test_offset(self): (-1, 2, 1, date3, datetime(2010, 12, 21)), (-1, 2, 1, date4, datetime(2011, 1, 18)), + (0, 0, 1, date1, datetime(2011, 1, 4)), + (0, 0, 1, date2, datetime(2011, 2, 1)), + (0, 0, 1, date3, datetime(2011, 2, 1)), + (0, 0, 1, date4, datetime(2011, 2, 1)), + (0, 1, 1, date1, datetime(2011, 1, 11)), + (0, 1, 1, date2, datetime(2011, 1, 11)), + (0, 1, 1, date3, datetime(2011, 2, 8)), + (0, 1, 1, date4, datetime(2011, 2, 8)), + (0, 0, 1, date1, datetime(2011, 1, 4)), + (0, 1, 1, date2, datetime(2011, 1, 11)), + (0, 2, 1, date3, datetime(2011, 1, 18)), + (0, 3, 1, date4, datetime(2011, 1, 25)), + (1, 0, 0, date1, datetime(2011, 2, 7)), (1, 0, 
0, date2, datetime(2011, 2, 7)), (1, 0, 0, date3, datetime(2011, 2, 7)), diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 2e4be7fbdeebf..749165f894819 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1461,9 +1461,6 @@ def __init__(self, n=1, normalize=False, week=0, weekday=0): self.weekday = weekday self.week = week - if self.n == 0: - raise ValueError('N cannot be 0') - if self.weekday < 0 or self.weekday > 6: raise ValueError('Day must be 0<=day<=6, got {day}' .format(day=self.weekday))
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #20517 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20549
2018-03-30T14:10:52Z
2018-04-21T18:14:15Z
2018-04-21T18:14:15Z
2018-04-21T18:38:01Z
ERR: disallow non-hashables in Index/MultiIndex construction & rename
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e19aedac80213..3f7c4b3b0ccb7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -827,6 +827,7 @@ Other API Changes - A :class:`Series` of ``dtype=category`` constructed from an empty ``dict`` will now have categories of ``dtype=object`` rather than ``dtype=float64``, consistently with the case in which an empty list is passed (:issue:`18515`) - All-NaN levels in a ``MultiIndex`` are now assigned ``float`` rather than ``object`` dtype, promoting consistency with ``Index`` (:issue:`17929`). - Levels names of a ``MultiIndex`` (when not None) are now required to be unique: trying to create a ``MultiIndex`` with repeated names will raise a ``ValueError`` (:issue:`18872`) +- Both construction and renaming of ``Index``/``MultiIndex`` with non-hashable ``name``/``names`` will now raise ``TypeError`` (:issue:`20527`) - :func:`Index.map` can now accept ``Series`` and dictionary input objects (:issue:`12756`, :issue:`18482`, :issue:`18509`). - :func:`DataFrame.unstack` will now default to filling with ``np.nan`` for ``object`` columns. (:issue:`12815`) - :class:`IntervalIndex` constructor will raise if the ``closed`` parameter conflicts with how the input data is inferred to be closed (:issue:`18421`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 12bb09e8f8a8a..f392a716d9e5b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -42,6 +42,7 @@ is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, + is_hashable, needs_i8_conversion, is_iterator, is_list_like, is_scalar) @@ -1311,9 +1312,33 @@ def _get_names(self): return FrozenList((self.name, )) def _set_names(self, values, level=None): + """ + Set new names on index. Each name has to be a hashable type. 
+ + Parameters + ---------- + values : str or sequence + name(s) to set + level : int, level name, or sequence of int/level names (default None) + If the index is a MultiIndex (hierarchical), level(s) to set (None + for all levels). Otherwise level must be None + + Raises + ------ + TypeError if each name is not hashable. + """ + if not is_list_like(values): + raise ValueError('Names must be a list-like') if len(values) != 1: raise ValueError('Length of new names must be 1, got %d' % len(values)) + + # GH 20527 + # All items in 'name' need to be hashable: + for name in values: + if not is_hashable(name): + raise TypeError('{}.name must be a hashable type' + .format(self.__class__.__name__)) self.name = values[0] names = property(fset=_set_names, fget=_get_names) @@ -1339,9 +1364,9 @@ def set_names(self, names, level=None, inplace=False): Examples -------- >>> Index([1, 2, 3, 4]).set_names('foo') - Int64Index([1, 2, 3, 4], dtype='int64') + Int64Index([1, 2, 3, 4], dtype='int64', name='foo') >>> Index([1, 2, 3, 4]).set_names(['foo']) - Int64Index([1, 2, 3, 4], dtype='int64') + Int64Index([1, 2, 3, 4], dtype='int64', name='foo') >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'), (2, u'one'), (2, u'two')], names=['foo', 'bar']) @@ -1354,6 +1379,7 @@ def set_names(self, names, level=None, inplace=False): labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'baz', u'bar']) """ + if level is not None and self.nlevels == 1: raise ValueError('Level must be None for non-MultiIndex') diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 8098f7bb7d246..fbcf06a28c1e5 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -16,6 +16,7 @@ _ensure_platform_int, is_categorical_dtype, is_object_dtype, + is_hashable, is_iterator, is_list_like, pandas_dtype, @@ -634,12 +635,29 @@ def _get_names(self): def _set_names(self, names, level=None, validate=True): """ + Set new names on index. Each name has to be a hashable type. 
+ + Parameters + ---------- + values : str or sequence + name(s) to set + level : int, level name, or sequence of int/level names (default None) + If the index is a MultiIndex (hierarchical), level(s) to set (None + for all levels). Otherwise level must be None + validate : boolean, default True + validate that the names match level lengths + + Raises + ------ + TypeError if each name is not hashable. + + Notes + ----- sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies """ - # GH 15110 # Don't allow a single string for names in a MultiIndex if names is not None and not is_list_like(names): @@ -662,10 +680,20 @@ def _set_names(self, names, level=None, validate=True): # set the name for l, name in zip(level, names): - if name is not None and name in used: - raise ValueError('Duplicated level name: "{}", assigned to ' - 'level {}, is already used for level ' - '{}.'.format(name, l, used[name])) + if name is not None: + + # GH 20527 + # All items in 'names' need to be hashable: + if not is_hashable(name): + raise TypeError('{}.name must be a hashable type' + .format(self.__class__.__name__)) + + if name in used: + raise ValueError( + 'Duplicated level name: "{}", assigned to ' + 'level {}, is already used for level ' + '{}.'.format(name, l, used[name])) + self.levels[l].rename(name, inplace=True) used[name] = l diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index a8b81b1b03552..8e10e4c4fbc65 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -125,12 +125,12 @@ def test_getitem_list(self): # tuples df = DataFrame(randn(8, 3), columns=Index([('foo', 'bar'), ('baz', 'qux'), - ('peek', 'aboo')], name=['sth', 'sth2'])) + ('peek', 'aboo')], name=('sth', 'sth2'))) result = df[[('foo', 'bar'), ('baz', 'qux')]] expected = df.iloc[:, :2] assert_frame_equal(result, expected) - assert result.columns.names == 
['sth', 'sth2'] + assert result.columns.names == ('sth', 'sth2') def test_getitem_callable(self): # GH 12533 diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7e19de4cca292..682517f5a6fb1 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -435,6 +435,24 @@ def test_constructor_empty(self): assert isinstance(empty, MultiIndex) assert not len(empty) + def test_constructor_nonhashable_name(self, indices): + # GH 20527 + + if isinstance(indices, MultiIndex): + pytest.skip("multiindex handled in test_multi.py") + + name = ['0'] + message = "Index.name must be a hashable type" + tm.assert_raises_regex(TypeError, message, name=name) + + # With .rename() + renamed = [['1']] + tm.assert_raises_regex(TypeError, message, + indices.rename, name=renamed) + # With .set_names() + tm.assert_raises_regex(TypeError, message, + indices.set_names, names=renamed) + def test_view_with_args(self): restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex', diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 984f37042d600..88dc4cbaf7bb3 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -615,8 +615,27 @@ def test_constructor_mismatched_label_levels(self): with tm.assert_raises_regex(ValueError, label_error): self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]]) - @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], - [1, 'a', 1]]) + def test_constructor_nonhashable_names(self): + # GH 20527 + levels = [[1, 2], [u'one', u'two']] + labels = [[0, 0, 1, 1], [0, 1, 0, 1]] + names = ((['foo'], ['bar'])) + message = "MultiIndex.name must be a hashable type" + tm.assert_raises_regex(TypeError, message, + MultiIndex, levels=levels, + labels=labels, names=names) + + # With .rename() + mi = MultiIndex(levels=[[1, 2], [u'one', u'two']], + labels=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=('foo', 'bar')) + renamed = [['foor'], 
['barr']] + tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed) + # With .set_names() + tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed) + + @pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'], + ['1', 'a', '1']]) def test_duplicate_level_names(self, names): # GH18872 pytest.raises(ValueError, pd.MultiIndex.from_product,
Index & MultiIndex names need to be hashable. Both constructing and renaming without a hashable name raise TypeError exceptions now. **Examples:** - Index: ``` In [2]: pd.Index([1, 2, 3], name=['foo']) >>> Int64Index([1, 2, 3], dtype='int64', name=['foo']) ``` ``` In [3]: pd.Index([1, 2, 3], name='foo').rename(['bar']) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-3-f3327eccf0fc> in <module>() ----> 1 pd.Index([1, 2, 3], name='foo').rename(['bar']) ~/Documents/GitHub/pandas/pandas/core/indexes/base.py in rename(self, name, inplace) 1406 new index (of same type and class...etc) [if inplace, returns None] 1407 """ -> 1408 return self.set_names([name], inplace=inplace) 1409 1410 @property ~/Documents/GitHub/pandas/pandas/core/indexes/base.py in set_names(self, names, level, inplace) 1387 else: 1388 idx = self._shallow_copy() -> 1389 idx._set_names(names, level=level) 1390 if not inplace: 1391 return idx ~/Documents/GitHub/pandas/pandas/core/indexes/base.py in _set_names(self, values, level) 1323 if not is_hashable(name): 1324 raise TypeError('{}.name must be a hashable type' -> 1325 .format(self.__class__.__name__)) 1326 if len(values) != 1: 1327 raise ValueError('Length of new names must be 1, got %d' % TypeError: Int64Index.name must be a hashable type ``` - MultiIndex: ``` In [4]: pd.MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=((['foo'], ['bar']))) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-d211526eaa3d> in <module>() 1 pd.MultiIndex(levels=[[1, 2], [u'one', u'two']], 2 labels=[[0, 0, 1, 1], [0, 1, 0, 1]], ----> 3 names=((['foo'], ['bar']))) 4 ~/Documents/GitHub/pandas/pandas/core/indexes/multi.py in __new__(cls, levels, labels, sortorder, names, dtype, copy, name, verify_integrity, _set_identity) 230 if names is not None: 231 # handles name 
validation --> 232 result._set_names(names) 233 234 if sortorder is not None: ~/Documents/GitHub/pandas/pandas/core/indexes/multi.py in _set_names(self, names, level, validate) 646 if not is_hashable(name): 647 raise TypeError('{}.name must be a hashable type' --> 648 .format(self.__class__.__name__)) 649 650 # GH 15110 TypeError: MultiIndex.name must be a hashable type ``` ``` In [10]: pd.MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=('foo', 'bar')).rename(([1], [2])) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-ff74dfc48455> in <module>() 1 pd.MultiIndex(levels=[[1, 2], [u'one', u'two']], 2 labels=[[0, 0, 1, 1], [0, 1, 0, 1]], ----> 3 names=('foo', 'bar')).rename(([1], [2])) 4 ~/Documents/GitHub/pandas/pandas/core/indexes/base.py in set_names(self, names, level, inplace) 1387 else: 1388 idx = self._shallow_copy() -> 1389 idx._set_names(names, level=level) 1390 if not inplace: 1391 return idx ~/Documents/GitHub/pandas/pandas/core/indexes/multi.py in _set_names(self, names, level, validate) 646 if not is_hashable(name): 647 raise TypeError('{}.name must be a hashable type' --> 648 .format(self.__class__.__name__)) 649 650 # GH 15110 TypeError: MultiIndex.name must be a hashable type ``` Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes [#20527](https://github.com/pandas-dev/pandas/issues/20527) - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20548
2018-03-30T12:41:55Z
2018-04-23T19:05:14Z
2018-04-23T19:05:13Z
2018-06-27T21:13:53Z
CLN: Use pandas.compat instead of sys.version_info for Python version checks
diff --git a/pandas/_version.py b/pandas/_version.py index 624c7b5cd63a1..26e4d987e9f2e 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -12,6 +12,7 @@ import re import subprocess import sys +from pandas.compat import PY3 def get_keywords(): @@ -83,7 +84,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: + if PY3: stdout = stdout.decode() if p.returncode != 0: if verbose: diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index aefa1ddd6cf0b..dc52b5c283678 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -369,7 +369,7 @@ def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) -if sys.version_info[0] < 3: +if PY2: # In PY2 functools.wraps doesn't provide metadata pytest needs to generate # decorated tests using parametrization. See pytest GH issue #2782 def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, diff --git a/pandas/io/clipboard/clipboards.py b/pandas/io/clipboard/clipboards.py index 285d93e3ca497..0793ca6877cdb 100644 --- a/pandas/io/clipboard/clipboards.py +++ b/pandas/io/clipboard/clipboards.py @@ -1,12 +1,10 @@ -import sys import subprocess from .exceptions import PyperclipException +from pandas.compat import PY2, text_type EXCEPT_MSG = """ Pyperclip could not find a copy/paste mechanism for your system. 
For more information, please visit https://pyperclip.readthedocs.org """ -PY2 = sys.version_info[0] == 2 -text_type = unicode if PY2 else str # noqa def init_osx_clipboard(): diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index 07ab445182680..52262ea05bf96 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -14,8 +14,9 @@ from __future__ import print_function import os -import sys import shutil +from pandas.compat import PY3 + __all__ = ['get_terminal_size', 'is_terminal'] @@ -29,7 +30,7 @@ def get_terminal_size(): """ import platform - if sys.version_info[0] >= 3: + if PY3: return shutil.get_terminal_size() current_os = platform.system() diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 7949636fcafbb..0dd068b90f30f 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -4,9 +4,7 @@ import warnings from datetime import timedelta -from distutils.version import LooseVersion import operator -import sys import pytest from string import ascii_lowercase @@ -1857,13 +1855,8 @@ def test_round(self): 'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]}) - if LooseVersion(sys.version) < LooseVersion('2.7'): - # Rounding with decimal is a ValueError in Python < 2.7 - with pytest.raises(ValueError): - df.round(nan_round_Series) - else: - with pytest.raises(TypeError): - df.round(nan_round_Series) + with pytest.raises(TypeError): + df.round(nan_round_Series) # Make sure this doesn't break existing Series.round tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1']) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index b2cbd0b07d7f5..78a19029db567 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -7,8 +7,6 @@ # pylint: disable-msg=W0612,E1101 from copy import deepcopy import pydoc -import sys -from distutils.version import LooseVersion from pandas.compat 
import range, lrange, long from pandas import compat @@ -253,18 +251,14 @@ def test_itertuples(self): '[(0, 1, 4), (1, 2, 5), (2, 3, 6)]') tup = next(df.itertuples(name='TestName')) - - if LooseVersion(sys.version) >= LooseVersion('2.7'): - assert tup._fields == ('Index', 'a', 'b') - assert (tup.Index, tup.a, tup.b) == tup - assert type(tup).__name__ == 'TestName' + assert tup._fields == ('Index', 'a', 'b') + assert (tup.Index, tup.a, tup.b) == tup + assert type(tup).__name__ == 'TestName' df.columns = ['def', 'return'] tup2 = next(df.itertuples(name='TestName')) assert tup2 == (0, 1, 4) - - if LooseVersion(sys.version) >= LooseVersion('2.7'): - assert tup2._fields == ('Index', '_1', '_2') + assert tup2._fields == ('Index', '_1', '_2') df3 = DataFrame({'f' + str(i): [i] for i in range(1024)}) # will raise SyntaxError if trying to create namedtuple diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index fb7677bb1449c..45be3974dad63 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1,6 +1,5 @@ """ test to_datetime """ -import sys import pytz import pytest import locale @@ -149,9 +148,6 @@ def test_to_datetime_with_non_exact(self, cache): # GH 10834 # 8904 # exact kw - if sys.version_info < (2, 7): - pytest.skip('on python version < 2.7') - s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00', '19MAY11 00:00:00Z']) result = to_datetime(s, format='%d%b%y', exact=False, cache=cache) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index ab9f61cffc16b..dde0691907b20 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1256,8 +1256,6 @@ def test_to_string_float_formatting(self): df_s = df.to_string() - # Python 2.5 just wants me to be sad. 
And debian 32-bit - # sys.version_info[0] == 2 and sys.version_info[1] < 6: if _three_digit_exp(): expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n' '2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n' @@ -1281,8 +1279,7 @@ def test_to_string_float_formatting(self): df = DataFrame({'x': [1e9, 0.2512]}) df_s = df.to_string() - # Python 2.5 just wants me to be sad. And debian 32-bit - # sys.version_info[0] == 2 and sys.version_info[1] < 6: + if _three_digit_exp(): expected = (' x\n' '0 1.000000e+009\n' diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index cf7ec9e2f2652..2423ddcd9a1a0 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -1275,10 +1275,8 @@ def test_verbose_import(self): else: # Python engine assert output == 'Filled 1 NA values in column a\n' + @pytest.mark.skipif(PY3, reason="won't work in Python 3") def test_iteration_open_handle(self): - if PY3: - pytest.skip( - "won't work in Python 3 {0}".format(sys.version_info)) with tm.ensure_clean() as path: with open(path, 'wb') as f: diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 6b39717213c0d..cbb5932a890dc 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -1,6 +1,5 @@ # pylint: disable=E1101 import os -import sys import warnings from datetime import datetime, date, time, timedelta from distutils.version import LooseVersion @@ -16,7 +15,7 @@ import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex -from pandas.compat import u, range, map, BytesIO, iteritems +from pandas.compat import u, range, map, BytesIO, iteritems, PY36 from pandas.core.config import set_option, get_option from pandas.io.common import URLError from pandas.io.excel import ( @@ -585,9 +584,6 @@ def test_read_from_s3_url(self, ext): def test_read_from_file_url(self, ext): # FILE - if sys.version_info[:2] < (2, 6): - pytest.skip("file:// not 
supported with Python < 2.6") - localtable = os.path.join(self.dirpath, 'test1' + ext) local_table = read_excel(localtable) @@ -2314,9 +2310,9 @@ def custom_converter(css): @td.skip_if_no('openpyxl') +@pytest.mark.skipif(not PY36, reason='requires fspath') class TestFSPath(object): - @pytest.mark.skipif(sys.version_info < (3, 6), reason='requires fspath') def test_excelfile_fspath(self): with tm.ensure_clean('foo.xlsx') as path: df = DataFrame({"A": [1, 2]}) @@ -2325,8 +2321,6 @@ def test_excelfile_fspath(self): result = os.fspath(xl) assert result == path - @pytest.mark.skipif(sys.version_info < (3, 6), reason='requires fspath') - # @pytest.mark.xfail def test_excelwriter_fspath(self): with tm.ensure_clean('foo.xlsx') as path: writer = ExcelWriter(path) diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 919b34dc09f6f..cfac77291803d 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -4,7 +4,6 @@ import os import datetime import numpy as np -import sys from distutils.version import LooseVersion from pandas import compat @@ -298,11 +297,6 @@ def test_nat(self): def test_datetimes(self): - # fails under 2.6/win32 (np.datetime64 seems broken) - - if LooseVersion(sys.version) < LooseVersion('2.7'): - pytest.skip('2.6 with np.datetime64 is broken') - for i in [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 1, 1, 5, 1), datetime.date(2013, 1, 1), diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 6bc3af2ba3fd2..fbe2174e603e2 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -20,13 +20,12 @@ from distutils.version import LooseVersion import pandas as pd from pandas import Index -from pandas.compat import is_platform_little_endian +from pandas.compat import is_platform_little_endian, PY3 import pandas import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.tseries.offsets import Day, MonthEnd import 
shutil -import sys @pytest.fixture(scope='module') @@ -474,21 +473,12 @@ def test_read(self, protocol, get_random_path): tm.assert_frame_equal(df, df2) @pytest.mark.parametrize('protocol', [3, 4]) - @pytest.mark.skipif(sys.version_info[:2] >= (3, 4), - reason="Testing invalid parameters for " - "Python 2.x and 3.y (y < 4).") + @pytest.mark.skipif(PY3, reason="Testing invalid parameters for Python 2") def test_read_bad_versions(self, protocol, get_random_path): - # For Python 2.x (respectively 3.y with y < 4), [expected] - # HIGHEST_PROTOCOL should be 2 (respectively 3). Hence, the protocol - # parameter should not exceed 2 (respectively 3). - if sys.version_info[:2] < (3, 0): - expect_hp = 2 - else: - expect_hp = 3 - with tm.assert_raises_regex(ValueError, - "pickle protocol %d asked for; the highest" - " available protocol is %d" % (protocol, - expect_hp)): + # For Python 2, HIGHEST_PROTOCOL should be 2. + msg = ("pickle protocol {protocol} asked for; the highest available " + "protocol is 2").format(protocol=protocol) + with tm.assert_raises_regex(ValueError, msg): with tm.ensure_clean(get_random_path) as path: df = tm.makeDataFrame() df.to_pickle(path, protocol=protocol) diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 49ad07b79d111..972a47ef91c05 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -4,10 +4,8 @@ import datetime as dt import os import struct -import sys import warnings from datetime import datetime -from distutils.version import LooseVersion from collections import OrderedDict import numpy as np @@ -144,8 +142,6 @@ def test_read_dta1(self, file): tm.assert_frame_equal(parsed, expected) def test_read_dta2(self): - if LooseVersion(sys.version) < LooseVersion('2.7'): - pytest.skip('datetime interp under 2.6 is faulty') expected = DataFrame.from_records( [ diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py index 
72d87be619917..50e72c11abc4b 100644 --- a/pandas/tests/scalar/timestamp/test_comparisons.py +++ b/pandas/tests/scalar/timestamp/test_comparisons.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -import sys from datetime import datetime import operator @@ -9,7 +8,7 @@ from dateutil.tz import tzutc from pytz import utc -from pandas.compat import long +from pandas.compat import long, PY2 from pandas import Timestamp @@ -104,7 +103,7 @@ def test_cant_compare_tz_naive_w_aware(self): pytest.raises(Exception, b.__lt__, a) pytest.raises(Exception, b.__gt__, a) - if sys.version_info < (3, 3): + if PY2: pytest.raises(Exception, a.__eq__, b.to_pydatetime()) pytest.raises(Exception, a.to_pydatetime().__eq__, b) else: @@ -125,7 +124,7 @@ def test_cant_compare_tz_naive_w_aware_explicit_pytz(self): pytest.raises(Exception, b.__lt__, a) pytest.raises(Exception, b.__gt__, a) - if sys.version_info < (3, 3): + if PY2: pytest.raises(Exception, a.__eq__, b.to_pydatetime()) pytest.raises(Exception, a.to_pydatetime().__eq__, b) else: @@ -146,7 +145,7 @@ def test_cant_compare_tz_naive_w_aware_dateutil(self): pytest.raises(Exception, b.__lt__, a) pytest.raises(Exception, b.__gt__, a) - if sys.version_info < (3, 3): + if PY2: pytest.raises(Exception, a.__eq__, b.to_pydatetime()) pytest.raises(Exception, a.to_pydatetime().__eq__, b) else: diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 2bc017ef226ce..145be7f85b193 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -7,7 +7,7 @@ from collections import OrderedDict import pytest -from pandas.compat import intern +from pandas.compat import intern, PY3 import pandas.core.common as com from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf from pandas.util._decorators import deprecate_kwarg, make_signature @@ -374,10 +374,7 @@ def test_exactly_one_ref(self): # materialize as bytearray to show that it is mutable assert bytearray(as_stolen_buf) == b'test' - 
@pytest.mark.skipif( - sys.version_info[0] > 2, - reason='bytes objects cannot be interned in py3', - ) + @pytest.mark.skipif(PY3, reason='bytes objects cannot be interned in py3') def test_interned(self): salt = uuid4().hex diff --git a/pandas/util/testing.py b/pandas/util/testing.py index f72c3b061208c..6e13a17eba68c 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2088,7 +2088,7 @@ def dec(f): # and conditionally raise on these exception types _network_error_classes = (IOError, httplib.HTTPException) -if sys.version_info >= (3, 3): +if PY3: _network_error_classes += (TimeoutError,) # noqa
Hopefully this will make code changes related to dropping support for specific versions of Python slightly easier to identify. Summary: - Replaced instances `sys.version_info` with equivalent checks from `pandas.compat` - Removed code blocks specific to unsupported versions of Python (<2.7, 3.0-3.4) <br /> There were some very specific examples that I left as-is: https://github.com/pandas-dev/pandas/blob/c4b4a81f56205082ec7f12bf77766e3b74d27c37/pandas/tests/io/formats/test_to_csv.py#L13 Didn't change any Cython related code, as it appears that we want to avoid imports from outside `_libs`: https://github.com/pandas-dev/pandas/blob/c4b4a81f56205082ec7f12bf77766e3b74d27c37/pandas/_libs/tslibs/parsing.pyx#L25-L26
https://api.github.com/repos/pandas-dev/pandas/pulls/20545
2018-03-30T02:41:28Z
2018-03-31T16:01:21Z
2018-03-31T16:01:21Z
2018-09-24T17:25:07Z
Deprecated Index.get_duplicates()
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f7c4b3b0ccb7..eb0fa49170d44 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -887,6 +887,7 @@ Deprecations - :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have deprecated passing an ``np.array`` by default. One will need to pass the new ``raw`` parameter to be explicit about what is passed (:issue:`20584`) - ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`) +- ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`) .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b67ed9cfd2241..35bfd12466429 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3879,7 +3879,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, index = _ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: - duplicates = index.get_duplicates() + duplicates = index[index.duplicated()].unique() raise ValueError('Index has duplicate keys: {dup}'.format( dup=duplicates)) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2e6e039add8a4..3d60eefc5b598 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1851,6 +1851,9 @@ def get_duplicates(self): Returns a sorted list of index elements which appear more than once in the index. + .. 
deprecated:: 0.23.0 + Use idx[idx.duplicated()].unique() instead + Returns ------- array-like @@ -1897,13 +1900,12 @@ def get_duplicates(self): >>> pd.Index(dates).get_duplicates() DatetimeIndex([], dtype='datetime64[ns]', freq=None) """ - from collections import defaultdict - counter = defaultdict(lambda: 0) - for k in self.values: - counter[k] += 1 - return sorted(k for k, v in compat.iteritems(counter) if v > 1) + warnings.warn("'get_duplicates' is deprecated and will be removed in " + "a future release. You can use " + "idx[idx.duplicated()].unique() instead", + FutureWarning, stacklevel=2) - _get_duplicates = get_duplicates + return self[self.duplicated()].unique() def _cleanup(self): self._engine.clear_mapping() diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 95186b2e79a16..51cd1837fecca 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -502,10 +502,6 @@ def take(self, indices, axis=0, allow_fill=True, freq = self.freq if isinstance(self, ABCPeriodIndex) else None return self._shallow_copy(taken, freq=freq) - def get_duplicates(self): - values = Index.get_duplicates(self) - return self._simple_new(values) - _can_hold_na = True _na_value = NaT diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 20f4384a3d698..6e564975f34cd 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -504,7 +504,7 @@ def _get_concat_axis(self): def _maybe_check_integrity(self, concat_index): if self.verify_integrity: if not concat_index.is_unique: - overlap = concat_index.get_duplicates() + overlap = concat_index[concat_index.duplicated()].unique() raise ValueError('Indexes have overlapping values: ' '{overlap!s}'.format(overlap=overlap)) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 2d55dfff7a8f3..0722b9175c0c6 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py 
+++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -1,3 +1,4 @@ +import warnings import pytest @@ -178,7 +179,10 @@ def test_get_duplicates(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02', '2000-01-03', '2000-01-03', '2000-01-04']) - result = idx.get_duplicates() + with warnings.catch_warnings(record=True): + # Deprecated - see GH20239 + result = idx.get_duplicates() + ex = DatetimeIndex(['2000-01-02', '2000-01-03']) tm.assert_index_equal(result, ex) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 682517f5a6fb1..8cb75f8cfb906 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2078,6 +2078,11 @@ def test_cached_properties_not_settable(self): with tm.assert_raises_regex(AttributeError, "Can't set attribute"): idx.is_unique = False + def test_get_duplicates_deprecated(self): + idx = pd.Index([1, 2, 3]) + with tm.assert_produces_warning(FutureWarning): + idx.get_duplicates() + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 88dc4cbaf7bb3..cc006baa64ce6 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -2432,7 +2432,12 @@ def check(nlevels, with_nulls): for a in [101, 102]: mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) assert not mi.has_duplicates - assert mi.get_duplicates() == [] + + with warnings.catch_warnings(record=True): + # Deprecated - see GH20239 + assert mi.get_duplicates().equals(MultiIndex.from_arrays( + [[], []])) + tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( 2, dtype='bool')) @@ -2444,7 +2449,12 @@ def check(nlevels, with_nulls): labels=np.random.permutation(list(lab)).T) assert len(mi) == (n + 1) * (m + 1) assert not mi.has_duplicates - assert mi.get_duplicates() == [] + + with warnings.catch_warnings(record=True): + # Deprecated - see GH20239 + assert 
mi.get_duplicates().equals(MultiIndex.from_arrays( + [[], []])) + tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( len(mi), dtype='bool')) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 4692b6d675e6b..d7745ffd94cd9 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -1,3 +1,5 @@ +import warnings + import pytest import numpy as np @@ -145,7 +147,10 @@ def test_get_duplicates(self): idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day', '4day']) - result = idx.get_duplicates() + with warnings.catch_warnings(record=True): + # Deprecated - see GH20239 + result = idx.get_duplicates() + ex = TimedeltaIndex(['2 day', '3day']) tm.assert_index_equal(result, ex)
- [X] closes #20239 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20544
2018-03-30T01:37:36Z
2018-04-24T10:17:16Z
2018-04-24T10:17:16Z
2018-04-24T15:41:53Z
DOC: Sprint recap
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e83f149db1f18..560eb0df8ccf7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -901,6 +901,22 @@ Performance Improvements Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ +Thanks to all of the contributors who participated in the Pandas Documentation +Sprint, which took place on March 10th. We had about 500 participants from over +30 locations across the world. You should notice that many of the +:ref:`API docstrings <api>` have greatly improved. + +There were too many simultaneous contributions to include a release note for each +improvement, but this `GitHub search`_ should give you an idea of how many docstrings +were improved. + +Special thanks to Marc Garcia for organizing the sprint. For more information, +read the `NumFOCUS blogpost`_ recapping the sprint. + +.. _GitHub search: https://github.com/pandas-dev/pandas/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3ADocs+created%3A2018-03-10..2018-03-15+ +.. _NumFOCUS blogpost: https://www.numfocus.org/blog/worldwide-pandas-sprint/ + + - Changed spelling of "numpy" to "NumPy", and "python" to "Python". (:issue:`19017`) - Consistency when introducing code samples, using either colon or period. Rewrote some sentences for greater clarity, added more dynamic references
Included a small note for this. Should we plan a blogpost as well? closes #20515
https://api.github.com/repos/pandas-dev/pandas/pulls/20543
2018-03-29T21:31:09Z
2018-03-30T20:03:41Z
2018-03-30T20:03:41Z
2018-10-25T19:10:38Z
[WIP]DOC: Fixed more warnings
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index cfd3f9e88e4ea..74b21c21252ec 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -583,7 +583,7 @@ and ``right`` is a subclass of DataFrame, the return type will still be ``merge`` is a function in the pandas namespace, and it is also available as a ``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling -``DataFrame `` being implicitly considered the left object in the join. +``DataFrame`` being implicitly considered the left object in the join. The related :meth:`~DataFrame.join` method, uses ``merge`` internally for the index-on-index (by default) and column(s)-on-index join. If you are joining on @@ -1202,7 +1202,7 @@ Overlapping value columns ~~~~~~~~~~~~~~~~~~~~~~~~~ The merge ``suffixes`` argument takes a tuple of list of strings to append to -overlapping column names in the input ``DataFrame``s to disambiguate the result +overlapping column names in the input ``DataFrame``\ s to disambiguate the result columns: .. ipython:: python diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt index 6e5e113e859d7..e1b561c4deacb 100644 --- a/doc/source/whatsnew/v0.17.1.txt +++ b/doc/source/whatsnew/v0.17.1.txt @@ -58,7 +58,7 @@ We can render the HTML to get the following table. :file: whatsnew_0171_html_table.html :class:`~pandas.core.style.Styler` interacts nicely with the Jupyter Notebook. -See the :ref:`documentation <style.ipynb>` for more. +See the :doc:`documentation <style>` for more. .. _whatsnew_0171.enhancements: diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index d04a34f7a44d6..5f22b518ab6c4 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -389,7 +389,7 @@ For example, after running the following, ``styled.xlsx`` renders as below: import os os.remove('styled.xlsx') -See the :ref:`Style documentation <style.ipynb#Export-to-Excel>` for more detail. 
+See the :ref:`Style documentation </style.ipynb#Export-to-Excel>` for more detail. .. _whatsnew_0200.enhancements.intervalindex: @@ -499,7 +499,7 @@ Other Enhancements - ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`) - ``pd.read_html()`` will parse multiple header rows, creating a MutliIndex header. (:issue:`13434`). - HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`) -- :class:`pandas.io.formats.style.Styler` template now has blocks for easier extension, :ref:`see the example notebook <style.ipynb#Subclassing>` (:issue:`15649`) +- :class:`pandas.io.formats.style.Styler` template now has blocks for easier extension, see the :ref:`example notebook </style.ipynb#Subclassing>` (:issue:`15649`) - :meth:`Styler.render() <pandas.io.formats.style.Styler.render>` now accepts ``**kwargs`` to allow user-defined variables in the template (:issue:`15649`) - Compatibility with Jupyter notebook 5.0; MultiIndex column labels are left-aligned and MultiIndex row-labels are top-aligned (:issue:`15379`) - ``TimedeltaIndex`` now has a custom date-tick formatter specifically designed for nanosecond level precision (:issue:`8711`) diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e83f149db1f18..9e561a288f1d8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -330,7 +330,6 @@ for storing ip addresses. .. code-block:: ipython In [3]: ser = pd.Series(values) - ...: In [4]: ser Out[4]: @@ -342,8 +341,9 @@ for storing ip addresses. Notice that the dtype is ``ip``. The missing value semantics of the underlying array are respected: +.. code-block:: ipython + In [5]: ser.isna() - ...: Out[5]: 0 True 1 False
Also trying to fail on warnings. I think it's not working yet.
https://api.github.com/repos/pandas-dev/pandas/pulls/20542
2018-03-29T21:12:20Z
2018-04-04T19:20:20Z
2018-04-04T19:20:20Z
2018-04-08T09:58:34Z
DOC: Plans for 2.7
diff --git a/doc/source/install.rst b/doc/source/install.rst index c96d4fbeb4ad2..82a97ba7b04e1 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -15,6 +15,31 @@ Instructions for installing from source, `PyPI <http://pypi.python.org/pypi/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a `development version <http://github.com/pandas-dev/pandas>`__ are also provided. +.. _install.dropping_27 + +Plan for dropping Python 2.7 +---------------------------- + +The Python core team plans to stop supporting Python 2.7 on January 1st, 2020. +In line with `NumPy's plans`_, all pandas releases through December 31, 2018 +will support Python 2. + +The final release before **December 31, 2018** will be the last release to +support Python 2. The released package will continue to be available on +PyPI and through conda. + +Starting **January 1, 2019**, all releases will be Python 3 only. + +If there are people interested in continued support for Python 2.7 past December +31, 2018 (either backporting bugfixes or funding) please reach out to the +maintainers on the issue tracker. + +For more information, see the `Python 3 statement`_ and the `Porting to Python 3 guide`_. + +.. _NumPy's plans: https://github.com/numpy/numpy/blob/master/doc/neps/nep-0014-dropping-python2.7-proposal.rst#plan-for-dropping-python-27-support +.. _Python 3 statement: http://python3statement.org/ +.. _Porting to Python 3 guide: https://docs.python.org/3/howto/pyporting.html + Python version support ---------------------- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6dadb7589869..de49ea754fc69 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -8,6 +8,11 @@ deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. +.. 
warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping_27` for more. + .. _whatsnew_0230.enhancements: New features
Closes https://github.com/pandas-dev/pandas/issues/18894 Just some sample text, essentially following NumPy. Of course, it'd be nice if 1.0 happened to be the LTS, but we'll see.
https://api.github.com/repos/pandas-dev/pandas/pulls/20540
2018-03-29T19:34:02Z
2018-04-14T13:41:44Z
2018-04-14T13:41:43Z
2018-05-06T08:08:48Z
ERR: Better error message for missing matplotlib
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6dadb7589869..5f008a7bc8dea 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1024,6 +1024,7 @@ I/O Plotting ^^^^^^^^ +- Better error message when attempting to plot but matplotlib is not installed (:issue:`19810`). - :func:`DataFrame.plot` now raises a ``ValueError`` when the ``x`` or ``y`` argument is improperly formed (:issue:`18671`) - Bug in :func:`DataFrame.plot` when ``x`` and ``y`` arguments given as positions caused incorrect referenced columns for line, bar and area plots (:issue:`20056`) - Bug in formatting tick labels with ``datetime.time()`` and fractional seconds (:issue:`18478`). diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 6c3d07124215b..c5f72cb391572 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -44,12 +44,19 @@ try: from pandas.plotting import _converter except ImportError: - pass + _HAS_MPL = False else: + _HAS_MPL = True if get_option('plotting.matplotlib.register_converters'): _converter.register(explicit=True) +def _raise_if_no_mpl(): + # TODO(mpl_converter): remove once converter is explicit + if not _HAS_MPL: + raise ImportError("matplotlib is required for plotting.") + + def _get_standard_kind(kind): return {'density': 'kde'}.get(kind, kind) @@ -97,6 +104,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, secondary_y=False, colormap=None, table=False, layout=None, **kwds): + _raise_if_no_mpl() _converter._WARN = False self.data = data self.by = by @@ -2264,6 +2272,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, ... 
}, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) """ + _raise_if_no_mpl() _converter._WARN = False if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, @@ -2403,6 +2412,7 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, ------- axes: collection of Matplotlib Axes """ + _raise_if_no_mpl() _converter._WARN = False def plot_group(group, ax): @@ -2469,6 +2479,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, subplots=False) """ + _raise_if_no_mpl() _converter._WARN = False if subplots is True: naxes = len(grouped) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index c5ce8aba9d80e..c82c939584dc7 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -17,6 +17,15 @@ from pandas.tests.plotting.common import TestPlotBase, _check_plot_works +@td.skip_if_mpl +def test_import_error_message(): + # GH-19810 + df = DataFrame({"A": [1, 2]}) + + with tm.assert_raises_regex(ImportError, 'matplotlib is required'): + df.plot() + + @td.skip_if_no_mpl class TestSeriesPlots(TestPlotBase): diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 8ad73538fbec1..ab6dfee9c862c 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -160,6 +160,8 @@ def decorated_func(func): skip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(), reason="Missing matplotlib dependency") +skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), + reason="matplotlib is present") skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), reason="matplotlib 1.5") xfail_if_mpl_2_2 = pytest.mark.xfail(_skip_if_mpl_2_2(),
Closes https://github.com/pandas-dev/pandas/issues/19810
https://api.github.com/repos/pandas-dev/pandas/pulls/20538
2018-03-29T18:45:37Z
2018-04-09T08:01:39Z
2018-04-09T08:01:39Z
2018-04-09T08:01:39Z
BUG: Presence of softlink in HDF5 file breaks HDFStore.keys() (GH20523)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 09bd09b06d9b9..fb63dc16249b2 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1098,6 +1098,7 @@ I/O - Bug in :func:`read_pickle` when unpickling objects with :class:`TimedeltaIndex` or :class:`Float64Index` created with pandas prior to version 0.20 (:issue:`19939`) - Bug in :meth:`pandas.io.json.json_normalize` where subrecords are not properly normalized if any subrecords values are NoneType (:issue:`20030`) - Bug in ``usecols`` parameter in :func:`pandas.io.read_csv` and :func:`pandas.io.read_table` where error is not raised correctly when passing a string. (:issue:`20529`) +- Bug in :func:`HDFStore.keys` when reading a file with a softlink causes exception (:issue:`20523`) Plotting ^^^^^^^^ diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 2437b7d396e84..f9a496edb45a3 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1073,10 +1073,11 @@ def groups(self): self._check_if_open() return [ g for g in self._handle.walk_nodes() - if (getattr(g._v_attrs, 'pandas_type', None) or - getattr(g, 'table', None) or + if (not isinstance(g, _table_mod.link.Link) and + (getattr(g._v_attrs, 'pandas_type', None) or + getattr(g, 'table', None) or (isinstance(g, _table_mod.table.Table) and - g._v_name != u('table'))) + g._v_name != u('table')))) ] def get_node(self, key): diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index e690b1e302d8b..b34723d6cf72c 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -373,6 +373,23 @@ def test_keys(self): assert set(store.keys()) == expected assert set(store) == expected + def test_keys_ignore_hdf_softlink(self): + + # GH 20523 + # Puts a softlink into HDF file and rereads + + with ensure_clean_store(self.path) as store: + + df = DataFrame(dict(A=lrange(5), B=lrange(5))) + store.put("df", df) + + assert store.keys() == ["/df"] 
+ + store._handle.create_soft_link(store._handle.root, "symlink", "df") + + # Should ignore the softlink + assert store.keys() == ["/df"] + def test_iter_empty(self): with ensure_clean_store(self.path) as store:
- [x] closes #20523 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20537
2018-03-29T13:43:21Z
2018-04-03T13:00:17Z
2018-04-03T13:00:16Z
2018-04-03T13:01:48Z
ENH20521 Added metadata argument to DataFrame.to_parquet
The argument allows for custom file metadata updating the default one. Closes #20521 Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Checklist from comments: - [ ] add example in to_parquet docstring - [ ] add API docs - [ ] investigate fastparquet - [ ] correct kwargs - [ ] add test for warning - [ ] address other comments
https://api.github.com/repos/pandas-dev/pandas/pulls/20534
2018-03-29T12:21:47Z
2018-10-07T16:06:24Z
null
2019-05-02T08:43:07Z
DOC: Extension whatsenw
diff --git a/ci/build_docs.sh b/ci/build_docs.sh index 5de9e158bcdb6..90a666dc34ed7 100755 --- a/ci/build_docs.sh +++ b/ci/build_docs.sh @@ -24,6 +24,7 @@ if [ "$DOC" ]; then source activate pandas mv "$TRAVIS_BUILD_DIR"/doc /tmp + mv "$TRAVIS_BUILD_DIR/LICENSE" /tmp # included in the docs. cd /tmp/doc echo ############################### diff --git a/doc/source/api.rst b/doc/source/api.rst index a5d24302e69e2..5e794c11658e8 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2576,4 +2576,3 @@ objects. generated/pandas.Series.ix generated/pandas.Series.imag generated/pandas.Series.real - generated/pandas.Timestamp.offset diff --git a/doc/source/io.rst b/doc/source/io.rst index 68b431925d983..ff505f525fc22 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2263,7 +2263,7 @@ round-trippable manner. new_df.dtypes Please note that the literal string 'index' as the name of an :class:`Index` -is not round-trippable, nor are any names beginning with 'level_' within a +is not round-trippable, nor are any names beginning with ``'level_'`` within a :class:`MultiIndex`. These are used by default in :func:`DataFrame.to_json` to indicate missing values and the subsequent read cannot distinguish the intent. diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 86cff4a358975..adb4cdf2974a0 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -198,8 +198,9 @@ This could also potentially speed up the conversion considerably. pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M') For more information on the choices available when specifying the ``format`` -option, see the Python `datetime documentation -<https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior`__. +option, see the Python `datetime documentation`_. + +.. 
_datetime documentation: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior Assembling Datetime from Multiple DataFrame Columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6dadb7589869..e83f149db1f18 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -299,6 +299,63 @@ Supplying a ``CategoricalDtype`` will make the categories in each column consist df['A'].dtype df['B'].dtype +.. _whatsnew_023.enhancements.extension: + +Extending Pandas with Custom Types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy +arrays as columns in a DataFrame or values in a Series. This allows third-party +libraries to implement extensions to NumPy's types, similar to how pandas +implemented categoricals, datetimes with timezones, periods, and intervals. + +As a demonstration, we'll use cyberpandas_, which provides an ``IPArray`` type +for storing ip addresses. + +.. code-block:: ipython + + In [1]: from cyberpandas import IPArray + + In [2]: values = IPArray([ + ...: 0, + ...: 3232235777, + ...: 42540766452641154071740215577757643572 + ...: ]) + ...: + ...: + +``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas +``ExtensionArray``, it can be stored properly inside pandas' containers. + +.. code-block:: ipython + + In [3]: ser = pd.Series(values) + ...: + + In [4]: ser + Out[4]: + 0 0.0.0.0 + 1 192.168.1.1 + 2 2001:db8:85a3::8a2e:370:7334 + dtype: ip + +Notice that the dtype is ``ip``. The missing value semantics of the underlying +array are respected: + + In [5]: ser.isna() + ...: + Out[5]: + 0 True + 1 False + 2 False + dtype: bool + +For more, see the :ref:`extension types <extending.extension-types>` +documentation. If you build an extension array, publicize it on our +:ref:`ecosystem page <ecosystem.extensions>`. + +.. 
_cyberpandas: https://cyberpandas.readthedocs.io/en/latest/ + .. _whatsnew_0230.enhancements.other: Other Enhancements diff --git a/pandas/core/series.py b/pandas/core/series.py index 89075e5e6acbb..30e0319346961 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -468,7 +468,7 @@ def asobject(self): .. deprecated :: 0.23.0 - Use ``astype(object) instead. + Use ``astype(object)`` instead. *this is an internal non-public method* """
Closes https://github.com/pandas-dev/pandas/issues/20532
https://api.github.com/repos/pandas-dev/pandas/pulls/20533
2018-03-29T12:10:56Z
2018-03-29T20:10:04Z
2018-03-29T20:10:04Z
2018-03-29T20:10:09Z
COMPAT: 32-bit compat for testing
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ada4f880e92a4..8a8a6f7de70d7 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -266,7 +266,7 @@ def test_parametrized_factorize_na_value_default(self, data): # arrays that include the NA default for that type, but isn't used. l, u = algos.factorize(data) expected_uniques = data[[0, 1]] - expected_labels = np.array([0, 1, 0], dtype='i8') + expected_labels = np.array([0, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(l, expected_labels) tm.assert_numpy_array_equal(u, expected_uniques) @@ -283,7 +283,7 @@ def test_parametrized_factorize_na_value_default(self, data): def test_parametrized_factorize_na_value(self, data, na_value): l, u = algos._factorize_array(data, na_value=na_value) expected_uniques = data[[1, 3]] - expected_labels = np.array([-1, 0, -1, 1], dtype='i8') + expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp) tm.assert_numpy_array_equal(l, expected_labels) tm.assert_numpy_array_equal(u, expected_uniques)
xref #20502
https://api.github.com/repos/pandas-dev/pandas/pulls/20528
2018-03-29T10:17:36Z
2018-03-30T19:01:17Z
2018-03-30T19:01:17Z
2018-03-30T19:01:48Z
BUG: #19497 FIX. Add tupleize_cols option to internals._transform_index()
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ace975385ce32..236b0109da786 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3642,6 +3642,10 @@ def rename(self, *args, **kwargs): level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. + tupleize_cols : boolean, default False + In case of an Index, when True, create MultiIndex if possible. + False ensures that an Index will not be converted to a + MultiIndex if labels are 'rename'd. Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d5cd22732f0a9..ef0ab2cfce0c4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -933,6 +933,7 @@ def rename(self, *args, **kwargs): inplace = kwargs.pop('inplace', False) level = kwargs.pop('level', None) axis = kwargs.pop('axis', None) + tupleize_cols = kwargs.pop('tupleize_cols', False) if axis is not None: axis = self._get_axis_number(axis) @@ -970,8 +971,10 @@ def f(x): baxis = self._get_block_manager_axis(axis) if level is not None: level = self.axes[axis]._get_level_number(level) - result._data = result._data.rename_axis(f, axis=baxis, copy=copy, - level=level) + result._data = \ + result._data.rename_axis(f, axis=baxis, copy=copy, + level=level, + tupleize_cols=tupleize_cols) result._clear_item_cache() if inplace: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a0e122d390240..48ad53ee67f79 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -3280,7 +3280,8 @@ def set_axis(self, axis, new_labels): self.axes[axis] = new_labels - def rename_axis(self, mapper, axis, copy=True, level=None): + def rename_axis(self, mapper, axis, copy=True, level=None, + tupleize_cols=False): """ Rename one of axes. 
@@ -3293,7 +3294,8 @@ def rename_axis(self, mapper, axis, copy=True, level=None): """ obj = self.copy(deep=copy) - obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) + obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level, + tupleize_cols=tupleize_cols)) return obj def add_prefix(self, prefix): @@ -5234,7 +5236,7 @@ def _safe_reshape(arr, new_shape): return arr -def _transform_index(index, func, level=None): +def _transform_index(index, func, level=None, tupleize_cols=True): """ Apply function to all values found in index. @@ -5251,7 +5253,7 @@ def _transform_index(index, func, level=None): return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] - return Index(items, name=index.name) + return Index(items, name=index.name, tupleize_cols=tupleize_cols) def _putmask_smart(v, m, n):
- Closes #19497 - NO TESTS have been added/passed. Currently experiencing issues getting pandas conda environment installed so unable to do this myself. Very simple alteration however... - Code passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - Implements tupleize_cols option for pandas.core.internals_transform_index(), and provides the capability to manipulate this option using pandas.DataFrame.rename(). **NOTE**: for renaming, the keyword argument default is False, because as suggested [here](https://github.com/pandas-dev/pandas/issues/19497), this is unintuitive and unexepected. The keyword argument for the _transform_index() is True however, which is consistent with default pandas.Index() creation behaviour.
https://api.github.com/repos/pandas-dev/pandas/pulls/20526
2018-03-29T06:42:17Z
2018-03-30T19:09:40Z
null
2018-05-14T01:32:37Z
PERF: GH2003 Series.isin for categorical dtypes
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 7743921003353..0ffd5f881d626 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -148,3 +148,24 @@ def time_rank_int_cat(self): def time_rank_int_cat_ordered(self): self.s_int_cat_ordered.rank() + + +class Isin(object): + + goal_time = 0.2 + + params = ['object', 'int64'] + param_names = ['dtype'] + + def setup(self, dtype): + np.random.seed(1234) + n = 5 * 10**5 + sample_size = 100 + arr = [i for i in np.random.randint(0, n // 10, size=n)] + if dtype == 'object': + arr = ['s%04d' % i for i in arr] + self.sample = np.random.choice(arr, sample_size) + self.series = pd.Series(arr).astype('category') + + def time_isin_categorical(self, dtype): + self.series.isin(self.sample) diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 43e384b01ad2c..2b73a84810045 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -954,6 +954,7 @@ Performance Improvements - Improved performance of :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` (:issue:`11296`) - Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`) - Improved performance of :func:`pandas.core.groupby.GroupBy.pct_change` (:issue:`19165`) +- Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`) - Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`) .. 
_whatsnew_0230.docs: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 065a5782aced1..5493348334223 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -407,6 +407,13 @@ def isin(comps, values): if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) + if is_categorical_dtype(comps): + # TODO(extension) + # handle categoricals + return comps._values.isin(values) + + comps = com._values_from_object(comps) + comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 599161521f3a7..7f0d54de9def8 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -39,6 +39,8 @@ from pandas.util._decorators import ( Appender, cache_readonly, deprecate_kwarg, Substitution) +import pandas.core.algorithms as algorithms + from pandas.io.formats.terminal import get_terminal_size from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.config import get_option @@ -2216,6 +2218,60 @@ def _concat_same_type(self, to_concat): def _formatting_values(self): return self + def isin(self, values): + """ + Check whether `values` are contained in Categorical. + + Return a boolean NumPy Array showing whether each element in + the Categorical matches an element in the passed sequence of + `values` exactly. + + Parameters + ---------- + values : set or list-like + The sequence of values to test. Passing in a single string will + raise a ``TypeError``. Instead, turn a single string into a + list of one element. 
+ + Returns + ------- + isin : numpy.ndarray (bool dtype) + + Raises + ------ + TypeError + * If `values` is not a set or list-like + + See Also + -------- + pandas.Series.isin : equivalent method on Series + + Examples + -------- + + >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', + ... 'hippo']) + >>> s.isin(['cow', 'lama']) + array([ True, True, True, False, True, False]) + + Passing a single string as ``s.isin('lama')`` will raise an error. Use + a list of one element instead: + + >>> s.isin(['lama']) + array([ True, False, True, False, True, False]) + """ + from pandas.core.series import _sanitize_array + if not is_list_like(values): + raise TypeError("only list-like objects are allowed to be passed" + " to isin(), you passed a [{values_type}]" + .format(values_type=type(values).__name__)) + values = _sanitize_array(values, None, None) + null_mask = np.asarray(isna(values)) + code_values = self.categories.get_indexer(values) + code_values = code_values[null_mask | (code_values >= 0)] + return algorithms.isin(self.codes, code_values) + + # The Series.cat accessor diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3d60eefc5b598..21006c4831ac5 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3516,7 +3516,7 @@ def isin(self, values, level=None): """ if level is not None: self._validate_index_level(level) - return algos.isin(np.array(self), values) + return algos.isin(self, values) def _can_reindex(self, indexer): """ diff --git a/pandas/core/series.py b/pandas/core/series.py index aa4cb510feb62..f2ee225f50514 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3567,7 +3567,7 @@ def isin(self, values): 5 False Name: animal, dtype: bool """ - result = algorithms.isin(com._values_from_object(self), values) + result = algorithms.isin(self, values) return self._constructor(result, index=self.index).__finalize__(self) def between(self, left, right, inclusive=True): diff --git 
a/pandas/tests/categorical/test_algos.py b/pandas/tests/categorical/test_algos.py index f727184e862d8..1c68377786dd4 100644 --- a/pandas/tests/categorical/test_algos.py +++ b/pandas/tests/categorical/test_algos.py @@ -47,3 +47,25 @@ def test_factorized_sort_ordered(): tm.assert_numpy_array_equal(labels, expected_labels) tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_isin_cats(): + # GH2003 + cat = pd.Categorical(["a", "b", np.nan]) + + result = cat.isin(["a", np.nan]) + expected = np.array([True, False, True], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + result = cat.isin(["a", "c"]) + expected = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])]) +def test_isin_empty(empty): + s = pd.Categorical(["a", "b"]) + expected = np.array([False, False], dtype=bool) + + result = s.isin(empty) + tm.assert_numpy_array_equal(expected, result)
I have added a branching for the categorical case in `Series.isin` function. I have also added a test for the most crucial cases (nans). closes #20003
https://api.github.com/repos/pandas-dev/pandas/pulls/20522
2018-03-28T18:49:50Z
2018-04-25T12:38:18Z
2018-04-25T12:38:18Z
2018-04-25T12:38:43Z
Set pd.options.display.max_rows = 20 by default
diff --git a/doc/source/options.rst b/doc/source/options.rst index 48247eb48baaf..0e5fa41be1d7a 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -352,7 +352,7 @@ display.max_info_rows 1690785 df.info() will usually show and max_info_cols limit this null check only to frames with smaller dimensions then specified. -display.max_rows 60 This sets the maximum number of rows +display.max_rows 20 This sets the maximum number of rows pandas should output when printing out various output. For example, this value determines whether the diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index b57b49c79bb93..03d58d8b2a0c8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -687,6 +687,11 @@ yourself. To revert to the old setting, you can run this line: pd.options.display.max_columns = 20 +Furthermore, the default value for the maximum number of displayed rows is now +20 (instead of 60) (``pd.options.display.max_rows=20``) (:issue:`20514`). This +makes it easier to get an overview of a large dataframe in most standard +terminal windows. + .. _whatsnew_0230.api.datetimelike: Datetimelike API Changes diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index b836a35b8cf29..301fa11e36b3f 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -310,7 +310,7 @@ def table_schema_cb(key): cf.register_option('column_space', 12, validator=is_int) cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None)))) - cf.register_option('max_rows', 60, pc_max_rows_doc, + cf.register_option('max_rows', 20, pc_max_rows_doc, validator=is_instance_factory([type(None), int])) cf.register_option('max_categories', 8, pc_max_categories_doc, validator=is_int)
As a follow-up to #17023, I propose to change the default number of displayed rows to 20. - [ ] closes #xxxx - [x] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20514
2018-03-28T09:14:20Z
2018-12-19T11:55:50Z
null
2018-12-19T11:55:50Z
API/BUG: Enforce "normalized" pytz timezones for DatetimeIndex
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fb63dc16249b2..852a8d327707d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -767,6 +767,8 @@ Datetimelike API Changes - :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) - For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with ``freq=None``, addition or subtraction of integer-dtyped array or ``Index`` will raise ``NullFrequencyError`` instead of ``TypeError`` (:issue:`19895`) - :class:`Timestamp` constructor now accepts a `nanosecond` keyword or positional argument (:issue:`18898`) +- :class:`DatetimeIndex` will now raise an ``AttributeError`` when the ``tz`` attribute is set after instantiation (:issue:`3746`) +- :class:`DatetimeIndex` with a ``pytz`` timezone will now return a consistent ``pytz`` timezone (:issue:`18595`) .. _whatsnew_0230.api.other: @@ -1123,6 +1125,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.resample().aggregate` not raising a ``KeyError`` when aggregating a non-existent column (:issue:`16766`, :issue:`19566`) - Fixed a performance regression for ``GroupBy.nth`` and ``GroupBy.last`` with some object columns (:issue:`19283`) - Bug in :func:`DataFrameGroupBy.cumsum` and :func:`DataFrameGroupBy.cumprod` when ``skipna`` was passed (:issue:`19806`) +- Bug in :func:`Dataframe.resample` that dropped timezone information (:issue:`13238`) Sparse ^^^^^^ diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 9818d53e386bd..ba5ebdab82ddc 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -700,6 +700,12 @@ class Timestamp(_Timestamp): """ return self.tzinfo + @tz.setter + def tz(self, value): + # GH 3746: Prevent localizing or converting the index by setting tz + raise AttributeError("Cannot directly set timezone. 
Use tz_localize() " + "or tz_convert() as appropriate") + def __setstate__(self, state): self.value = state[0] self.freq = state[1] diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 215ae9ce087ee..74fadbdb64763 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -314,3 +314,41 @@ cpdef bint tz_compare(object start, object end): """ # GH 18523 return get_timezone(start) == get_timezone(end) + + +cpdef tz_standardize(object tz): + """ + If the passed tz is a pytz timezone object, "normalize" it to the a + consistent version + + Parameters + ---------- + tz : tz object + + Returns: + ------- + tz object + + Examples: + -------- + >>> tz + <DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD> + + >>> tz_standardize(tz) + <DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD> + + >>> tz + <DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD> + + >>> tz_standardize(tz) + <DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD> + + >>> tz + dateutil.tz.tz.tzutc + + >>> tz_standardize(tz) + dateutil.tz.tz.tzutc + """ + if treat_tz_as_pytz(tz): + return pytz.timezone(str(tz)) + return tz diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b906ea0f4784c..95e1f8438c704 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -1005,7 +1005,7 @@ def shift(self, n, freq=None): result = self + offset if hasattr(self, 'tz'): - result.tz = self.tz + result._tz = self.tz return result diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 75f4ec4f0d341..88ea3511d4ee3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -511,13 +511,7 @@ def _generate(cls, start, end, periods, name, offset, 'different timezones') inferred_tz = timezones.maybe_get_tz(inferred_tz) - - # these may need to be localized tz = timezones.maybe_get_tz(tz) - if tz is not None: - date = start or end - if date.tzinfo 
is not None and hasattr(tz, 'localize'): - tz = tz.localize(date.replace(tzinfo=None)).tzinfo if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): @@ -654,7 +648,8 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, result._data = values result.name = name result.offset = freq - result.tz = timezones.maybe_get_tz(tz) + result._tz = timezones.maybe_get_tz(tz) + result._tz = timezones.tz_standardize(result._tz) result._reset_identity() return result @@ -684,6 +679,17 @@ def _values(self): else: return self.values + @property + def tz(self): + # GH 18595 + return self._tz + + @tz.setter + def tz(self, value): + # GH 3746: Prevent localizing or converting the index by setting tz + raise AttributeError("Cannot directly set timezone. Use tz_localize() " + "or tz_convert() as appropriate") + @property def tzinfo(self): """ @@ -754,7 +760,7 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None, cachedRange = DatetimeIndex._simple_new(arr) cachedRange.offset = offset - cachedRange.tz = None + cachedRange = cachedRange.tz_localize(None) cachedRange.name = None drc[offset] = cachedRange else: @@ -831,7 +837,7 @@ def __setstate__(self, state): self.name = own_state[0] self.offset = own_state[1] - self.tz = own_state[2] + self._tz = timezones.tz_standardize(own_state[2]) # provide numpy < 1.7 compat if nd_state[2] == 'M8[us]': @@ -1175,7 +1181,7 @@ def union(self, other): else: result = Index.union(this, other) if isinstance(result, DatetimeIndex): - result.tz = this.tz + result._tz = timezones.tz_standardize(this.tz) if (result.freq is None and (this.freq is not None or other.freq is not None)): result.offset = to_offset(result.inferred_freq) @@ -1223,7 +1229,7 @@ def union_many(self, others): tz = this.tz this = Index.union(this, other) if isinstance(this, DatetimeIndex): - this.tz = tz + this._tz = timezones.tz_standardize(tz) if this.freq is None: this.offset = to_offset(this.inferred_freq) diff --git 
a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 3e0ba26c20eb0..785bb128512fc 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -249,8 +249,8 @@ def test_set_index_cast_datetimeindex(self): # convert to utc df['C'] = i.to_series().reset_index(drop=True) result = df['C'] - comp = pd.DatetimeIndex(expected.values).copy() - comp.tz = None + comp = pd.DatetimeIndex(expected.values) + comp = comp.tz_localize(None) tm.assert_numpy_array_equal(result.values, comp.values) # list of datetimes with a tz diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 176f5bd0c1a2a..97e01478c736b 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -441,6 +441,34 @@ def test_000constructor_resolution(self): assert idx.nanosecond[0] == t1.nanosecond + def test_disallow_setting_tz(self): + # GH 3746 + dti = DatetimeIndex(['2010'], tz='UTC') + with pytest.raises(AttributeError): + dti.tz = pytz.timezone('US/Pacific') + + @pytest.mark.parametrize('tz', [ + None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'), + Timestamp('2000', tz='America/Los_Angeles').tz]) + def test_constructor_start_end_with_tz(self, tz): + # GH 18595 + start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles') + end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles') + result = DatetimeIndex(freq='D', start=start, end=end, tz=tz) + expected = DatetimeIndex(['2013-01-01 06:00:00', + '2013-01-02 06:00:00'], + tz='America/Los_Angeles') + tm.assert_index_equal(result, expected) + # Especially assert that the timezone is consistent for pytz + assert pytz.timezone('America/Los_Angeles') is result.tz + + @pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo']) + def test_constructor_with_non_normalized_pytz(self, tz): + # GH 18595 + non_norm_tz = 
Timestamp('2010', tz=tz).tz + result = DatetimeIndex(['2010'], tz=non_norm_tz) + assert pytz.timezone(tz) is result.tz + class TestTimeSeries(object): diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index cde5baf47c18e..55ed7e6cfa8db 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -521,6 +521,13 @@ def test_today(self): assert (abs(ts_from_string_tz.tz_localize(None) - ts_from_method_tz.tz_localize(None)) < delta) + @pytest.mark.parametrize('tz', [None, pytz.timezone('US/Pacific')]) + def test_disallow_setting_tz(self, tz): + # GH 3746 + ts = Timestamp('2010') + with pytest.raises(AttributeError): + ts.tz = tz + class TestTimestamp(object): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 896002d007a69..2180e38e24e6c 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -2532,6 +2532,18 @@ def test_with_local_timezone_pytz(self): expected = Series(1, index=expected_index) assert_series_equal(result, expected) + def test_resample_with_pytz(self): + # GH 13238 + s = Series(2, index=pd.date_range('2017-01-01', periods=48, freq="H", + tz="US/Eastern")) + result = s.resample("D").mean() + expected = Series(2, index=pd.DatetimeIndex(['2017-01-01', + '2017-01-02'], + tz="US/Eastern")) + assert_series_equal(result, expected) + # Especially assert that the timezone is LMT for pytz + assert result.index.tz == pytz.timezone('US/Eastern') + def test_with_local_timezone_dateutil(self): # see gh-5430 local_timezone = 'dateutil/America/Los_Angeles'
closes #3746 closes #18595 closes #13238 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Addressing 3 birds with 2 stones here. Using @pganssle suggested implementation for a tz property and directly raising an error per #3746 (could depreciate and error in a future version as well, open to feedback on the prefered path of API change) Additionally, solves the issue of resampling a DataFrame/Series with a DatetimeIndex that retained a local timezone instead of the "LMT" version.
https://api.github.com/repos/pandas-dev/pandas/pulls/20510
2018-03-28T03:19:52Z
2018-04-11T02:29:55Z
2018-04-11T02:29:54Z
2018-04-11T17:10:07Z
DOC: Fix various warnings
diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst index eb97aeeb7e696..a7586f623a160 100644 --- a/doc/source/comparison_with_r.rst +++ b/doc/source/comparison_with_r.rst @@ -397,7 +397,7 @@ In Python, this list would be a list of tuples, so pd.DataFrame(a) For more details and examples see :ref:`the Into to Data Structures -documentation <basics.dataframe.from_items>`. +documentation <dsintro>`. |meltdf|_ ~~~~~~~~~~~~~~~~ diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 967d1fe3369f0..6d5ac31c39a62 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -298,6 +298,11 @@ Some other important things to know about the docs: Standard**. Follow the :ref:`pandas docstring guide <docstring>` for detailed instructions on how to write a correct docstring. + .. toctree:: + :maxdepth: 2 + + contributing_docstring.rst + - The tutorials make heavy use of the `ipython directive <http://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension. This directive lets you put code in the documentation which will be run @@ -900,7 +905,7 @@ Documenting your code Changes should be reflected in the release notes located in ``doc/source/whatsnew/vx.y.z.txt``. This file contains an ongoing change log for each release. Add an entry to this file to document your fix, enhancement or (unavoidable) breaking change. Make sure to include the -GitHub issue number when adding your entry (using `` :issue:`1234` `` where `1234` is the +GitHub issue number when adding your entry (using ``:issue:`1234``` where ``1234`` is the issue/pull request number). If your code is an enhancement, it is most likely necessary to add usage @@ -1020,7 +1025,7 @@ release. To submit a pull request: #. Click ``Send Pull Request``. This request then goes to the repository maintainers, and they will review -the code. +the code. .. 
_contributing.update-pr: @@ -1028,7 +1033,7 @@ Updating your pull request -------------------------- Based on the review you get on your pull request, you will probably need to make -some changes to the code. In that case, you can make them in your branch, +some changes to the code. In that case, you can make them in your branch, add a new commit to that branch, push it to GitHub, and the pull request will be automatically updated. Pushing them to GitHub again is done by:: @@ -1039,7 +1044,7 @@ This will automatically update your pull request with the latest code and restar Another reason you might need to update your pull request is to solve conflicts with changes that have been merged into the master branch since you opened your -pull request. +pull request. To do this, you need to "merge upstream master" in your branch:: diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index b6690eff89836..4e61228d5c0ad 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -411,6 +411,8 @@ Levels `Flatten Hierarchical columns <http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__ +.. _cookbook.missing_data: + Missing Data ------------ diff --git a/doc/source/io.rst b/doc/source/io.rst index d6bd81861adee..68b431925d983 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3862,6 +3862,8 @@ Then create the index when finished appending. See `here <http://stackoverflow.com/questions/17893370/ptrepack-sortby-needs-full-index>`__ for how to create a completely-sorted-index (CSI) on an existing store. +.. _io.hdf5-query-data-columns: + Query via Data Columns ++++++++++++++++++++++ diff --git a/doc/source/release.rst b/doc/source/release.rst index 8e063116cbf07..da3362b47b29b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -71,7 +71,7 @@ Highlights include: - Temporarily restore matplotlib datetime plotting functionality. 
This should resolve issues for users who relied implicitly on pandas to plot datetimes - with matplotlib. See :ref:`here <whatsnew_0211.special>`. + with matplotlib. See :ref:`here <whatsnew_0211.converters>`. - Improvements to the Parquet IO functions introduced in 0.21.0. See :ref:`here <whatsnew_0211.enhancements.parquet>`. diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt index 222a2da23865c..3fc05158b7fe7 100644 --- a/doc/source/whatsnew/v0.10.0.txt +++ b/doc/source/whatsnew/v0.10.0.txt @@ -409,7 +409,7 @@ N Dimensional Panels (Experimental) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Adding experimental support for Panel4D and factory functions to create n-dimensional named panels. -:ref:`Docs <dsintro.panel4d>` for NDim. Here is a taste of what to expect. +Here is a taste of what to expect. .. code-block:: ipython diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt index 9e1dc391d7ace..5c716f6ad45c1 100644 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -26,7 +26,7 @@ Highlights include: .. warning:: - In pandas 0.17.0, the sub-package ``pandas.io.data`` will be removed in favor of a separately installable package. See :ref:`here for details <remote_data.pandas_datareader>` (:issue:`8961`) + In pandas 0.17.0, the sub-package ``pandas.io.data`` will be removed in favor of a separately installable package (:issue:`8961`). Enhancements ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a19b71c27d998..9a8659dfd8b06 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -331,7 +331,7 @@ Other Enhancements :func:`pandas.api.extensions.register_series_accessor`, and :func:`pandas.api.extensions.register_index_accessor`, accessor for libraries downstream of pandas to register custom accessors like ``.cat`` on pandas objects. 
See - :ref:`Registering Custom Accessors <developer.register-accessors>` for more (:issue:`14781`). + :ref:`Registering Custom Accessors <extending.register-accessors>` for more (:issue:`14781`). - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) - :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`) diff --git a/doc/source/whatsnew/v0.6.1.txt b/doc/source/whatsnew/v0.6.1.txt index a2dab738546f9..acd5b0774f2bb 100644 --- a/doc/source/whatsnew/v0.6.1.txt +++ b/doc/source/whatsnew/v0.6.1.txt @@ -16,12 +16,12 @@ New features - Add PyQt table widget to sandbox (:issue:`435`) - DataFrame.align can :ref:`accept Series arguments <basics.align.frame.series>` and an :ref:`axis option <basics.df_join>` (:issue:`461`) -- Implement new :ref:`SparseArray <sparse.array>` and :ref:`SparseList <sparse.list>` +- Implement new :ref:`SparseArray <sparse.array>` and `SparseList` data structures. SparseSeries now derives from SparseArray (:issue:`463`) - :ref:`Better console printing options <basics.console_output>` (:issue:`453`) - Implement fast :ref:`data ranking <computation.ranking>` for Series and DataFrame, fast versions of scipy.stats.rankdata (:issue:`428`) -- Implement :ref:`DataFrame.from_items <basics.dataframe.from_items>` alternate +- Implement `DataFrame.from_items` alternate constructor (:issue:`444`) - DataFrame.convert_objects method for :ref:`inferring better dtypes <basics.cast>` for object columns (:issue:`302`) diff --git a/doc/source/whatsnew/v0.7.3.txt b/doc/source/whatsnew/v0.7.3.txt index 6b5199c55cbf5..77cc72d8707cf 100644 --- a/doc/source/whatsnew/v0.7.3.txt +++ b/doc/source/whatsnew/v0.7.3.txt @@ -22,7 +22,7 @@ New features from pandas.tools.plotting import scatter_matrix scatter_matrix(df, alpha=0.2) -.. image:: _static/scatter_matrix_kde.png +.. 
image:: savefig/scatter_matrix_kde.png :width: 5in - Add ``stacked`` argument to Series and DataFrame's ``plot`` method for @@ -32,14 +32,14 @@ New features df.plot(kind='bar', stacked=True) -.. image:: _static/bar_plot_stacked_ex.png +.. image:: savefig/bar_plot_stacked_ex.png :width: 4in .. code-block:: python df.plot(kind='barh', stacked=True) -.. image:: _static/barh_plot_stacked_ex.png +.. image:: savefig/barh_plot_stacked_ex.png :width: 4in - Add log x and y :ref:`scaling options <visualization.basic>` to diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f1fa43818ce64..d5cd22732f0a9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1900,7 +1900,7 @@ def to_hdf(self, path_or_buf, key, **kwargs): In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. - For more information see the :ref:`user guide <io.html#io-hdf5>`. + For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- @@ -1929,8 +1929,7 @@ def to_hdf(self, path_or_buf, key, **kwargs): data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes - of the object are indexed. See `here - <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__. + of the object are indexed. See :ref:`io.hdf5-query-data-columns`. Applicable only to format='table'. complevel : {0-9}, optional Specifies a compression level for data. @@ -2141,7 +2140,7 @@ def to_pickle(self, path, compression='infer', .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, - default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible + default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. 
For Python >= 3.4, 4 is a valid value. A negative diff --git a/pandas/core/series.py b/pandas/core/series.py index 62f0ea3ce8b2a..d066e9409b594 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -467,8 +467,8 @@ def asobject(self): """Return object Series which contains boxed values. .. deprecated :: 0.23.0 - Use ``astype(object) instead. + Use ``astype(object) instead. *this is an internal non-public method* """ @@ -1772,18 +1772,20 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): return self.index[i] # ndarray compat - argmin = deprecate('argmin', idxmin, '0.21.0', - msg="'argmin' is deprecated, use 'idxmin' instead. " - "The behavior of 'argmin' will be corrected to " - "return the positional minimum in the future. " - "Use 'series.values.argmin' to get the position of " - "the minimum now.") - argmax = deprecate('argmax', idxmax, '0.21.0', - msg="'argmax' is deprecated, use 'idxmax' instead. " - "The behavior of 'argmax' will be corrected to " - "return the positional maximum in the future. " - "Use 'series.values.argmax' to get the position of " - "the maximum now.") + argmin = deprecate( + 'argmin', idxmin, '0.21.0', + msg=dedent("""\ + 'argmin' is deprecated, use 'idxmin' instead. The behavior of 'argmin' + will be corrected to return the positional minimum in the future. + Use 'series.values.argmin' to get the position of the minimum now.""") + ) + argmax = deprecate( + 'argmax', idxmax, '0.21.0', + msg=dedent("""\ + 'argmax' is deprecated, use 'idxmax' instead. The behavior of 'argmax' + will be corrected to return the positional maximum in the future. 
+ Use 'series.values.argmax' to get the position of the maximum now.""") + ) def round(self, decimals=0, *args, **kwargs): """ diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 89e04f03cda32..6c3d07124215b 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -3100,7 +3100,7 @@ def hist(self, by=None, bins=10, **kwds): A histogram is a representation of the distribution of data. This function groups the values of all given Series in the DataFrame - into bins, and draws all bins in only one :ref:`matplotlib.axes.Axes`. + into bins and draws all bins in one :class:`matplotlib.axes.Axes`. This is useful when the DataFrame's Series are in a similar scale. Parameters diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 1753bc8b8fc33..624fbbbd4f05e 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -45,13 +45,18 @@ def wrapper(*args, **kwargs): return alternative(*args, **kwargs) # adding deprecated directive to the docstring - msg = msg or 'Use `{alt_name}` instead.' - docstring = '.. deprecated:: {}\n'.format(version) - docstring += dedent(' ' + ('\n'.join(wrap(msg, 70)))) - - if getattr(wrapper, '__doc__') is not None: - docstring += dedent(wrapper.__doc__) - + msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) + tpl = dedent(""" + .. deprecated:: {version} + + {msg} + + {rest} + """) + rest = getattr(wrapper, '__doc__', '') + docstring = tpl.format(version=version, + msg='\n '.join(wrap(msg, 70)), + rest=dedent(rest)) wrapper.__doc__ = docstring return wrapper
https://api.github.com/repos/pandas-dev/pandas/pulls/20509
2018-03-27T21:29:47Z
2018-03-29T02:31:38Z
2018-03-29T02:31:38Z
2018-03-29T02:31:41Z
Move GroupBy to Submodule and Add FutureWarning
diff --git a/pandas/core/api.py b/pandas/core/api.py index aa37ddffa1156..640baf31268a7 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -7,7 +7,7 @@ from pandas.core.algorithms import factorize, unique, value_counts from pandas.core.dtypes.missing import isna, isnull, notna, notnull from pandas.core.arrays import Categorical -from pandas.core.groupby import Grouper +from pandas.core.groupby.groupby import Grouper from pandas.io.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 33617964d7e59..ae9d160db08e9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6589,7 +6589,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, resample : Convenience method for frequency conversion and resampling of time series. """ - from pandas.core.groupby import groupby + from pandas.core.groupby.groupby import groupby if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") diff --git a/pandas/core/groupby/__init__.py b/pandas/core/groupby/__init__.py new file mode 100644 index 0000000000000..4b2ebdf16b89b --- /dev/null +++ b/pandas/core/groupby/__init__.py @@ -0,0 +1,4 @@ +# flake8: noqa +from pandas.core.groupby.groupby import ( + Grouper, GroupBy, SeriesGroupBy, DataFrameGroupBy +) diff --git a/pandas/core/groupby.py b/pandas/core/groupby/groupby.py similarity index 100% rename from pandas/core/groupby.py rename to pandas/core/groupby/groupby.py diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 7c087ac7deafc..e08d0a7368ccb 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -912,7 +912,7 @@ def groupby(self, function, axis='major'): ------- grouped : PanelGroupBy """ - from pandas.core.groupby import PanelGroupBy + from pandas.core.groupby.groupby import PanelGroupBy axis = self._get_axis_number(axis) 
return PanelGroupBy(self, function, axis=axis) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index b3ab90fd67de4..0d0023b9f67d3 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -7,9 +7,10 @@ import pandas as pd from pandas.core.base import GroupByMixin -from pandas.core.groupby import (BinGrouper, Grouper, _GroupBy, GroupBy, - SeriesGroupBy, groupby, PanelGroupBy, - _pipe_template) +from pandas.core.groupby.groupby import ( + BinGrouper, Grouper, _GroupBy, GroupBy, SeriesGroupBy, groupby, + PanelGroupBy, _pipe_template +) from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod from pandas.core.indexes.datetimes import DatetimeIndex, date_range diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index a4c9848dca900..74a9b59d3194a 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -6,7 +6,7 @@ from pandas.core.reshape.concat import concat from pandas.core.series import Series -from pandas.core.groupby import Grouper +from pandas.core.groupby.groupby import Grouper from pandas.core.reshape.util import cartesian_product from pandas.core.index import Index, _get_objs_combined_axis from pandas.compat import range, lrange, zip diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 7cc6c2fa7b88c..d85719d328ff2 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import concat, DataFrame, Index, MultiIndex, Series -from pandas.core.groupby import Grouping, SpecificationError +from pandas.core.groupby.groupby import Grouping, SpecificationError from pandas.compat import OrderedDict import pandas.util.testing as tm diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index cef3a699ed24b..80383c895a5e5 100644 --- 
a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -14,7 +14,7 @@ from pandas import (bdate_range, DataFrame, Index, Series, Timestamp, Timedelta, NaT) -from pandas.core.groupby import DataError +from pandas.core.groupby.groupby import DataError import pandas.util.testing as tm diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 4c407ad8a0d93..7c6cb5b9615cb 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -18,7 +18,7 @@ from pandas import ( date_range, DataFrame, Index, MultiIndex, PeriodIndex, period_range, Series ) -from pandas.core.groupby import SpecificationError +from pandas.core.groupby.groupby import SpecificationError from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 979b2f7a539af..c293f49c5bc2a 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -54,7 +54,7 @@ def setup_method(self, method): self.bins = np.array([3, 6], dtype=np.int64) def test_generate_bins(self): - from pandas.core.groupby import generate_bins_generic + from pandas.core.groupby.groupby import generate_bins_generic values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64) binner = np.array([0, 3, 6, 9], dtype=np.int64) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 8702062e9cd0a..57becd342d370 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -340,7 +340,7 @@ def test_groupby_grouper_f_sanity_checked(self): pytest.raises(AssertionError, ts.groupby, lambda key: key[0:6]) def test_grouping_error_on_multidim_input(self): - from pandas.core.groupby import Grouping + from pandas.core.groupby.groupby import Grouping pytest.raises(ValueError, 
Grouping, self.df.index, self.df[['A', 'A']]) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index d359bfa5351a9..17ca5d31b6b59 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -57,11 +57,12 @@ def test_groupby_with_timegrouper(self): result3 = df.groupby(pd.Grouper(freq='5D')).sum() assert_frame_equal(result3, expected) - def test_groupby_with_timegrouper_methods(self): + @pytest.mark.parametrize("should_sort", [True, False]) + def test_groupby_with_timegrouper_methods(self, should_sort): # GH 3881 # make sure API of timegrouper conforms - df_original = pd.DataFrame({ + df = pd.DataFrame({ 'Branch': 'A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(), 'Quantity': [1, 3, 5, 8, 9, 3], @@ -75,16 +76,18 @@ def test_groupby_with_timegrouper_methods(self): ] }) - df_sorted = df_original.sort_values(by='Quantity', ascending=False) + if should_sort: + df = df.sort_values(by='Quantity', ascending=False) - for df in [df_original, df_sorted]: - df = df.set_index('Date', drop=False) - g = df.groupby(pd.Grouper(freq='6M')) - assert g.group_keys - assert isinstance(g.grouper, pd.core.groupby.BinGrouper) - groups = g.groups - assert isinstance(groups, dict) - assert len(groups) == 3 + df = df.set_index('Date', drop=False) + g = df.groupby(pd.Grouper(freq='6M')) + assert g.group_keys + + import pandas.core.groupby.groupby + assert isinstance(g.grouper, pandas.core.groupby.groupby.BinGrouper) + groups = g.groups + assert isinstance(groups, dict) + assert len(groups) == 3 def test_timegrouper_with_reg_groups(self): diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index bce38b8cf9eed..23326d1b105fe 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -13,7 +13,7 @@ from .common import MixIn, assert_fp_equal from pandas.util.testing import assert_frame_equal, 
assert_series_equal -from pandas.core.groupby import DataError +from pandas.core.groupby.groupby import DataError from pandas.core.config import option_context diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 23cc18de34778..ef09b64d5b6eb 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -21,7 +21,7 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.compat import range, lrange, zip, product, OrderedDict from pandas.errors import UnsupportedFunctionCall -from pandas.core.groupby import DataError +from pandas.core.groupby.groupby import DataError import pandas.core.common as com from pandas.tseries.frequencies import to_offset
ref #20485 @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/20506
2018-03-27T17:04:15Z
2018-04-02T22:38:33Z
2018-04-02T22:38:32Z
2018-05-14T21:11:31Z
Option_context Dictionary argument
diff --git a/pandas/core/config.py b/pandas/core/config.py index 369e0568346ef..24a3c44ac019c 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -379,6 +379,8 @@ class option_context(object): Context manager to temporarily set options in the `with` statement context. You need to invoke as ``option_context(pat, val, [(pat, val), ...])``. + You may also invoke using a dictionary of (pat, val) pairs: + ``option_context(options={pat:val})`` Examples -------- @@ -388,12 +390,14 @@ class option_context(object): """ - def __init__(self, *args): - if not (len(args) % 2 == 0 and len(args) >= 2): + def __init__(self, *args, options={}): + if not (len(args) % 2 == 0 and (len(args) >= 2 or len(options))): raise ValueError('Need to invoke as' 'option_context(pat, val, [(pat, val), ...)).') - self.ops = list(zip(args[::2], args[1::2])) + self.ops = list(zip(args[::2], args[1::2])) + [ + [key, val] for key, val in options.items() + ] def __enter__(self): undo = [] diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index 91ce65dcce9b2..87a1912da9015 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -370,6 +370,9 @@ def eq(val): eq(25) eq(15) eq(0) + with self.cf.option_context(options={"a": 15}): + eq(15) + eq(0) self.cf.set_option("a", 17) eq(17)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): Not a documentation sprint participant Please include the output of the validation script below between the "```" ticks: ``` root@dfb9f788ea02:/pandas# scripts/validate_docstrings.py pandas.option_context ################################################################################ ###################### Docstring (pandas.option_context) ###################### ################################################################################ Context manager to temporarily set options in the `with` statement context. You need to invoke as ``option_context(pat, val, [(pat, val), ...])``. You may also invoke using a dictionary of (pat, val) pairs: ``option_context(options={pat:val})`` Examples -------- >>> with option_context('display.max_rows', 10, 'display.max_columns', 5): ... Traceback (most recent call last): File "scripts/validate_docstrings.py", line 505, in <module> sys.exit(main(args.function)) File "scripts/validate_docstrings.py", line 491, in main return validate_one(function) File "scripts/validate_docstrings.py", line 468, in validate_one examples_errs = doc.examples_errors File "scripts/validate_docstrings.py", line 265, in examples_errors for test in finder.find(self.raw_doc, self.method_name, globs=context): File "/usr/lib/python3.5/doctest.py", line 924, in find self._find(tests, obj, name, module, source_lines, globs, {}) File "/usr/lib/python3.5/doctest.py", line 974, in _find test = self._get_test(obj, name, module, globs, source_lines) File "/usr/lib/python3.5/doctest.py", line 1058, in _get_test filename, lineno) File "/usr/lib/python3.5/doctest.py", line 660, in get_doctest return DocTest(self.get_examples(string, name), globs, File "/usr/lib/python3.5/doctest.py", line 674, in get_examples return [x for x in self.parse(string, name) File "/usr/lib/python3.5/doctest.py", line 636, in parse self._parse_example(m, name, lineno) File 
"/usr/lib/python3.5/doctest.py", line 695, in _parse_example self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) File "/usr/lib/python3.5/doctest.py", line 792, in _check_prefix (lineno+i+1, name, line)) ValueError: line 11 of the docstring for pandas.option_context has inconsistent leading whitespace: ' ...' ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Two reasons to ignore the error with the whitespace: 1) It was there in the original, unmodified docstring 2) It appears to be an indication of the indentation following entering a `with` context block Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] No related issue. Added to resolve a personal pet peeve (the original `option_context` api is extremely un-pythonic) - [x] tests added & passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry (Not sure what to add or to which specific whatsnew file)
https://api.github.com/repos/pandas-dev/pandas/pulls/20504
2018-03-27T16:34:52Z
2018-03-28T15:10:50Z
null
2018-03-28T15:10:50Z
ENH: Support ExtensionArray in Groupby
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 601acac20c96d..7c89cab6b1428 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -44,7 +44,7 @@ DataError, SpecificationError) from pandas.core.index import (Index, MultiIndex, CategoricalIndex, _ensure_index) -from pandas.core.arrays import Categorical +from pandas.core.arrays import ExtensionArray, Categorical from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.internals import BlockManager, make_block @@ -2968,7 +2968,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # no level passed elif not isinstance(self.grouper, - (Series, Index, Categorical, np.ndarray)): + (Series, Index, ExtensionArray, np.ndarray)): if getattr(self.grouper, 'ndim', 1) != 1: t = self.name or str(type(self.grouper)) raise ValueError("Grouper for '%s' not 1-dimensional" % t) diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 27c106efd0524..f8078d2798b32 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -44,6 +44,7 @@ class TestMyDtype(BaseDtypeTests): from .constructors import BaseConstructorsTests # noqa from .dtype import BaseDtypeTests # noqa from .getitem import BaseGetitemTests # noqa +from .groupby import BaseGroupbyTests # noqa from .interface import BaseInterfaceTests # noqa from .methods import BaseMethodsTests # noqa from .missing import BaseMissingTests # noqa diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py new file mode 100644 index 0000000000000..a29ef2a509a63 --- /dev/null +++ b/pandas/tests/extension/base/groupby.py @@ -0,0 +1,69 @@ +import pytest + +import pandas.util.testing as tm +import pandas as pd +from .base import BaseExtensionTests + + +class BaseGroupbyTests(BaseExtensionTests): + """Groupby-specific tests.""" + + def test_grouping_grouper(self, 
data_for_grouping): + df = pd.DataFrame({ + "A": ["B", "B", None, None, "A", "A", "B", "C"], + "B": data_for_grouping + }) + gr1 = df.groupby("A").grouper.groupings[0] + gr2 = df.groupby("B").grouper.groupings[0] + + tm.assert_numpy_array_equal(gr1.grouper, df.A.values) + tm.assert_extension_array_equal(gr2.grouper, data_for_grouping) + + @pytest.mark.parametrize('as_index', [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], + "B": data_for_grouping}) + result = df.groupby("B", as_index=as_index).A.mean() + _, index = pd.factorize(data_for_grouping, sort=True) + # TODO(ExtensionIndex): remove astype + index = pd.Index(index.astype(object), name="B") + expected = pd.Series([3, 1, 4], index=index, name="A") + if as_index: + self.assert_series_equal(result, expected) + else: + expected = expected.reset_index() + self.assert_frame_equal(result, expected) + + def test_groupby_extension_no_sort(self, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], + "B": data_for_grouping}) + result = df.groupby("B", sort=False).A.mean() + _, index = pd.factorize(data_for_grouping, sort=False) + # TODO(ExtensionIndex): remove astype + index = pd.Index(index.astype(object), name="B") + expected = pd.Series([1, 3, 4], index=index, name="A") + self.assert_series_equal(result, expected) + + def test_groupby_extension_transform(self, data_for_grouping): + valid = data_for_grouping[~data_for_grouping.isna()] + df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], + "B": valid}) + + result = df.groupby("B").A.transform(len) + expected = pd.Series([3, 3, 2, 2, 3, 1], name="A") + + self.assert_series_equal(result, expected) + + @pytest.mark.parametrize('op', [ + lambda x: 1, + lambda x: [1] * len(x), + lambda x: pd.Series([1] * len(x)), + lambda x: x, + ], ids=['scalar', 'list', 'series', 'object']) + def test_groupby_extension_apply(self, data_for_grouping, op): + df = pd.DataFrame({"A": [1, 1, 2, 
2, 3, 3, 1, 4], + "B": data_for_grouping}) + df.groupby("B").apply(op) + df.groupby("B").A.apply(op) + df.groupby("A").apply(op) + df.groupby("A").B.apply(op) diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 22c1a67a0d60d..d509170565e1a 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -127,6 +127,10 @@ class TestCasting(BaseDecimal, base.BaseCastingTests): pass +class TestGroupby(BaseDecimal, base.BaseGroupbyTests): + pass + + def test_series_constructor_coerce_data_to_extension_dtype_raises(): xpr = ("Cannot cast data to extension dtype 'decimal'. Pass the " "extension array directly.") diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 51a68a3701046..d9ae49d87804a 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -113,8 +113,8 @@ def _concat_same_type(cls, to_concat): return cls(data) def _values_for_factorize(self): - frozen = tuple(tuple(x.items()) for x in self) - return np.array(frozen, dtype=object), () + frozen = self._values_for_argsort() + return frozen, () def _values_for_argsort(self): # Disable NumPy's shape inference by including an empty tuple... 
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 63d97d5e7a2c5..5e9639c487c37 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -89,11 +89,12 @@ def test_fillna_frame(self): """We treat dictionaries as a mapping in fillna, not a scalar.""" -class TestMethods(base.BaseMethodsTests): - unhashable = pytest.mark.skip(reason="Unhashable") - unstable = pytest.mark.skipif(not PY36, # 3.6 or higher - reason="Dictionary order unstable") +unhashable = pytest.mark.skip(reason="Unhashable") +unstable = pytest.mark.skipif(not PY36, # 3.6 or higher + reason="Dictionary order unstable") + +class TestMethods(base.BaseMethodsTests): @unhashable def test_value_counts(self, all_data, dropna): pass @@ -118,6 +119,7 @@ def test_sort_values(self, data_for_sorting, ascending): super(TestMethods, self).test_sort_values( data_for_sorting, ascending) + @unstable @pytest.mark.parametrize('ascending', [True, False]) def test_sort_values_missing(self, data_missing_for_sorting, ascending): super(TestMethods, self).test_sort_values_missing( @@ -126,3 +128,34 @@ def test_sort_values_missing(self, data_missing_for_sorting, ascending): class TestCasting(base.BaseCastingTests): pass + + +class TestGroupby(base.BaseGroupbyTests): + + @unhashable + def test_groupby_extension_transform(self): + """ + This currently fails in Series.name.setter, since the + name must be hashable, but the value is a dictionary. + I think this is what we want, i.e. `.name` should be the original + values, and not the values for factorization. + """ + + @unhashable + def test_groupby_extension_apply(self): + """ + This fails in Index._do_unique_check with + + > hash(val) + E TypeError: unhashable type: 'UserDict' with + + I suspect that once we support Index[ExtensionArray], + we'll be able to dispatch unique. 
+ """ + + @unstable + @pytest.mark.parametrize('as_index', [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_agg( + as_index, data_for_grouping + )
```python In [1]: import pandas as pd In [2]: from cyberpandas import IPArray In [3]: df = pd.DataFrame({"A": IPArray([0, 0, 1, 2, 2]), "B": [1, 5, 1, 1, 3]}) In [4]: df Out[4]: A B 0 0.0.0.0 1 1 0.0.0.0 5 2 0.0.0.1 1 3 0.0.0.2 1 4 0.0.0.2 3 In [5]: df.groupby("A").B.mean() Out[5]: A 0.0.0.1 1 0.0.0.2 2 Name: B, dtype: int64 ``` Note that right now `Out[5].index` just just an `Index` with object dtype. In the future, we could tie an Index type to an ExtensionArray type, and ensure that the extension type propagates through.
https://api.github.com/repos/pandas-dev/pandas/pulls/20502
2018-03-27T15:54:39Z
2018-03-28T10:35:45Z
2018-03-28T10:35:45Z
2018-05-02T13:10:02Z
Add interpolation options to rolling quantile
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index ba25ad6c5eda6..e3bf551fa5f2b 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -22,6 +22,7 @@ def setup(self, constructor, window, dtype, method): def time_rolling(self, constructor, window, dtype, method): getattr(self.roll, method)() + class VariableWindowMethods(Methods): sample_time = 0.2 params = (['DataFrame', 'Series'], @@ -37,6 +38,7 @@ def setup(self, constructor, window, dtype, method): index = pd.date_range('2017-01-01', periods=N, freq='5s') self.roll = getattr(pd, constructor)(arr, index=index).rolling(window) + class Pairwise(object): sample_time = 0.2 @@ -59,18 +61,19 @@ def time_pairwise(self, window, method, pairwise): class Quantile(object): - sample_time = 0.2 params = (['DataFrame', 'Series'], [10, 1000], ['int', 'float'], - [0, 0.5, 1]) + [0, 0.5, 1], + ['linear', 'nearest', 'lower', 'higher', 'midpoint']) param_names = ['constructor', 'window', 'dtype', 'percentile'] - def setup(self, constructor, window, dtype, percentile): - N = 10**5 + def setup(self, constructor, window, dtype, percentile, interpolation): + N = 10 ** 5 arr = np.random.random(N).astype(dtype) self.roll = getattr(pd, constructor)(arr).rolling(window) - def time_quantile(self, constructor, window, dtype, percentile): - self.roll.quantile(percentile) + def time_quantile(self, constructor, window, dtype, percentile, + interpolation): + self.roll.quantile(percentile, interpolation=interpolation) diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 9d9ce0b49f760..69d41f29506d1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -443,6 +443,7 @@ Other Enhancements - :meth:`DataFrame.to_sql` now performs a multivalue insert if the underlying connection supports itk rather than inserting row by row. 
``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) +- :meth:`Rolling.quantile` and :meth:`Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) - :class:`WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). - :class:`DataFrame` and :class:`Series` now support matrix multiplication (```@```) operator (:issue:`10259`) for Python>=3.5 diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index e524f823605a4..6b1239e198e26 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1357,25 +1357,50 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, return output +cdef enum InterpolationType: + LINEAR, + LOWER, + HIGHER, + NEAREST, + MIDPOINT + + +interpolation_types = { + 'linear': LINEAR, + 'lower': LOWER, + 'higher': HIGHER, + 'nearest': NEAREST, + 'midpoint': MIDPOINT, +} + + def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, int64_t minp, object index, object closed, - double quantile): + double quantile, str interpolation): """ O(N log(window)) implementation using skip list """ cdef: - double val, prev, midpoint - IndexableSkiplist skiplist + double val, prev, midpoint, idx_with_fraction + skiplist_t *skiplist int64_t nobs = 0, i, j, s, e, N Py_ssize_t idx bint is_variable ndarray[int64_t] start, end ndarray[double_t] output double vlow, vhigh + InterpolationType interpolation_type + int ret = 0 if quantile <= 0.0 or quantile >= 
1.0: raise ValueError("quantile value {0} not in [0, 1]".format(quantile)) + try: + interpolation_type = interpolation_types[interpolation] + except KeyError: + raise ValueError("Interpolation '{}' is not supported" + .format(interpolation)) + # we use the Fixed/Variable Indexer here as the # actual skiplist ops outweigh any window computation costs start, end, N, win, minp, is_variable = get_window_indexer( @@ -1383,51 +1408,78 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, minp, index, closed, use_mock=False) output = np.empty(N, dtype=float) - skiplist = IndexableSkiplist(win) - - for i in range(0, N): - s = start[i] - e = end[i] - - if i == 0: - - # setup - val = input[i] - if val == val: - nobs += 1 - skiplist.insert(val) + skiplist = skiplist_init(<int>win) + if skiplist == NULL: + raise MemoryError("skiplist_init failed") - else: + with nogil: + for i in range(0, N): + s = start[i] + e = end[i] - # calculate deletes - for j in range(start[i - 1], s): - val = input[j] - if val == val: - skiplist.remove(val) - nobs -= 1 + if i == 0: - # calculate adds - for j in range(end[i - 1], e): - val = input[j] + # setup + val = input[i] if val == val: nobs += 1 - skiplist.insert(val) + skiplist_insert(skiplist, val) - if nobs >= minp: - idx = int(quantile * <double>(nobs - 1)) + else: - # Single value in skip list - if nobs == 1: - output[i] = skiplist.get(0) + # calculate deletes + for j in range(start[i - 1], s): + val = input[j] + if val == val: + skiplist_remove(skiplist, val) + nobs -= 1 - # Interpolated quantile + # calculate adds + for j in range(end[i - 1], e): + val = input[j] + if val == val: + nobs += 1 + skiplist_insert(skiplist, val) + + if nobs >= minp: + if nobs == 1: + # Single value in skip list + output[i] = skiplist_get(skiplist, 0, &ret) + else: + idx_with_fraction = quantile * (nobs - 1) + idx = <int> idx_with_fraction + + if idx_with_fraction == idx: + # no need to interpolate + output[i] = skiplist_get(skiplist, idx, &ret) 
+ continue + + if interpolation_type == LINEAR: + vlow = skiplist_get(skiplist, idx, &ret) + vhigh = skiplist_get(skiplist, idx + 1, &ret) + output[i] = ((vlow + (vhigh - vlow) * + (idx_with_fraction - idx))) + elif interpolation_type == LOWER: + output[i] = skiplist_get(skiplist, idx, &ret) + elif interpolation_type == HIGHER: + output[i] = skiplist_get(skiplist, idx + 1, &ret) + elif interpolation_type == NEAREST: + # the same behaviour as round() + if idx_with_fraction - idx == 0.5: + if idx % 2 == 0: + output[i] = skiplist_get(skiplist, idx, &ret) + else: + output[i] = skiplist_get(skiplist, idx + 1, &ret) + elif idx_with_fraction - idx < 0.5: + output[i] = skiplist_get(skiplist, idx, &ret) + else: + output[i] = skiplist_get(skiplist, idx + 1, &ret) + elif interpolation_type == MIDPOINT: + vlow = skiplist_get(skiplist, idx, &ret) + vhigh = skiplist_get(skiplist, idx + 1, &ret) + output[i] = <double> (vlow + vhigh) / 2 else: - vlow = skiplist.get(idx) - vhigh = skiplist.get(idx + 1) - output[i] = ((vlow + (vhigh - vlow) * - (quantile * (nobs - 1) - idx))) - else: - output[i] = NaN + output[i] = NaN return output diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 35bfd12466429..de6985ef3b4ea 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7079,6 +7079,10 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, a b 0.1 1.3 3.7 0.5 2.5 55.0 + + See Also + -------- + pandas.core.window.Rolling.quantile """ self._check_percentile(q) diff --git a/pandas/core/series.py b/pandas/core/series.py index 13e94f971d003..aa4cb510feb62 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1855,6 +1855,9 @@ def quantile(self, q=0.5, interpolation='linear'): 0.75 3.25 dtype: float64 + See Also + -------- + pandas.core.window.Rolling.quantile """ self._check_percentile(q) diff --git a/pandas/core/window.py b/pandas/core/window.py index f8b5aa292f309..96630258c3e50 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -1276,9 
+1276,53 @@ def kurt(self, **kwargs): Parameters ---------- quantile : float - 0 <= quantile <= 1""") + 0 <= quantile <= 1 + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + .. versionadded:: 0.23.0 + + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. + + Returns + ------- + Series or DataFrame + Returned object type is determined by the caller of the %(name)s + calculation. + + Examples + -------- + >>> s = Series([1, 2, 3, 4]) + >>> s.rolling(2).quantile(.4, interpolation='lower') + 0 NaN + 1 1.0 + 2 2.0 + 3 3.0 + dtype: float64 + + >>> s.rolling(2).quantile(.4, interpolation='midpoint') + 0 NaN + 1 1.5 + 2 2.5 + 3 3.5 + dtype: float64 + + See Also + -------- + pandas.Series.quantile : Computes value at the given quantile over all data + in Series. + pandas.DataFrame.quantile : Computes values at the given quantile over + requested axis in DataFrame. 
+ + """) - def quantile(self, quantile, **kwargs): + def quantile(self, quantile, interpolation='linear', **kwargs): window = self._get_window() index, indexi = self._get_index() @@ -1292,7 +1336,8 @@ def f(arg, *args, **kwargs): self.closed) else: return _window.roll_quantile(arg, window, minp, indexi, - self.closed, quantile) + self.closed, quantile, + interpolation) return self._apply(f, 'quantile', quantile=quantile, **kwargs) @@ -1613,8 +1658,10 @@ def kurt(self, **kwargs): @Substitution(name='rolling') @Appender(_doc_template) @Appender(_shared_docs['quantile']) - def quantile(self, quantile, **kwargs): - return super(Rolling, self).quantile(quantile=quantile, **kwargs) + def quantile(self, quantile, interpolation='linear', **kwargs): + return super(Rolling, self).quantile(quantile=quantile, + interpolation=interpolation, + **kwargs) @Substitution(name='rolling') @Appender(_doc_template) @@ -1872,8 +1919,10 @@ def kurt(self, **kwargs): @Substitution(name='expanding') @Appender(_doc_template) @Appender(_shared_docs['quantile']) - def quantile(self, quantile, **kwargs): - return super(Expanding, self).quantile(quantile=quantile, **kwargs) + def quantile(self, quantile, interpolation='linear', **kwargs): + return super(Expanding, self).quantile(quantile=quantile, + interpolation=interpolation, + **kwargs) @Substitution(name='expanding') @Appender(_doc_template) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 605230390ff1d..304e3d02466a5 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -6,6 +6,7 @@ from datetime import datetime, timedelta from numpy.random import randn import numpy as np +from pandas import _np_version_under1p12 import pandas as pd from pandas import (Series, DataFrame, bdate_range, @@ -1166,15 +1167,40 @@ def test_rolling_quantile_np_percentile(self): tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) - def test_rolling_quantile_series(self): - # #16211: Tests that 
rolling window's quantile default behavior - # is analogus to Series' quantile - arr = np.arange(100) - s = Series(arr) - q1 = s.quantile(0.1) - q2 = s.rolling(100).quantile(0.1).iloc[-1] + @pytest.mark.skipif(_np_version_under1p12, + reason='numpy midpoint interpolation is broken') + @pytest.mark.parametrize('quantile', [0.0, 0.1, 0.45, 0.5, 1]) + @pytest.mark.parametrize('interpolation', ['linear', 'lower', 'higher', + 'nearest', 'midpoint']) + @pytest.mark.parametrize('data', [[1., 2., 3., 4., 5., 6., 7.], + [8., 1., 3., 4., 5., 2., 6., 7.], + [0., np.nan, 0.2, np.nan, 0.4], + [np.nan, np.nan, np.nan, np.nan], + [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5], + [0.5], [np.nan, 0.7, 0.6]]) + def test_rolling_quantile_interpolation_options(self, quantile, + interpolation, data): + # Tests that rolling window's quantile behavior is analogous to + # Series' quantile for each interpolation option + s = Series(data) + + q1 = s.quantile(quantile, interpolation) + q2 = s.expanding(min_periods=1).quantile( + quantile, interpolation).iloc[-1] + + if np.isnan(q1): + assert np.isnan(q2) + else: + assert q1 == q2 + + def test_invalid_quantile_value(self): + data = np.arange(5) + s = Series(data) - tm.assert_almost_equal(q1, q2) + with pytest.raises(ValueError, match="Interpolation 'invalid'" + " is not supported"): + s.rolling(len(data), min_periods=1).quantile( + 0.5, interpolation='invalid') def test_rolling_quantile_param(self): ser = Series([0.0, .1, .5, .9, 1.0])
It version 0.21.0 rolling quantile started to use linear interpolation, it broke backward compatibility. Regular (not rolling) quantile supports these interpolation options: `linear`, `lower`, `higher`, `nearest` and `midpoint`. This commit adds the same options to moving quantile. Performance issues of this commit (note: I re-run benchmarks, see message below) This code has 15% worse performance on benchmarks with small values of window (`window=10`). This is because loop inside `roll_quantile` now contains switch. I tried to replace switch with callback but it led to even worse performance. Even if I move some of the code to new function (without any change in logic) it still makes performance much worse. How bad is it? Could you please give me an advice on how to arrange the code such that it has the same performance? - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20497
2018-03-27T09:34:52Z
2018-04-24T11:29:35Z
2018-04-24T11:29:34Z
2018-04-24T11:30:07Z
DOC: update the pandas.Series.str.endswith docstring
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index b98fa106336fc..1703de5714bdf 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -348,19 +348,54 @@ def str_startswith(arr, pat, na=np.nan): def str_endswith(arr, pat, na=np.nan): """ - Return boolean Series indicating whether each string in the - Series/Index ends with passed pattern. Equivalent to - :meth:`str.endswith`. + Test if the end of each string element matches a pattern. + + Equivalent to :meth:`str.endswith`. Parameters ---------- - pat : string - Character sequence - na : bool, default NaN + pat : str + Character sequence. Regular expressions are not accepted. + na : object, default NaN + Object shown if element tested is not a string. Returns ------- - endswith : Series/array of boolean values + Series or Index of bool + A Series of booleans indicating whether the given pattern matches + the end of each string element. + + See Also + -------- + str.endswith : Python standard library string method. + Series.str.startswith : Same as endswith, but tests the start of string. + Series.str.contains : Tests if string element contains a pattern. + + Examples + -------- + >>> s = pd.Series(['bat', 'bear', 'caT', np.nan]) + >>> s + 0 bat + 1 bear + 2 caT + 3 NaN + dtype: object + + >>> s.str.endswith('t') + 0 True + 1 False + 2 False + 3 NaN + dtype: object + + Specifying `na` to be `False` instead of `NaN`. + + >>> s.str.endswith('t', na=False) + 0 True + 1 False + 2 False + 3 False + dtype: bool """ f = lambda x: x.endswith(pat) return _na_map(f, arr, na, dtype=bool)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ X] PR title is "DOC: update the <your-function-or-method> docstring" - [X ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [ X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ X ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ #################### Docstring (pandas.Series.str.endswith) #################### ################################################################################ Test if the end of each string element matches a pattern. Equivalent to :meth:`str.endswith`. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. na : object, default NaN Object shown if element tested is not a string. Returns ------- Series or Index of bool A Series of booleans indicating whether the given pattern matches the end of each string element. See Also -------- str.endswith : Python standard library string method. Series.str.startswith : Same as endswith, but tests the start of string. Series.str.contains : Tests if string element contains a pattern. Examples -------- >>> s = pd.Series(['bat', 'bear', 'caT', np.nan]) >>> s 0 bat 1 bear 2 caT 3 NaN dtype: object >>> s.str.endswith('t') 0 True 1 False 2 False 3 NaN dtype: object Specifying `na` to be `False` instead of `NaN`. 
>>> s.str.endswith('t', na=False) 0 True 1 False 2 False 3 False dtype: bool ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.str.endswith" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20491
2018-03-26T14:20:57Z
2018-03-28T16:15:13Z
2018-03-28T16:15:13Z
2018-03-28T16:15:13Z
CLN: remove deprecated infer_dst keyword
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index e1a6bc7a68e9d..eada401d2930b 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -75,8 +75,7 @@ def setup(self): freq='S')) def time_infer_dst(self): - with warnings.catch_warnings(record=True): - self.index.tz_localize('US/Eastern', infer_dst=True) + self.index.tz_localize('US/Eastern', ambiguous='infer') class ResetIndex(object): diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 466c48b780861..86cff4a358975 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -2191,10 +2191,9 @@ Ambiguous Times when Localizing In some cases, localize cannot determine the DST and non-DST hours when there are duplicates. This often happens when reading files or database records that simply -duplicate the hours. Passing ``ambiguous='infer'`` (``infer_dst`` argument in prior -releases) into ``tz_localize`` will attempt to determine the right offset. Below -the top example will fail as it contains ambiguous times and the bottom will -infer the right offset. +duplicate the hours. Passing ``ambiguous='infer'`` into ``tz_localize`` will +attempt to determine the right offset. Below the top example will fail as it +contains ambiguous times and the bottom will infer the right offset. .. ipython:: python diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 39bfc8c633dbb..ced3cdd7a81f4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -777,6 +777,10 @@ Removal of prior version deprecations/changes - The top-level functions ``pd.rolling_*``, ``pd.expanding_*`` and ``pd.ewm*`` have been removed (Deprecated since v0.18). 
Instead, use the DataFrame/Series methods :attr:`~DataFrame.rolling`, :attr:`~DataFrame.expanding` and :attr:`~DataFrame.ewm` (:issue:`18723`) - Imports from ``pandas.core.common`` for functions such as ``is_datetime64_dtype`` are now removed. These are located in ``pandas.api.types``. (:issue:`13634`, :issue:`19769`) +- The ``infer_dst`` keyword in :meth:`Series.tz_localize`, :meth:`DatetimeIndex.tz_localize` + and :class:`DatetimeIndex` have been removed. ``infer_dst=True`` is equivalent to + ``ambiguous='infer'``, and ``infer_dst=False`` to ``ambiguous='raise'`` (:issue:`7963`). + .. _whatsnew_0230.performance: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fc6eda0290c28..6810aff56806f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7937,9 +7937,6 @@ def _tz_convert(ax, tz): result.set_axis(ax, axis=axis, inplace=True) return result.__finalize__(self) - @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous', - mapping={True: 'infer', - False: 'raise'}) def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise'): """ @@ -7963,9 +7960,6 @@ def tz_localize(self, tz, axis=0, level=None, copy=True, - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times - infer_dst : boolean, default False - .. deprecated:: 0.15.0 - Attempt to infer fall dst-transition hours based on order Returns ------- diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e8bc9a2519333..75f4ec4f0d341 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -208,9 +208,6 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times - infer_dst : boolean, default False - .. 
deprecated:: 0.15.0 - Attempt to infer fall dst-transition hours based on order name : object Name to be stored in the index dayfirst : bool, default False @@ -329,8 +326,6 @@ def _add_comparison_methods(cls): _is_numeric_dtype = False _infer_as_myclass = True - @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous', - mapping={True: 'infer', False: 'raise'}) def __new__(cls, data=None, freq=None, start=None, end=None, periods=None, tz=None, normalize=False, closed=None, ambiguous='raise', @@ -2270,8 +2265,6 @@ def tz_convert(self, tz): # No conversion since timestamps are all UTC to begin with return self._shallow_copy(tz=tz) - @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous', - mapping={True: 'infer', False: 'raise'}) def tz_localize(self, tz, ambiguous='raise', errors='raise'): """ Localize tz-naive DatetimeIndex to tz-aware DatetimeIndex. @@ -2306,10 +2299,6 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'): .. versionadded:: 0.19.0 - infer_dst : boolean, default False - .. deprecated:: 0.15.0 - Attempt to infer fall dst-transition hours based on order - Returns ------- DatetimeIndex diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 705dc36d92522..4a224d4e6ee7f 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -1095,7 +1095,7 @@ def tz_convert(self, tz): """ raise NotImplementedError("Not yet implemented for PeriodIndex") - def tz_localize(self, tz, infer_dst=False): + def tz_localize(self, tz, ambiguous='raise'): """ Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil), or remove timezone from tz-aware DatetimeIndex @@ -1106,8 +1106,6 @@ def tz_localize(self, tz, infer_dst=False): Time zone for time. Corresponding timestamps would be converted to time zone of the TimeSeries. None will remove timezone holding local time. 
- infer_dst : boolean, default False - Attempt to infer fall dst-transition hours based on order Returns ------- diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 2913812db0dd4..a8191816238b1 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -341,9 +341,6 @@ def test_dti_tz_localize_ambiguous_infer(self, tz): di = DatetimeIndex(times) localized = di.tz_localize(tz, ambiguous='infer') tm.assert_index_equal(dr, localized) - with tm.assert_produces_warning(FutureWarning): - localized_old = di.tz_localize(tz, infer_dst=True) - tm.assert_index_equal(dr, localized_old) tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, ambiguous='infer')) @@ -353,9 +350,6 @@ def test_dti_tz_localize_ambiguous_infer(self, tz): localized = dr.tz_localize(tz) localized_infer = dr.tz_localize(tz, ambiguous='infer') tm.assert_index_equal(localized, localized_infer) - with tm.assert_produces_warning(FutureWarning): - localized_infer_old = dr.tz_localize(tz, infer_dst=True) - tm.assert_index_equal(localized, localized_infer_old) @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), gettz('US/Eastern')]) @@ -525,7 +519,7 @@ def test_dti_tz_localize_ambiguous_flags(self, tz): localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) tm.assert_index_equal(dr, localized) - # Test duplicate times where infer_dst fails + # Test duplicate times where inferring the dst fails times += times di = DatetimeIndex(times)
This has come up a few times during the docstring improvements: we still had the `infer_dst` deprecations left over (deprecated in 0.15)
https://api.github.com/repos/pandas-dev/pandas/pulls/20490
2018-03-26T13:12:29Z
2018-03-27T10:22:33Z
2018-03-27T10:22:33Z
2018-03-27T11:21:12Z
CI: Fixed deprecationWarning
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 7d959ea4fcd84..b6303ededd0dc 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -134,7 +134,7 @@ def test_series_constructor_with_same_dtype_ok(): def test_series_constructor_coerce_extension_array_to_dtype_raises(): arr = DecimalArray([decimal.Decimal('10.0')]) - xpr = "Cannot specify a dtype 'int64' .* \('decimal'\)." + xpr = r"Cannot specify a dtype 'int64' .* \('decimal'\)." with tm.assert_raises_regex(ValueError, xpr): pd.Series(arr, dtype='int64')
Closes #20479 Hopefully. I can't reproduce locally.
https://api.github.com/repos/pandas-dev/pandas/pulls/20489
2018-03-26T12:19:50Z
2018-03-26T14:29:06Z
2018-03-26T14:29:05Z
2018-03-26T14:29:17Z
BUG: raise error when setting cached properties
diff --git a/pandas/_libs/properties.pyx b/pandas/_libs/properties.pyx index e3f16f224db1c..0f2900619fdb6 100644 --- a/pandas/_libs/properties.pyx +++ b/pandas/_libs/properties.pyx @@ -37,6 +37,9 @@ cdef class CachedProperty(object): PyDict_SetItem(cache, self.name, val) return val + def __set__(self, obj, value): + raise AttributeError("Can't set attribute") + cache_readonly = CachedProperty diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 22ef2fe7aa19e..ff9c86fbfe384 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2056,6 +2056,11 @@ def test_iadd_preserves_name(self): ser.index -= 1 assert ser.index.name == "foo" + def test_cached_properties_not_settable(self): + idx = pd.Index([1, 2, 3]) + with tm.assert_raises_regex(AttributeError, "Can't set attribute"): + idx.is_unique = False + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ
A bug I introduced myself some time ago in https://github.com/pandas-dev/pandas/pull/19991. Apparently the object needs to explicitly raise in the `__set__` method, otherwise the property is assumed to be settable: ``` In [6]: idx = pd.Index([1, 2, 3]) In [7]: idx.is_unique Out[7]: True In [8]: idx.is_unique = False In [9]: idx.is_unique Out[9]: False In [10]: idx.is_monotonic Out[10]: True In [11]: idx.is_monotonic = False --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-11-b2a74ac7466c> in <module>() ----> 1 idx.is_monotonic = False AttributeError: can't set attribute ``` `is_unique` is a cached property, didn't raise anymore on master (see above), in contrast to normal property `is_monotonic`. This patch ensures also cached properties have the same error.
https://api.github.com/repos/pandas-dev/pandas/pulls/20487
2018-03-26T09:49:38Z
2018-03-26T12:48:49Z
2018-03-26T12:48:49Z
2018-03-26T12:48:53Z
POC for New GroupBy Dispatching Module
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 601acac20c96d..96816824d724f 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -53,6 +53,7 @@ from pandas.core.sorting import (get_group_index_sorter, get_group_index, compress_group_index, get_flattened_iterator, decons_obs_group_ids, get_indexer_dict) +from pandas.core.util._dispatching import CythonDispatcher from pandas.util._decorators import (cache_readonly, Substitution, Appender, make_signature) from pandas.io.formats.printing import pprint_thing @@ -593,6 +594,8 @@ def __init__(self, obj, keys=None, axis=0, level=None, # we accept no other args validate_kwargs('group', kwargs, {}) + self._dispatcher = CythonDispatcher(self) # circle-ref + def __len__(self): return len(self.groups) @@ -1202,29 +1205,6 @@ class GroupBy(_GroupBy): """ _apply_whitelist = _common_apply_whitelist - def _bool_agg(self, val_test, skipna): - """Shared func to call any / all Cython GroupBy implementations""" - - def objs_to_bool(vals): - try: - vals = vals.astype(np.bool) - except ValueError: # for objects - vals = np.array([bool(x) for x in vals]) - - return vals.view(np.uint8) - - def result_to_bool(result): - return result.astype(np.bool, copy=False) - - return self._get_cythonized_result('group_any_all', self.grouper, - aggregate=True, - cython_dtype=np.uint8, - needs_values=True, - needs_mask=True, - pre_processing=objs_to_bool, - post_processing=result_to_bool, - val_test=val_test, skipna=skipna) - @Substitution(name='groupby') @Appender(_doc_template) def any(self, skipna=True): @@ -1236,7 +1216,7 @@ def any(self, skipna=True): skipna : bool, default True Flag to ignore nan values during truth testing """ - return self._bool_agg('any', skipna) + return self._dispatcher.dispatch('any', skipna=skipna) @Substitution(name='groupby') @Appender(_doc_template) @@ -1248,7 +1228,7 @@ def all(self, skipna=True): skipna : bool, default True Flag to ignore nan values during truth testing """ - return 
self._bool_agg('all', skipna) + return self._dispatcher.dispatch('all', skipna=skipna) @Substitution(name='groupby') @Appender(_doc_template) diff --git a/pandas/core/util/_dispatching.py b/pandas/core/util/_dispatching.py new file mode 100644 index 0000000000000..21bdaccfc6245 --- /dev/null +++ b/pandas/core/util/_dispatching.py @@ -0,0 +1,186 @@ +import collections +import inspect +from functools import partial + +import numpy as np + +from pandas._libs import groupby as libgroupby +from pandas.core.dtypes.missing import isnull + + +class CythonDispatcher(object): + + def __init__(self, groupby): + self.groupby = groupby + self.func_nm = None + self.obj = None + + @property + def func_metadata(self): + """ + Stores the metadata required to dispatch each function. + + The format of the dict is as follows: + + attr_name : { + 'application': {'aggregate', 'transform'} + 'cython_nm': ... # Name of the Cython function to call + 'extra_kwargs': {...} # Extra kwargs to pass to Cython + 'type_blacklist': [...] # Dtypes for which func should raise + 'result_type': ... # dtype of result from Cython + 'conversion_in': ... # dtype or callable for conversion pre-Cython + 'needs_values': ... # Whether the obj values should pass to Cython + 'needs_mask': ... # Whether a mask of NA values should be passed + 'conversion_out': ... 
# dtype or callable for conv post-Cython + } + """ + return { + 'any': { + 'application': 'aggregate', + 'cython_nm' : 'group_any_all', + 'extra_kwargs': {'val_test': 'any'}, + 'type_blacklist': [], + 'result_type': np.uint8, + 'conversion_in': self._any_all_convertor, + 'needs_values': True, + 'needs_mask': True, + 'conversion_out': np.bool + }, + 'all': { + 'application': 'aggregate', + 'cython_nm' : 'group_any_all', + 'extra_kwargs': {'val_test': 'all'}, + 'type_blacklist': [], + 'result_type': np.uint8, + 'conversion_in': self._any_all_convertor, + 'needs_values': True, + 'needs_mask': True, + 'conversion_out': np.bool + } + } + + @property + def application_type(self): + return self.func_metadata[self.func_nm]['application'] + + def _any_all_convertor(self, vals): + """ + Converts objects to appropriate type for any/all calculations. + """ + try: + vals = vals.astype(np.bool) + except ValueError: # for objects + vals = np.array([bool(x) for x in vals]) + + return vals.view(np.uint8) + + def _validate_types(self): + """ + Validate that the types of the `grp_by` object. + + Raises + ------ + ``TypeError`` if the `grp_by` dtypes are not valid for `func_nm`. + """ + if self.obj.values.dtype in self.func_metadata[ + self.func_nm]['type_blacklist']: + raise TypeError("'{}' cannot be applied to a dtype of {}".format( + self.func_nm, self.obj.values.dtype)) + + def _get_result(self, **kwargs): + """ + Fetch the result from the Cython layer. + + Parameters + ---------- + kwargs + Extra arguments to bind to the `func_nm` Cython signature. + + Resolve function name in case of templating use. + """ + # Since this func is called in a loop, the below might be better + # served outside of the loop and passed in? 
+ labels, _, ngroups = self.groupby.grouper.group_info + + if self.application_type == 'aggregate': + res_sz = ngroups + elif self.application_type == 'transform': + res_sz = len(labels) + + res_type = self.func_metadata[self.func_nm].get('result_type', + self.obj.values.dtype) + + result = np.zeros(res_sz, dtype=res_type) + base_func = getattr(libgroupby, + self.func_metadata[self.func_nm]['cython_nm']) + func = partial(base_func, result, labels) + + if self.func_metadata[self.func_nm].get('needs_values'): + conv_in = self.func_metadata[self.func_nm].get('conversion_in') + vals = self.obj.values + # Below conditional needs refactoring but essentially want + # to differentiate callables from dtypes + if callable(conv_in) and not inspect.isclass(conv_in): + vals = conv_in(self.obj.values) + elif conv_in: # is a type to convert to + vals = self.obj.values.astype(conv, copy=False) + func = partial(func, vals) + + if self.func_metadata[self.func_nm].get('needs_values'): + mask = isnull(self.obj.values).view(np.uint8) + func = partial(func, mask) + + # Not backwards compatible (py>=3.5 only) + cy_kwargs = {**kwargs, **self.func_metadata[self.func_nm].get( + 'extra_kwargs', {})} + func(**cy_kwargs) + + conv_out = self.func_metadata[self.func_nm].get('conversion_out') + # Just like before, this needs refactoring + if callable(conv_out) and not inspect.isclass(conv_out): + result = conv_out(result) + elif conv_out: + result = result.astype(conv_out, copy=False) + + return result + + def _wrap_output(self, output): + """ + Bind and apply the appropriate wrap func from `self.groupby`. 
+ """ + if self.application_type == 'aggregate': + return getattr(self.groupby, '_wrap_aggregated_output')(output) + elif self.application_type == 'transform': + return getattr(self.groupby, '_wrap_transformed_output')(output) + + raise ValueError("Unknown application type for {}".format( + self.func_nm)) + + def dispatch(self, func_nm, **kwargs): + """ + Dispatch the `func_nm` appropriately to the Cython layer. + + Will resolve any type and conversion dependencies, as well as apply + any post-Cython conversions required for the given `func_nm`. + + Parameters + ---------- + func_nm : str + Conceptual name of the function to be applied. + kwargs + Extra arguments to bind to the `func_nm` Cython signature. + + Returns + ------- + ndarray + Result of Cython operation with appropriate conversions applied. + """ + self.func_nm = func_nm + + output = collections.OrderedDict() + for name, obj in self.groupby._iterate_slices(): + self.obj = obj + self._validate_types() + output[name] = self._get_result(**kwargs) + + return self._wrap_output(output)
This is nowhere near completion but looking for feedback on the direction. @jreback - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20485
2018-03-26T05:23:26Z
2018-07-08T16:06:34Z
null
2018-12-25T06:12:48Z
TST: test_nanops some parametrize & catch warnings (RuntimeWarning: All-Nan slice in tests)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7b68ad67675ff..601acac20c96d 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -3860,7 +3860,7 @@ def count(self): mask = (ids != -1) & ~isna(val) ids = _ensure_platform_int(ids) - out = np.bincount(ids[mask], minlength=ngroups or None) + out = np.bincount(ids[mask], minlength=ngroups or 0) return Series(out, index=self.grouper.result_index, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 40f543e211f0c..12bb09e8f8a8a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -99,9 +99,14 @@ def cmp_method(self, other): # don't pass MultiIndex with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) + else: - with np.errstate(all='ignore'): - result = op(self.values, np.asarray(other)) + + # numpy will show a DeprecationWarning on invalid elementwise + # comparisons, this will raise in the future + with warnings.catch_warnings(record=True): + with np.errstate(all='ignore'): + result = op(self.values, np.asarray(other)) # technically we could support bool dtyped Index # for now just return the indexing array directly diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 59a30fc69905f..8efa140237614 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -529,7 +529,8 @@ def wrapper(x): self._check_stat_op('median', wrapper, check_dates=True) def test_min(self): - self._check_stat_op('min', np.min, check_dates=True) + with warnings.catch_warnings(record=True): + self._check_stat_op('min', np.min, check_dates=True) self._check_stat_op('min', np.min, frame=self.intframe) def test_cummin(self): @@ -579,7 +580,8 @@ def test_cummax(self): assert np.shape(cummax_xs) == np.shape(self.tsframe) def test_max(self): - self._check_stat_op('max', np.max, check_dates=True) + with warnings.catch_warnings(record=True): + 
self._check_stat_op('max', np.max, check_dates=True) self._check_stat_op('max', np.max, frame=self.intframe) def test_mad(self): diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index dffb303af6ae1..a70ee80aee180 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -301,24 +301,6 @@ def check_funs(self, testfunc, targfunc, allow_complex=True, allow_complex=allow_complex) self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs) - def check_funs_ddof(self, - testfunc, - targfunc, - allow_complex=True, - allow_all_nan=True, - allow_str=True, - allow_date=False, - allow_tdelta=False, - allow_obj=True, ): - for ddof in range(3): - try: - self.check_funs(testfunc, targfunc, allow_complex, - allow_all_nan, allow_str, allow_date, - allow_tdelta, allow_obj, ddof=ddof) - except BaseException as exc: - exc.args += ('ddof %s' % ddof, ) - raise - def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): if value.dtype.kind == 'O': if allow_complex: @@ -381,37 +363,46 @@ def test_nanmedian(self): allow_str=False, allow_date=False, allow_tdelta=True, allow_obj='convert') - def test_nanvar(self): - self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False, - allow_str=False, allow_date=False, - allow_tdelta=True, allow_obj='convert') + @pytest.mark.parametrize('ddof', range(3)) + def test_nanvar(self, ddof): + self.check_funs(nanops.nanvar, np.var, allow_complex=False, + allow_str=False, allow_date=False, + allow_tdelta=True, allow_obj='convert', ddof=ddof) - def test_nanstd(self): - self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False, - allow_str=False, allow_date=False, - allow_tdelta=True, allow_obj='convert') + @pytest.mark.parametrize('ddof', range(3)) + def test_nanstd(self, ddof): + self.check_funs(nanops.nanstd, np.std, allow_complex=False, + allow_str=False, allow_date=False, + allow_tdelta=True, allow_obj='convert', ddof=ddof) @td.skip_if_no('scipy', min_version='0.17.0') - def test_nansem(self): + 
@pytest.mark.parametrize('ddof', range(3)) + def test_nansem(self, ddof): from scipy.stats import sem with np.errstate(invalid='ignore'): - self.check_funs_ddof(nanops.nansem, sem, allow_complex=False, - allow_str=False, allow_date=False, - allow_tdelta=False, allow_obj='convert') + self.check_funs(nanops.nansem, sem, allow_complex=False, + allow_str=False, allow_date=False, + allow_tdelta=False, allow_obj='convert', ddof=ddof) def _minmax_wrap(self, value, axis=None, func=None): + + # numpy warns if all nan res = func(value, axis) if res.dtype.kind == 'm': res = np.atleast_1d(res) return res def test_nanmin(self): - func = partial(self._minmax_wrap, func=np.min) - self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False) + with warnings.catch_warnings(record=True): + func = partial(self._minmax_wrap, func=np.min) + self.check_funs(nanops.nanmin, func, + allow_str=False, allow_obj=False) def test_nanmax(self): - func = partial(self._minmax_wrap, func=np.max) - self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False) + with warnings.catch_warnings(record=True): + func = partial(self._minmax_wrap, func=np.max) + self.check_funs(nanops.nanmax, func, + allow_str=False, allow_obj=False) def _argminmax_wrap(self, value, axis=None, func=None): res = func(value, axis) @@ -425,17 +416,15 @@ def _argminmax_wrap(self, value, axis=None, func=None): return res def test_nanargmax(self): - func = partial(self._argminmax_wrap, func=np.argmax) - self.check_funs(nanops.nanargmax, func, allow_str=False, - allow_obj=False, allow_date=True, allow_tdelta=True) + with warnings.catch_warnings(record=True): + func = partial(self._argminmax_wrap, func=np.argmax) + self.check_funs(nanops.nanargmax, func, + allow_str=False, allow_obj=False, + allow_date=True, allow_tdelta=True) def test_nanargmin(self): - func = partial(self._argminmax_wrap, func=np.argmin) - if tm.sys.version_info[0:2] == (2, 6): - self.check_funs(nanops.nanargmin, func, allow_date=True, - 
allow_tdelta=True, allow_str=False, - allow_obj=False) - else: + with warnings.catch_warnings(record=True): + func = partial(self._argminmax_wrap, func=np.argmin) self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 301a7fc437fcf..7973b27601237 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -100,10 +100,12 @@ def wrapper(x): self._check_stat_op('median', wrapper) def test_min(self): - self._check_stat_op('min', np.min) + with catch_warnings(record=True): + self._check_stat_op('min', np.min) def test_max(self): - self._check_stat_op('max', np.max) + with catch_warnings(record=True): + self._check_stat_op('max', np.max) @td.skip_if_no_scipy def test_skew(self):
closes #20011
https://api.github.com/repos/pandas-dev/pandas/pulls/20484
2018-03-25T22:56:33Z
2018-03-26T01:17:39Z
2018-03-26T01:17:39Z
2018-03-26T01:17:39Z
TST: 32-bit compat for categorical factorization tests
diff --git a/pandas/tests/categorical/test_algos.py b/pandas/tests/categorical/test_algos.py index 61764ec0ff632..f727184e862d8 100644 --- a/pandas/tests/categorical/test_algos.py +++ b/pandas/tests/categorical/test_algos.py @@ -15,7 +15,7 @@ def test_factorize(categories, ordered): categories=categories, ordered=ordered) labels, uniques = pd.factorize(cat) - expected_labels = np.array([0, 0, 1, 2, -1], dtype='int64') + expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp) expected_uniques = pd.Categorical(['b', 'a', 'c'], categories=categories, ordered=ordered) @@ -27,7 +27,7 @@ def test_factorize(categories, ordered): def test_factorized_sort(): cat = pd.Categorical(['b', 'b', None, 'a']) labels, uniques = pd.factorize(cat, sort=True) - expected_labels = np.array([1, 1, -1, 0], dtype='int64') + expected_labels = np.array([1, 1, -1, 0], dtype=np.intp) expected_uniques = pd.Categorical(['a', 'b']) tm.assert_numpy_array_equal(labels, expected_labels) @@ -40,7 +40,7 @@ def test_factorized_sort_ordered(): ordered=True) labels, uniques = pd.factorize(cat, sort=True) - expected_labels = np.array([0, 0, -1, 1], dtype='int64') + expected_labels = np.array([0, 0, -1, 1], dtype=np.intp) expected_uniques = pd.Categorical(['b', 'a'], categories=['c', 'b', 'a'], ordered=True)
https://travis-ci.org/MacPython/pandas-wheels/jobs/357947315 cc @TomAugspurger
https://api.github.com/repos/pandas-dev/pandas/pulls/20482
2018-03-25T13:38:15Z
2018-03-25T14:10:14Z
2018-03-25T14:10:14Z
2018-03-25T14:10:15Z
removed not necessary bn switch decorator on nansum
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index d4851f579dda4..90333c23817c5 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -326,7 +326,6 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') -@bottleneck_switch() def nansum(values, axis=None, skipna=True, min_count=0): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max
Removed not necessary bottleneck_switch decorator on nansum function like nanprod.
https://api.github.com/repos/pandas-dev/pandas/pulls/20481
2018-03-25T09:50:01Z
2018-03-29T02:31:13Z
2018-03-29T02:31:13Z
2018-03-31T16:37:48Z
BUG: read_excel return empty dataframe when using usecols
diff --git a/doc/source/io.rst b/doc/source/io.rst index 658b9ff15783d..4a26497fa83c2 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2852,23 +2852,46 @@ Parsing Specific Columns It is often the case that users will insert columns to do temporary computations in Excel and you may not want to read in those columns. ``read_excel`` takes -a ``usecols`` keyword to allow you to specify a subset of columns to parse. +either a ``usecols`` or ``usecols_excel`` keyword to allow you to specify a +subset of columns to parse. Note that you can not use both ``usecols`` and +``usecols_excel`` named arguments at the same time. -If ``usecols`` is an integer, then it is assumed to indicate the last column -to be parsed. +If ``usecols_excel`` is supplied, then it is assumed that indicates a comma +separated list of Excel column letters and column ranges to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols=2) + read_excel('path_to_file.xls', 'Sheet1', usecols_excel='A:E') + read_excel('path_to_file.xls', 'Sheet1', usecols_excel='A,C,E:F') -If `usecols` is a list of integers, then it is assumed to be the file column -indices to be parsed. +If ``usecols`` is a list of integers, then it is assumed to be the file +column indices to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) + read_excel('path_to_file.xls', 'Sheet1', usecols=[1, 3, 5]) + +Element order is ignored, so ``usecols_excel=[0, 1]`` is the same as ``[1, 0]``. + +If ``usecols`` is a list of strings, then it is assumed that each string +correspond to column names provided either by the user in `names` or +inferred from the document header row(s) and those strings define which columns +will be parsed. + +.. code-block:: python + + read_excel('path_to_file.xls', 'Sheet1', usecols=['foo', 'bar']) + +Element order is ignored, so ``usecols=['baz', 'joe']`` is the same as +``['joe', 'baz']``. 
+ +If ``usecols`` is callable, the callable function will be evaluated against the +column names, returning names where the callable function evaluates to True. + +.. code-block:: python + + read_excel('path_to_file.xls', 'Sheet1', usecols=lambda x: x.isalpha()) -Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. Parsing Dates +++++++++++++ diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index de985d4db5fa3..40e90a1ddf8b9 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -36,7 +36,7 @@ Datetimelike API Changes Other API Changes ^^^^^^^^^^^^^^^^^ -- +- :func:`read_excel` has gained the keyword argument ``usecols_excel`` that receives a string containing comma separated Excel ranges and columns. The ``usecols`` keyword argument at :func:`read_excel` had removed support for a string containing comma separated Excel ranges and columns and for an int indicating the first j columns to be read in a ``DataFrame``. Also, the ``usecols`` keyword argument at :func:`read_excel` had added support for receiving a list of strings containing column labels and a callable. (:issue:`18273`) - - @@ -148,7 +148,7 @@ I/O ^^^ - -- +- Bug in :func:`read_excel` where ``usecols`` keyword argument as a list of strings were returning a empty ``DataFrame`` (:issue:`18273`) - Plotting diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 5608c29637447..104a5e59474e6 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -10,6 +10,8 @@ import abc import warnings import numpy as np +import string +import re from io import UnsupportedOperation from pandas.core.dtypes.common import ( @@ -85,20 +87,42 @@ Column (0-indexed) to use as the row labels of the DataFrame. Pass None if there is no such column. If a list is passed, those columns will be combined into a ``MultiIndex``. If a - subset of data is selected with ``usecols``, index_col - is based on the subset. 
+ subset of data is selected with ``usecols_excel`` or ``usecols``, + index_col is based on the subset. parse_cols : int or list, default None .. deprecated:: 0.21.0 Pass in `usecols` instead. -usecols : int or list, default None +usecols : list-like or callable, default None + Return a subset of the columns. If list-like, all elements must either + be positional (i.e. integer indices into the document columns) or string + that correspond to column names provided either by the user in `names` or + inferred from the document header row(s). For example, a valid list-like + `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Note that + you can not give both ``usecols`` and ``usecols_excel`` keyword arguments + at the same time. + + If callable, the callable function will be evaluated against the column + names, returning names where the callable function evaluates to True. An + example of a valid callable argument would be ``lambda x: x.upper() in + ['AAA', 'BBB', 'DDD']``. + + .. versionadded:: 0.24.0 + Added support to column labels and now `usecols_excel` is the keyword that + receives separated comma list of excel columns and ranges. +usecols_excel : string, default None + Return a subset of the columns from a spreadsheet specified as Excel column + ranges and columns. Note that you can not use both ``usecols`` and + ``usecols_excel`` keyword arguments at the same time. + * If None then parse all columns, - * If int then indicates last column to be parsed - * If list of ints then indicates list of column numbers to be parsed * If string then indicates comma separated list of Excel column letters and - column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of - both sides. + column ranges (e.g. "A:E" or "A,C,E:F") to be parsed. Ranges are + inclusive of both sides. + + .. 
versionadded:: 0.24.0 + squeeze : boolean, default False If the parsed data only contains one column then return a Series dtype : Type name or dict of column -> type, default None @@ -269,6 +293,19 @@ def _get_default_writer(ext): return _default_writers[ext] +def _is_excel_columns_notation(columns): + """ + Receives a string and check if the string is a comma separated list of + Excel index columns and index ranges. An Excel range is a string with two + column indexes separated by ':'). + """ + if isinstance(columns, compat.string_types) and all( + (x in string.ascii_letters) for x in re.split(r',|:', columns)): + return True + + return False + + def get_writer(engine_name): try: return _writers[engine_name] @@ -286,6 +323,7 @@ def read_excel(io, names=None, index_col=None, usecols=None, + usecols_excel=None, squeeze=False, dtype=None, engine=None, @@ -311,6 +349,7 @@ def read_excel(io, header=header, names=names, index_col=index_col, + usecols_excel=usecols_excel, usecols=usecols, squeeze=squeeze, dtype=dtype, @@ -405,6 +444,7 @@ def parse(self, names=None, index_col=None, usecols=None, + usecols_excel=None, squeeze=False, converters=None, true_values=None, @@ -439,6 +479,7 @@ def parse(self, header=header, names=names, index_col=index_col, + usecols_excel=usecols_excel, usecols=usecols, squeeze=squeeze, converters=converters, @@ -455,7 +496,7 @@ def parse(self, convert_float=convert_float, **kwds) - def _should_parse(self, i, usecols): + def _should_parse(self, i, usecols_excel, usecols): def _range2cols(areas): """ @@ -481,12 +522,12 @@ def _excel2num(x): cols.append(_excel2num(rng)) return cols - if isinstance(usecols, int): - return i <= usecols - elif isinstance(usecols, compat.string_types): - return i in _range2cols(usecols) - else: - return i in usecols + # check if usecols_excel is a string that indicates a comma separated + # list of Excel column letters and column ranges + if isinstance(usecols_excel, compat.string_types): + return i in 
_range2cols(usecols_excel) + + return True def _parse_excel(self, sheet_name=0, @@ -494,6 +535,7 @@ def _parse_excel(self, names=None, index_col=None, usecols=None, + usecols_excel=None, squeeze=False, dtype=None, true_values=None, @@ -512,6 +554,25 @@ def _parse_excel(self, _validate_header_arg(header) + if (usecols is not None) and (usecols_excel is not None): + raise ValueError("Cannot specify both `usecols` and " + "`usecols_excel`. Choose one of them.") + + # Check if some string in usecols may be interpreted as a Excel + # range or positional column + elif _is_excel_columns_notation(usecols): + warnings.warn("The `usecols` keyword argument used to refer to " + "Excel ranges and columns as strings was " + "renamed to `usecols_excel`.", UserWarning, + stacklevel=3) + usecols_excel = usecols + usecols = None + + elif (usecols_excel is not None) and not _is_excel_columns_notation( + usecols_excel): + raise TypeError("`usecols_excel` must be None or a string as a " + "comma separeted Excel ranges and columns.") + if 'chunksize' in kwds: raise NotImplementedError("chunksize keyword of read_excel " "is not implemented") @@ -615,10 +676,13 @@ def _parse_cell(cell_contents, cell_typ): row = [] for j, (value, typ) in enumerate(zip(sheet.row_values(i), sheet.row_types(i))): - if usecols is not None and j not in should_parse: - should_parse[j] = self._should_parse(j, usecols) + if ((usecols is not None) or (usecols_excel is not None) or + (j not in should_parse)): + should_parse[j] = self._should_parse(j, usecols_excel, + usecols) - if usecols is None or should_parse[j]: + if (((usecols_excel is None) and (usecols is None)) or + should_parse[j]): row.append(_parse_cell(value, typ)) data.append(row) @@ -674,6 +738,7 @@ def _parse_cell(cell_contents, cell_typ): dtype=dtype, true_values=true_values, false_values=false_values, + usecols=usecols, skiprows=skiprows, nrows=nrows, na_values=na_values, diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py 
index 05423474f330a..439244a9f6262 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -107,54 +107,43 @@ def get_exceldf(self, basename, ext, *args, **kwds): class ReadingTestsBase(SharedItems): # This is based on ExcelWriterBase - def test_usecols_int(self, ext): - - dfref = self.get_csv_refdf('test1') - dfref = dfref.reindex(columns=['A', 'B', 'C']) - df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols=3) - df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols=3) - - with tm.assert_produces_warning(FutureWarning): - df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, parse_cols=3) - - # TODO add index to xls file) - tm.assert_frame_equal(df1, dfref, check_names=False) - tm.assert_frame_equal(df2, dfref, check_names=False) - tm.assert_frame_equal(df3, dfref, check_names=False) - def test_usecols_list(self, ext): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['B', 'C']) df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols=[0, 2, 3]) + usecols=[1, 2]) df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols=[0, 2, 3]) + index_col=0, usecols=[1, 2]) with tm.assert_produces_warning(FutureWarning): df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, parse_cols=[0, 2, 3]) + index_col=0, parse_cols=[1, 2]) # TODO add index to xls file) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) tm.assert_frame_equal(df3, dfref, check_names=False) - def test_usecols_str(self, ext): + def test_usecols_excel_str(self, ext): dfref = self.get_csv_refdf('test1') df1 = dfref.reindex(columns=['A', 'B', 'C']) df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols='A:D') + usecols_excel='A:D') df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols='A:D') + index_col=0, usecols_excel='A:D') - with 
tm.assert_produces_warning(FutureWarning): + # The following code receives two warnings because FutureWarning is + # thrown when parse_cols is passed in read_excel and UserWarning is + # thrown when parse_cols (usecols) receives an comma separated list of + # Excel indexes and ranges + with tm.assert_produces_warning() as w: df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], index_col=0, parse_cols='A:D') + assert issubclass(w[0].category, FutureWarning) + assert issubclass(w[1].category, UserWarning) # TODO add index to xls, read xls ignores index name ? tm.assert_frame_equal(df2, df1, check_names=False) @@ -163,21 +152,107 @@ def test_usecols_str(self, ext): df1 = dfref.reindex(columns=['B', 'C']) df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols='A,C,D') + usecols_excel='A,C,D') df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols='A,C,D') + index_col=0, usecols_excel='A,C,D') # TODO add index to xls file tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols='A,C:D') + usecols_excel='A,C:D') df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols='A,C:D') + index_col=0, usecols_excel='A,C:D') tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) + def test_usecols_diff_positional_int_columns_order(self, ext): + + df1 = self.get_csv_refdf('test1')[['A', 'C']] + + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols=[0, 2]) + df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols=[2, 0]) + + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df2, check_names=False) + + def test_usecols_diff_positional_str_columns_order(self, ext): + + df1 = self.get_csv_refdf('test1')[['B', 'D']] + + df2 = self.get_exceldf('test1', 
ext, 'Sheet1', usecols=['B', 'D']) + df3 = self.get_exceldf('test1', ext, 'Sheet1', usecols=['D', 'B']) + + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + def test_read_excel_without_slicing(self, ext): + + df1 = self.get_csv_refdf('test1') + df2 = self.get_exceldf('test1', ext, 'Sheet1') + + tm.assert_frame_equal(df2, df1, check_names=False) + + def test_pass_callable_argument(self, ext): + + dfref = self.get_csv_refdf('test1')[['C', 'D']] + + df1 = dfref.reindex(columns=['C', 'D']) + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols=lambda x: x > 'B') + df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols_excel='A,D:E') + + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + def test_usecols_deprecated_excel_range_str(self, ext): + + dfref = self.get_csv_refdf('test1')[['B', 'C', 'D']] + + df1 = dfref.reindex(columns=['C', 'D']) + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols=['C', 'D']) + with tm.assert_produces_warning(UserWarning): + df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols='A,D:E') + + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + def test_index_col_label_error(self, ext): + msg = "list indices must be integers.*, not str" + with tm.assert_raises_regex(TypeError, msg): + self.get_exceldf('test1', ext, 'Sheet1', index_col=["A"], + usecols=["", "A", "C"]) + + def test_pass_non_existent_column(self, ext): + msg = "Usecols do not match columns, columns expected but not found: " + "['E']" + with tm.assert_raises_regex(ValueError, msg): + self.get_exceldf('test1', ext, usecols=['E']) + + def test_usecols_excel_wrong_type(self, ext): + msg = "`usecols_excel` must be None or a string as a comma separeted " + " Excel ranges and columns." 
+ with tm.assert_raises_regex(TypeError, msg): + self.get_exceldf('test1', ext, usecols_excel=1) + + def test_usecols_wrong_type(self, ext): + msg = "'usecols' must either be list-like of all strings, all unicode," + " all integers or a callable." + with tm.assert_raises_regex(ValueError, msg): + self.get_exceldf('test1', ext, usecols='E1') + + def test_usecols_and_usecols_excel_error(self, ext): + msg = "Cannot specify both `usecols` and `usecols_excel`. Choose one" + " of them." + with tm.assert_raises_regex(ValueError, msg): + self.get_exceldf('test1', ext, usecols=[0, 2], usecols_excel="A:C") + def test_excel_stop_iterator(self, ext): parsed = self.get_exceldf('test2', ext, 'Sheet1') @@ -422,7 +497,8 @@ def test_read_one_empty_col_no_header(self, ext): path, 'no_header', usecols=[0], - header=None + header=None, + nrows=0 ) actual_header_zero = read_excel( @@ -431,9 +507,10 @@ def test_read_one_empty_col_no_header(self, ext): usecols=[0], header=0 ) - expected = DataFrame() - tm.assert_frame_equal(actual_header_none, expected) - tm.assert_frame_equal(actual_header_zero, expected) + expected_header_none = DataFrame(columns=[0]) + tm.assert_frame_equal(actual_header_none, expected_header_none) + expected_header_zero = DataFrame({1: [2, 3, 4]}, index=3 * [np.nan]) + tm.assert_frame_equal(actual_header_zero, expected_header_zero) @td.skip_if_no('openpyxl') @td.skip_if_no('xlwt') @@ -450,7 +527,8 @@ def test_read_one_empty_col_with_header(self, ext): path, 'with_header', usecols=[0], - header=None + header=None, + nrows=1 ) actual_header_zero = read_excel( @@ -461,7 +539,7 @@ def test_read_one_empty_col_with_header(self, ext): ) expected_header_none = DataFrame(pd.Series([0], dtype='int64')) tm.assert_frame_equal(actual_header_none, expected_header_none) - expected_header_zero = DataFrame(columns=[0]) + expected_header_zero = DataFrame(pd.Series(4 * [np.nan])) tm.assert_frame_equal(actual_header_zero, expected_header_zero) @td.skip_if_no('openpyxl') @@ -503,35 
+581,20 @@ def test_sheet_name_and_sheetname(self, ext): # GH10559: Minor improvement: Change "sheet_name" to "sheetname" # GH10969: DOC: Consistent var names (sheetname vs sheet_name) # GH12604: CLN GH10559 Rename sheetname variable to sheet_name - # GH20920: ExcelFile.parse() and pd.read_xlsx() have different - # behavior for "sheetname" argument dfref = self.get_csv_refdf('test1') - df1 = self.get_exceldf('test1', ext, - sheet_name='Sheet1') # doc + df1 = self.get_exceldf('test1', ext, sheet_name='Sheet1') # doc with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): df2 = self.get_exceldf('test1', ext, sheetname='Sheet1') # bkwrd compat - excel = self.get_excelfile('test1', ext) - df1_parse = excel.parse(sheet_name='Sheet1') # doc - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - df2_parse = excel.parse(sheetname='Sheet1') # bkwrd compat - tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) - tm.assert_frame_equal(df1_parse, dfref, check_names=False) - tm.assert_frame_equal(df2_parse, dfref, check_names=False) def test_sheet_name_both_raises(self, ext): with tm.assert_raises_regex(TypeError, "Cannot specify both"): self.get_exceldf('test1', ext, sheetname='Sheet1', sheet_name='Sheet1') - excel = self.get_excelfile('test1', ext) - with tm.assert_raises_regex(TypeError, "Cannot specify both"): - excel.parse(sheetname='Sheet1', - sheet_name='Sheet1') - @pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm']) class TestXlrdReader(ReadingTestsBase):
- [x] closes #18273 - [x] tests added / passed - [x] passes git diff master --name-only -- "*.py" | grep "pandas/" | xargs -r flake8 - [x] whatsnew entry As mentioned read_excel returns an empty DataFrame when usecols argument is a list of strings. Now lists of strings are correctly interpreted by read_excel function.
https://api.github.com/repos/pandas-dev/pandas/pulls/20480
2018-03-25T04:53:02Z
2018-11-08T13:19:35Z
null
2018-11-08T13:19:36Z
Additional DOC and BUG fix related to merging with mix of columns and…
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 98914c13d4d31..8a25d991c149b 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -31,10 +31,10 @@ operations. Concatenating objects --------------------- -The :func:`~pandas.concat` function (in the main pandas namespace) does all of -the heavy lifting of performing concatenation operations along an axis while -performing optional set logic (union or intersection) of the indexes (if any) on -the other axes. Note that I say "if any" because there is only a single possible +The :func:`~pandas.concat` function (in the main pandas namespace) does all of +the heavy lifting of performing concatenation operations along an axis while +performing optional set logic (union or intersection) of the indexes (if any) on +the other axes. Note that I say "if any" because there is only a single possible axis of concatenation for Series. Before diving into all of the details of ``concat`` and what it can do, here is @@ -109,9 +109,9 @@ some configurable handling of "what to do with the other axes": to the actual data concatenation. * ``copy`` : boolean, default True. If False, do not copy data unnecessarily. -Without a little bit of context many of these arguments don't make much sense. -Let's revisit the above example. Suppose we wanted to associate specific keys -with each of the pieces of the chopped up DataFrame. We can do this using the +Without a little bit of context many of these arguments don't make much sense. +Let's revisit the above example. Suppose we wanted to associate specific keys +with each of the pieces of the chopped up DataFrame. We can do this using the ``keys`` argument: .. ipython:: python @@ -138,9 +138,9 @@ It's not a stretch to see how this can be very useful. More detail on this functionality below. .. 
note:: - It is worth noting that :func:`~pandas.concat` (and therefore - :func:`~pandas.append`) makes a full copy of the data, and that constantly - reusing this function can create a significant performance hit. If you need + It is worth noting that :func:`~pandas.concat` (and therefore + :func:`~pandas.append`) makes a full copy of the data, and that constantly + reusing this function can create a significant performance hit. If you need to use the operation over several datasets, use a list comprehension. :: @@ -224,8 +224,8 @@ DataFrame: Concatenating using ``append`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A useful shortcut to :func:`~pandas.concat` are the :meth:`~DataFrame.append` -instance methods on ``Series`` and ``DataFrame``. These methods actually predated +A useful shortcut to :func:`~pandas.concat` are the :meth:`~DataFrame.append` +instance methods on ``Series`` and ``DataFrame``. These methods actually predated ``concat``. They concatenate along ``axis=0``, namely the index: .. ipython:: python @@ -271,8 +271,8 @@ need to be: .. note:: - Unlike the :py:meth:`~list.append` method, which appends to the original list - and returns ``None``, :meth:`~DataFrame.append` here **does not** modify + Unlike the :py:meth:`~list.append` method, which appends to the original list + and returns ``None``, :meth:`~DataFrame.append` here **does not** modify ``df1`` and returns its copy with ``df2`` appended. .. _merging.ignore_index: @@ -370,9 +370,9 @@ Passing ``ignore_index=True`` will drop all name references. More concatenating with group keys ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A fairly common use of the ``keys`` argument is to override the column names +A fairly common use of the ``keys`` argument is to override the column names when creating a new ``DataFrame`` based on existing ``Series``. 
-Notice how the default behaviour consists on letting the resulting ``DataFrame`` +Notice how the default behaviour consists on letting the resulting ``DataFrame`` inherit the parent ``Series``' name, when these existed. .. ipython:: python @@ -468,7 +468,7 @@ Appending rows to a DataFrame ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While not especially efficient (since a new object must be created), you can -append a single row to a ``DataFrame`` by passing a ``Series`` or dict to +append a single row to a ``DataFrame`` by passing a ``Series`` or dict to ``append``, which returns a new ``DataFrame`` as above. .. ipython:: python @@ -513,7 +513,7 @@ pandas has full-featured, **high performance** in-memory join operations idiomatically very similar to relational databases like SQL. These methods perform significantly better (in some cases well over an order of magnitude better) than other open source implementations (like ``base::merge.data.frame`` -in R). The reason for this is careful algorithmic design and the internal layout +in R). The reason for this is careful algorithmic design and the internal layout of the data in ``DataFrame``. See the :ref:`cookbook<cookbook.merge>` for some advanced strategies. @@ -521,7 +521,7 @@ See the :ref:`cookbook<cookbook.merge>` for some advanced strategies. Users who are familiar with SQL but new to pandas might be interested in a :ref:`comparison with SQL<compare_with_sql.join>`. -pandas provides a single function, :func:`~pandas.merge`, as the entry point for +pandas provides a single function, :func:`~pandas.merge`, as the entry point for all standard database join operations between ``DataFrame`` or named ``Series`` objects: :: @@ -590,7 +590,7 @@ The return type will be the same as ``left``. If ``left`` is a ``DataFrame`` or and ``right`` is a subclass of ``DataFrame``, the return type will still be ``DataFrame``. 
``merge`` is a function in the pandas namespace, and it is also available as a -``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling +``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling ``DataFrame`` being implicitly considered the left object in the join. The related :meth:`~DataFrame.join` method, uses ``merge`` internally for the @@ -602,7 +602,7 @@ Brief primer on merge methods (relational algebra) Experienced users of relational databases like SQL will be familiar with the terminology used to describe join operations between two SQL-table like -structures (``DataFrame`` objects). There are several cases to consider which +structures (``DataFrame`` objects). There are several cases to consider which are very important to understand: * **one-to-one** joins: for example when joining two ``DataFrame`` objects on @@ -642,8 +642,8 @@ key combination: labels=['left', 'right'], vertical=False); plt.close('all'); -Here is a more complicated example with multiple join keys. Only the keys -appearing in ``left`` and ``right`` are present (the intersection), since +Here is a more complicated example with multiple join keys. Only the keys +appearing in ``left`` and ``right`` are present (the intersection), since ``how='inner'`` by default. .. ipython:: python @@ -759,13 +759,13 @@ Checking for duplicate keys .. versionadded:: 0.21.0 -Users can use the ``validate`` argument to automatically check whether there -are unexpected duplicates in their merge keys. Key uniqueness is checked before -merge operations and so should protect against memory overflows. Checking key -uniqueness is also a good way to ensure user data structures are as expected. +Users can use the ``validate`` argument to automatically check whether there +are unexpected duplicates in their merge keys. Key uniqueness is checked before +merge operations and so should protect against memory overflows. 
Checking key +uniqueness is also a good way to ensure user data structures are as expected. -In the following example, there are duplicate values of ``B`` in the right -``DataFrame``. As this is not a one-to-one merge -- as specified in the +In the following example, there are duplicate values of ``B`` in the right +``DataFrame``. As this is not a one-to-one merge -- as specified in the ``validate`` argument -- an exception will be raised. @@ -778,11 +778,11 @@ In the following example, there are duplicate values of ``B`` in the right In [53]: result = pd.merge(left, right, on='B', how='outer', validate="one_to_one") ... - MergeError: Merge keys are not unique in right dataset; not a one-to-one merge + MergeError: Merge keys are not unique in right dataset; not a one-to-one merge -If the user is aware of the duplicates in the right ``DataFrame`` but wants to -ensure there are no duplicates in the left DataFrame, one can use the -``validate='one_to_many'`` argument instead, which will not raise an exception. +If the user is aware of the duplicates in the right ``DataFrame`` but wants to +ensure there are no duplicates in the left DataFrame, one can use the +``validate='one_to_many'`` argument instead, which will not raise an exception. .. ipython:: python @@ -794,8 +794,8 @@ ensure there are no duplicates in the left DataFrame, one can use the The merge indicator ~~~~~~~~~~~~~~~~~~~ -:func:`~pandas.merge` accepts the argument ``indicator``. If ``True``, a -Categorical-type column called ``_merge`` will be added to the output object +:func:`~pandas.merge` accepts the argument ``indicator``. 
If ``True``, a +Categorical-type column called ``_merge`` will be added to the output object that takes on values: =================================== ================ @@ -903,7 +903,7 @@ Joining on index ~~~~~~~~~~~~~~~~ :meth:`DataFrame.join` is a convenient method for combining the columns of two -potentially differently-indexed ``DataFrames`` into a single result +potentially differently-indexed ``DataFrames`` into a single result ``DataFrame``. Here is a very basic example: .. ipython:: python @@ -983,9 +983,9 @@ indexes: Joining key columns on an index ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -:meth:`~DataFrame.join` takes an optional ``on`` argument which may be a column +:meth:`~DataFrame.join` takes an optional ``on`` argument which may be a column or multiple column names, which specifies that the passed ``DataFrame`` is to be -aligned on that column in the ``DataFrame``. These two function calls are +aligned on that column in the ``DataFrame``. These two function calls are completely equivalent: :: @@ -995,7 +995,7 @@ completely equivalent: how='left', sort=False) Obviously you can choose whichever form you find more convenient. For -many-to-one joins (where one of the ``DataFrame``'s is already indexed by the +many-to-one joins (where one of the ``DataFrame``'s is already indexed by the join key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python @@ -1133,17 +1133,42 @@ This is equivalent but less verbose and more memory efficient / faster than this Joining with two MultiIndexes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This is not implemented via ``join`` at-the-moment, however it can be done using -the following code. +This is supported in a limited way, provided that the index for the right +argument is completely used in the join, and is a subset of the indices in +the left argument, as in this example: .. 
ipython:: python - index = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), - ('K1', 'X2')], - names=['key', 'X']) + leftindex = pd.MultiIndex.from_product([list('abc'), list('xy'), [1, 2]], + names=['abc', 'xy', 'num']) + left = pd.DataFrame({'v1' : range(12)}, index=leftindex) + left + + rightindex = pd.MultiIndex.from_product([list('abc'), list('xy')], + names=['abc', 'xy']) + right = pd.DataFrame({'v2': [100*i for i in range(1, 7)]}, index=rightindex) + right + + left.join(right, on=['abc', 'xy'], how='inner') + +If that condition is not satisfied, a join with two multi-indexes can be +done using the following code. + +.. ipython:: python + + leftindex = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), + ('K1', 'X2')], + names=['key', 'X']) left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, - index=index) + index=leftindex) + + rightindex = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), + ('K2', 'Y2'), ('K2', 'Y3')], + names=['key', 'Y']) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=rightindex) result = pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner').set_index(['key','X','Y']) @@ -1161,7 +1186,7 @@ the following code. Merging on a combination of columns and index levels ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. versionadded:: 0.22 +.. versionadded:: 0.23 Strings passed as the ``on``, ``left_on``, and ``right_on`` parameters may refer to either column names or index level names. This enables merging @@ -1200,6 +1225,12 @@ resetting indexes. frames, the index level is preserved as an index level in the resulting DataFrame. +.. note:: + When DataFrames are merged using only some of the levels of a `MultiIndex`, + the extra levels will be dropped from the resulting merge. In order to + preserve those levels, use ``reset_index`` on those level names to move + those levels to columns prior to doing the merge. + .. 
note:: If a string matches both a column name and an index level name, then a @@ -1262,7 +1293,7 @@ similarly. Joining multiple DataFrame or Panel objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A list or tuple of ``DataFrames`` can also be passed to :meth:`~DataFrame.join` +A list or tuple of ``DataFrames`` can also be passed to :meth:`~DataFrame.join` to join them together on their indexes. .. ipython:: python @@ -1284,7 +1315,7 @@ Merging together values within Series or DataFrame columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another fairly common situation is to have two like-indexed (or similarly -indexed) ``Series`` or ``DataFrame`` objects and wanting to "patch" values in +indexed) ``Series`` or ``DataFrame`` objects and wanting to "patch" values in one object from values for matching indices in the other. Here is an example: .. ipython:: python @@ -1309,7 +1340,7 @@ For this, use the :meth:`~DataFrame.combine_first` method: plt.close('all'); Note that this method only takes values from the right ``DataFrame`` if they are -missing in the left ``DataFrame``. A related method, :meth:`~DataFrame.update`, +missing in the left ``DataFrame``. A related method, :meth:`~DataFrame.update`, alters non-NA values in place: .. ipython:: python @@ -1361,15 +1392,15 @@ Merging AsOf .. versionadded:: 0.19.0 -A :func:`merge_asof` is similar to an ordered left-join except that we match on -nearest key rather than equal keys. For each row in the ``left`` ``DataFrame``, -we select the last row in the ``right`` ``DataFrame`` whose ``on`` key is less +A :func:`merge_asof` is similar to an ordered left-join except that we match on +nearest key rather than equal keys. For each row in the ``left`` ``DataFrame``, +we select the last row in the ``right`` ``DataFrame`` whose ``on`` key is less than the left's key. Both DataFrames must be sorted by the key. -Optionally an asof merge can perform a group-wise merge. 
This matches the +Optionally an asof merge can perform a group-wise merge. This matches the ``by`` key equally, in addition to the nearest match on the ``on`` key. -For example; we might have ``trades`` and ``quotes`` and we want to ``asof`` +For example; we might have ``trades`` and ``quotes`` and we want to ``asof`` merge them. .. ipython:: python @@ -1428,8 +1459,8 @@ We only asof within ``2ms`` between the quote time and the trade time. by='ticker', tolerance=pd.Timedelta('2ms')) -We only asof within ``10ms`` between the quote time and the trade time and we -exclude exact matches on time. Note that though we exclude the exact matches +We only asof within ``10ms`` between the quote time and the trade time and we +exclude exact matches on time. Note that though we exclude the exact matches (of the quotes), prior quotes **do** propagate to that point in time. .. ipython:: python diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 93ac9caa42e3e..cc37881de30ee 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1541,6 +1541,7 @@ Reshaping - Bug in :meth:`DataFrame.append` with a :class:`Series` with a dateutil timezone would raise a ``TypeError`` (:issue:`23682`) - Bug in ``Series`` construction when passing no data and ``dtype=str`` (:issue:`22477`) - Bug in :func:`cut` with ``bins`` as an overlapping ``IntervalIndex`` where multiple bins were returned per item instead of raising a ``ValueError`` (:issue:`23980`) +- Bug in :meth:`DataFrame.join` when joining on partial MultiIndex would drop names (:issue:`20452`). .. 
_whatsnew_0240.bug_fixes.sparse: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index dfbee5656da7d..b078ff32f6944 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -715,6 +715,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): result[name] = key_col elif result._is_level_reference(name): if isinstance(result.index, MultiIndex): + key_col.name = name idx_list = [result.index.get_level_values(level_name) if level_name != name else key_col for level_name in result.index.names] diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 5387a1043e00e..99386e594ff3a 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -730,6 +730,31 @@ def test_panel_join_many(self): pytest.raises(ValueError, panels[0].join, panels[1:], how='right') + def test_join_multi_to_multi(self, join_type): + # GH 20475 + leftindex = MultiIndex.from_product([list('abc'), list('xy'), [1, 2]], + names=['abc', 'xy', 'num']) + left = DataFrame({'v1': range(12)}, index=leftindex) + + rightindex = MultiIndex.from_product([list('abc'), list('xy')], + names=['abc', 'xy']) + right = DataFrame({'v2': [100 * i for i in range(1, 7)]}, + index=rightindex) + + result = left.join(right, on=['abc', 'xy'], how=join_type) + expected = (left.reset_index() + .merge(right.reset_index(), + on=['abc', 'xy'], how=join_type) + .set_index(['abc', 'xy', 'num']) + ) + assert_frame_equal(expected, result) + + with pytest.raises(ValueError): + left.join(right, on='xy', how=join_type) + + with pytest.raises(ValueError): + right.join(left, on=['abc', 'xy'], how=join_type) + def _check_join(left, right, result, join_col, how='left', lsuffix='_x', rsuffix='_y'): diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 7ee88f223cd95..94e180f9328d6 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ 
b/pandas/tests/reshape/merge/test_merge.py @@ -1397,16 +1397,16 @@ def test_merge_index_types(index): assert_frame_equal(result, expected) -@pytest.mark.parametrize("on,left_on,right_on,left_index,right_index,nms,nm", [ - (['outer', 'inner'], None, None, False, False, ['outer', 'inner'], 'B'), - (None, None, None, True, True, ['outer', 'inner'], 'B'), - (None, ['outer', 'inner'], None, False, True, None, 'B'), - (None, None, ['outer', 'inner'], True, False, None, 'B'), - (['outer', 'inner'], None, None, False, False, ['outer', 'inner'], None), - (None, None, None, True, True, ['outer', 'inner'], None), - (None, ['outer', 'inner'], None, False, True, None, None), - (None, None, ['outer', 'inner'], True, False, None, None)]) -def test_merge_series(on, left_on, right_on, left_index, right_index, nms, nm): +@pytest.mark.parametrize("on,left_on,right_on,left_index,right_index,nm", [ + (['outer', 'inner'], None, None, False, False, 'B'), + (None, None, None, True, True, 'B'), + (None, ['outer', 'inner'], None, False, True, 'B'), + (None, None, ['outer', 'inner'], True, False, 'B'), + (['outer', 'inner'], None, None, False, False, None), + (None, None, None, True, True, None), + (None, ['outer', 'inner'], None, False, True, None), + (None, None, ['outer', 'inner'], True, False, None)]) +def test_merge_series(on, left_on, right_on, left_index, right_index, nm): # GH 21220 a = pd.DataFrame({"A": [1, 2, 3, 4]}, index=pd.MultiIndex.from_product([['a', 'b'], [0, 1]], @@ -1416,7 +1416,7 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nms, nm): names=['outer', 'inner']), name=nm) expected = pd.DataFrame({"A": [2, 4], "B": [1, 3]}, index=pd.MultiIndex.from_product([['a', 'b'], [1]], - names=nms)) + names=['outer', 'inner'])) if nm is not None: result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index)
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #20452 - [x] tests added / passed - test_join.py - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry - added to v0.24 related to bug fix This PR adds some documentation about how `MultiLevel` index levels are dropped when doing a merge that includes levels and columns. It also fixes a small bug related to `join`.
https://api.github.com/repos/pandas-dev/pandas/pulls/20475
2018-03-23T22:20:56Z
2018-12-04T13:11:04Z
2018-12-04T13:11:04Z
2019-07-12T17:21:10Z
DOC: docstring to series.unique
diff --git a/pandas/core/base.py b/pandas/core/base.py index b3eb9a0ae7530..7c31ea32bfa19 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1020,30 +1020,6 @@ def value_counts(self, normalize=False, sort=True, ascending=False, normalize=normalize, bins=bins, dropna=dropna) return result - _shared_docs['unique'] = ( - """ - Return unique values in the object. Uniques are returned in order - of appearance, this does NOT sort. Hash table-based unique. - - Parameters - ---------- - values : 1d array-like - - Returns - ------- - unique values. - - If the input is an Index, the return is an Index - - If the input is a Categorical dtype, the return is a Categorical - - If the input is a Series/ndarray, the return will be an ndarray - - See Also - -------- - unique - Index.unique - Series.unique - """) - - @Appender(_shared_docs['unique'] % _indexops_doc_kwargs) def unique(self): values = self._values diff --git a/pandas/core/series.py b/pandas/core/series.py index da598259d272d..48e6453e36491 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1429,8 +1429,51 @@ def mode(self): # TODO: Add option for bins like value_counts() return algorithms.mode(self) - @Appender(base._shared_docs['unique'] % _shared_doc_kwargs) def unique(self): + """ + Return unique values of Series object. + + Uniques are returned in order of appearance. Hash table-based unique, + therefore does NOT sort. + + Returns + ------- + ndarray or Categorical + The unique values returned as a NumPy array. In case of categorical + data type, returned as a Categorical. + + See Also + -------- + pandas.unique : top-level unique method for any 1-d array-like object. + Index.unique : return Index with unique values from an Index object. 
+ + Examples + -------- + >>> pd.Series([2, 1, 3, 3], name='A').unique() + array([2, 1, 3]) + + >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() + array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') + + >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') + ... for _ in range(3)]).unique() + array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], + dtype=object) + + An unordered Categorical will return categories in the order of + appearance. + + >>> pd.Series(pd.Categorical(list('baabc'))).unique() + [b, a, c] + Categories (3, object): [b, a, c] + + An ordered Categorical preserves the category ordering. + + >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), + ... ordered=True)).unique() + [b, a, c] + Categories (3, object): [a < b < c] + """ result = super(Series, self).unique() if is_datetime64tz_dtype(self.dtype):
- [x] closes #20075 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry seems that no one has picked this up. moving _shared_doc['unique'] used by Series.unique() only to Series.unique. ``` ################################################################################ ####################### Docstring (pandas.Series.unique) ####################### ################################################################################ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- unique values. - If the input is an Index, the return is an Index - If the input is a Categorical dtype, the return is a Categorical - If the input is a Series/ndarray, the return will be an ndarray See Also -------- unique : return unique values of 1d array-like objects. Index.unique : return Index with unique values from an Index object. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([2] + [1] * 5).unique() array([2, 1]) >>> pd.Series([pd.Timestamp('20160101') for _ in range(3)]).unique() array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.Series([pd.Timestamp('20160101', tz='US/Eastern') for _ in range(3)]).unique() array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.Series(pd.Categorical(list('baabc'))).unique() [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. 
>>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ordered=True)).unique() [b, a, c] Categories (3, object): [a < b < c] ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.unique" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20474
2018-03-23T22:08:19Z
2018-03-27T20:50:12Z
2018-03-27T20:50:12Z
2018-03-27T21:39:46Z
Parametrized NA sentinel for factorize
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 07b4b80603e03..15d93374da3a9 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -70,7 +70,7 @@ cdef class Factorizer: return self.count def factorize(self, ndarray[object] values, sort=False, na_sentinel=-1, - check_null=True): + na_value=None): """ Factorize values with nans replaced by na_sentinel >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) @@ -81,7 +81,7 @@ cdef class Factorizer: uniques.extend(self.uniques.to_array()) self.uniques = uniques labels = self.table.get_labels(values, self.uniques, - self.count, na_sentinel, check_null) + self.count, na_sentinel, na_value) mask = (labels == na_sentinel) # sort on if sort: @@ -114,7 +114,7 @@ cdef class Int64Factorizer: return self.count def factorize(self, int64_t[:] values, sort=False, - na_sentinel=-1, check_null=True): + na_sentinel=-1, na_value=None): """ Factorize values with nans replaced by na_sentinel >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) @@ -126,7 +126,7 @@ cdef class Int64Factorizer: self.uniques = uniques labels = self.table.get_labels(values, self.uniques, self.count, na_sentinel, - check_null) + na_value=na_value) # sort on if sort: diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index bca4e388f3279..eca66f78499db 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -250,13 +250,13 @@ cdef class HashTable: {{py: -# name, dtype, null_condition, float_group -dtypes = [('Float64', 'float64', 'val != val', True), - ('UInt64', 'uint64', 'False', False), - ('Int64', 'int64', 'val == iNaT', False)] +# name, dtype, float_group, default_na_value +dtypes = [('Float64', 'float64', True, 'nan'), + ('UInt64', 'uint64', False, 0), + ('Int64', 'int64', False, 'iNaT')] def get_dispatch(dtypes): - for (name, dtype, null_condition, float_group) in dtypes: + for (name, 
dtype, float_group, default_na_value) in dtypes: unique_template = """\ cdef: Py_ssize_t i, n = len(values) @@ -298,13 +298,13 @@ def get_dispatch(dtypes): return uniques.to_array() """ - unique_template = unique_template.format(name=name, dtype=dtype, null_condition=null_condition, float_group=float_group) + unique_template = unique_template.format(name=name, dtype=dtype, float_group=float_group) - yield (name, dtype, null_condition, float_group, unique_template) + yield (name, dtype, float_group, default_na_value, unique_template) }} -{{for name, dtype, null_condition, float_group, unique_template in get_dispatch(dtypes)}} +{{for name, dtype, float_group, default_na_value, unique_template in get_dispatch(dtypes)}} cdef class {{name}}HashTable(HashTable): @@ -408,24 +408,36 @@ cdef class {{name}}HashTable(HashTable): @cython.boundscheck(False) def get_labels(self, {{dtype}}_t[:] values, {{name}}Vector uniques, Py_ssize_t count_prior, Py_ssize_t na_sentinel, - bint check_null=True): + object na_value=None): cdef: Py_ssize_t i, n = len(values) int64_t[:] labels Py_ssize_t idx, count = count_prior int ret = 0 - {{dtype}}_t val + {{dtype}}_t val, na_value2 khiter_t k {{name}}VectorData *ud + bint use_na_value labels = np.empty(n, dtype=np.int64) ud = uniques.data + use_na_value = na_value is not None + + if use_na_value: + # We need this na_value2 because we want to allow users + # to *optionally* specify an NA sentinel *of the correct* type. + # We use None, to make it optional, which requires `object` type + # for the parameter. To please the compiler, we use na_value2, + # which is only used if it's *specified*. 
+ na_value2 = <{{dtype}}_t>na_value + else: + na_value2 = {{default_na_value}} with nogil: for i in range(n): val = values[i] - if check_null and {{null_condition}}: + if val != val or (use_na_value and val == na_value2): labels[i] = na_sentinel continue @@ -695,7 +707,7 @@ cdef class StringHashTable(HashTable): @cython.boundscheck(False) def get_labels(self, ndarray[object] values, ObjectVector uniques, Py_ssize_t count_prior, int64_t na_sentinel, - bint check_null=1): + object na_value=None): cdef: Py_ssize_t i, n = len(values) int64_t[:] labels @@ -706,10 +718,12 @@ cdef class StringHashTable(HashTable): char *v char **vecs khiter_t k + bint use_na_value # these by-definition *must* be strings labels = np.zeros(n, dtype=np.int64) uindexer = np.empty(n, dtype=np.int64) + use_na_value = na_value is not None # pre-filter out missing # and assign pointers @@ -717,7 +731,8 @@ cdef class StringHashTable(HashTable): for i in range(n): val = values[i] - if PyUnicode_Check(val) or PyString_Check(val): + if ((PyUnicode_Check(val) or PyString_Check(val)) and + not (use_na_value and val == na_value)): v = util.get_c_string(val) vecs[i] = v else: @@ -868,7 +883,7 @@ cdef class PyObjectHashTable(HashTable): def get_labels(self, ndarray[object] values, ObjectVector uniques, Py_ssize_t count_prior, int64_t na_sentinel, - bint check_null=True): + object na_value=None): cdef: Py_ssize_t i, n = len(values) int64_t[:] labels @@ -876,14 +891,17 @@ cdef class PyObjectHashTable(HashTable): int ret = 0 object val khiter_t k + bint use_na_value labels = np.empty(n, dtype=np.int64) + use_na_value = na_value is not None for i in range(n): val = values[i] hash(val) - if check_null and val != val or val is None: + if ((val != val or val is None) or + (use_na_value and val == na_value)): labels[i] = na_sentinel continue diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index de2e638265f1e..45f86f044a4b2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py 
@@ -29,7 +29,7 @@ _ensure_float64, _ensure_uint64, _ensure_int64) from pandas.compat.numpy import _np_version_under1p10 -from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.missing import isna, na_value_for_dtype from pandas.core import common as com from pandas._libs import algos, lib, hashtable as htable @@ -435,7 +435,8 @@ def isin(comps, values): return f(comps, values) -def _factorize_array(values, check_nulls, na_sentinel=-1, size_hint=None): +def _factorize_array(values, na_sentinel=-1, size_hint=None, + na_value=None): """Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. @@ -443,11 +444,14 @@ def _factorize_array(values, check_nulls, na_sentinel=-1, size_hint=None): Parameters ---------- values : ndarray - check_nulls : bool - Whether to check for nulls in the hashtable's 'get_labels' method. na_sentinel : int, default -1 size_hint : int, optional Passsed through to the hashtable's 'get_labels' method + na_value : object, optional + A value in `values` to consider missing. Note: only use this + parameter when you know that you don't have any values pandas would + consider missing in the array (NaN for float data, iNaT for + datetimes, etc.). 
Returns ------- @@ -457,7 +461,8 @@ def _factorize_array(values, check_nulls, na_sentinel=-1, size_hint=None): table = hash_klass(size_hint or len(values)) uniques = vec_klass() - labels = table.get_labels(values, uniques, 0, na_sentinel, check_nulls) + labels = table.get_labels(values, uniques, 0, na_sentinel, + na_value=na_value) labels = _ensure_platform_int(labels) uniques = uniques.to_array() @@ -508,10 +513,18 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): dtype = original.dtype else: values, dtype, _ = _ensure_data(values) - check_nulls = not is_integer_dtype(original) - labels, uniques = _factorize_array(values, check_nulls, + + if (is_datetime64_any_dtype(original) or + is_timedelta64_dtype(original) or + is_period_dtype(original)): + na_value = na_value_for_dtype(original.dtype) + else: + na_value = None + + labels, uniques = _factorize_array(values, na_sentinel=na_sentinel, - size_hint=size_hint) + size_hint=size_hint, + na_value=na_value) if sort and len(uniques) > 0: from pandas.core.sorting import safe_sort diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 6eadef37da344..ac57660300be4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -7,7 +7,6 @@ from pandas import compat from pandas.compat import u, lzip from pandas._libs import lib, algos as libalgos -from pandas._libs.tslib import iNaT from pandas.core.dtypes.generic import ( ABCSeries, ABCIndexClass, ABCCategoricalIndex) @@ -2163,11 +2162,10 @@ def factorize(self, na_sentinel=-1): from pandas.core.algorithms import _factorize_array codes = self.codes.astype('int64') - codes[codes == -1] = iNaT # We set missing codes, normally -1, to iNaT so that the # Int64HashTable treats them as missing values. 
- labels, uniques = _factorize_array(codes, check_nulls=True, - na_sentinel=na_sentinel) + labels, uniques = _factorize_array(codes, na_sentinel=na_sentinel, + na_value=-1) uniques = self._constructor(self.categories.take(uniques), categories=self.categories, ordered=self.ordered) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 01c88c269e7e0..7be00cbfd567a 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -11,6 +11,7 @@ is_datetimelike_v_numeric, is_float_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_interval_dtype, + is_period_dtype, is_complex_dtype, is_string_like_dtype, is_bool_dtype, is_integer_dtype, is_dtype_equal, @@ -393,7 +394,7 @@ def na_value_for_dtype(dtype, compat=True): dtype = pandas_dtype(dtype) if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or - is_timedelta64_dtype(dtype)): + is_timedelta64_dtype(dtype) or is_period_dtype(dtype)): return NaT elif is_float_dtype(dtype): return np.nan diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 4f208bc352c70..365d8d762d673 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -15,7 +15,8 @@ from pandas import (NaT, Float64Index, Series, DatetimeIndex, TimedeltaIndex, date_range) from pandas.core.dtypes.common import is_scalar -from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, PeriodDtype, IntervalDtype) from pandas.core.dtypes.missing import ( array_equivalent, isna, notna, isnull, notnull, na_value_for_dtype) @@ -311,23 +312,27 @@ def test_array_equivalent_str(): np.array(['A', 'X'], dtype=dtype)) -def test_na_value_for_dtype(): - for dtype in [np.dtype('M8[ns]'), np.dtype('m8[ns]'), - DatetimeTZDtype('datetime64[ns, US/Eastern]')]: - assert na_value_for_dtype(dtype) is NaT - - for dtype in ['u1', 'u2', 'u4', 'u8', - 'i1', 'i2', 'i4', 'i8']: - assert 
na_value_for_dtype(np.dtype(dtype)) == 0 - - for dtype in ['bool']: - assert na_value_for_dtype(np.dtype(dtype)) is False - - for dtype in ['f2', 'f4', 'f8']: - assert np.isnan(na_value_for_dtype(np.dtype(dtype))) - - for dtype in ['O']: - assert np.isnan(na_value_for_dtype(np.dtype(dtype))) +@pytest.mark.parametrize('dtype, na_value', [ + # Datetime-like + (np.dtype("M8[ns]"), NaT), + (np.dtype("m8[ns]"), NaT), + (DatetimeTZDtype('datetime64[ns, US/Eastern]'), NaT), + (PeriodDtype("M"), NaT), + # Integer + ('u1', 0), ('u2', 0), ('u4', 0), ('u8', 0), + ('i1', 0), ('i2', 0), ('i4', 0), ('i8', 0), + # Bool + ('bool', False), + # Float + ('f2', np.nan), ('f4', np.nan), ('f8', np.nan), + # Object + ('O', np.nan), + # Interval + (IntervalDtype(), np.nan), +]) +def test_na_value_for_dtype(dtype, na_value): + result = na_value_for_dtype(dtype) + assert result is na_value class TestNAObj(object): diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 884b1eb7342c6..ada4f880e92a4 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -257,6 +257,36 @@ def test_deprecate_order(self): with tm.assert_produces_warning(False): algos.factorize(data) + @pytest.mark.parametrize('data', [ + np.array([0, 1, 0], dtype='u8'), + np.array([-2**63, 1, -2**63], dtype='i8'), + np.array(['__nan__', 'foo', '__nan__'], dtype='object'), + ]) + def test_parametrized_factorize_na_value_default(self, data): + # arrays that include the NA default for that type, but isn't used. 
+ l, u = algos.factorize(data) + expected_uniques = data[[0, 1]] + expected_labels = np.array([0, 1, 0], dtype='i8') + tm.assert_numpy_array_equal(l, expected_labels) + tm.assert_numpy_array_equal(u, expected_uniques) + + @pytest.mark.parametrize('data, na_value', [ + (np.array([0, 1, 0, 2], dtype='u8'), 0), + (np.array([1, 0, 1, 2], dtype='u8'), 1), + (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63), + (np.array([1, -2**63, 1, 0], dtype='i8'), 1), + (np.array(['a', '', 'a', 'b'], dtype=object), 'a'), + (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()), + (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object), + ('a', 1)), + ]) + def test_parametrized_factorize_na_value(self, data, na_value): + l, u = algos._factorize_array(data, na_value=na_value) + expected_uniques = data[[1, 3]] + expected_labels = np.array([-1, 0, -1, 1], dtype='i8') + tm.assert_numpy_array_equal(l, expected_labels) + tm.assert_numpy_array_equal(u, expected_uniques) + class TestUnique(object):
Adds a new keyword `na_value` to control the NA sentinel inside the factorize routine. ```python In [3]: arr = np.array([0, 1, 0, 2], dtype='u8') In [4]: pd.factorize(arr) Out[4]: (array([0, 1, 0, 2]), array([0, 1, 2], dtype=uint64)) In [5]: pd.factorize(arr, na_value=0) Out[5]: (array([-1, 0, -1, 1]), array([1, 2], dtype=uint64)) ``` The basic idea is that our hashtables now have two "modes" for detecting NA values 1. The previous way, based on the templated na_condition 2. Equality to a user-specified NA value. Note: specifying NA value does not "turn off" the old way of checking NAs. ```python In [2]: pd.factorize(np.array([0.0, np.nan, 1.0]), na_value=0.0) Out[2]: (array([-1, -1, 0]), array([1.])) ``` I'm sure the implementation can be cleaned up, but I wanted to put this up for feedback. Hopefully someone can tell me how to avoid the `use_default_na` nonsense :) cc @WillAyd closes https://github.com/pandas-dev/pandas/issues/20328
https://api.github.com/repos/pandas-dev/pandas/pulls/20473
2018-03-23T19:12:53Z
2018-03-27T10:28:11Z
2018-03-27T10:28:10Z
2018-05-02T13:10:07Z
DOC: update the pandas.Series.shift docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6ca8f6731bbb8..096abd4255779 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8361,32 +8361,59 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors=errors) _shared_docs['shift'] = (""" - Shift index by desired number of periods with an optional time freq + Shift index by desired number of periods with an optional time `freq`. + + When `freq` is not passed, shift the index without realigning the data. + If `freq` is passed (in this case, the index must be date or datetime, + or it will raise a `NotImplementedError`), the index will be + increased using the periods and the `freq`. Parameters ---------- periods : int - Number of periods to move, can be positive or negative. - freq : DateOffset, timedelta, or time rule string, optional - Increment to use from the tseries module or time rule (e.g. 'EOM'). - See Notes. - axis : %(axes_single_arg)s + Number of periods to shift. Can be positive or negative. + freq : DateOffset, tseries.offsets, timedelta, or str, optional + Offset to use from the tseries module or time rule (e.g. 'EOM'). + If `freq` is specified then the index values are shifted but the + data is not realigned. That is, use `freq` if you would like to + extend the index when shifting and preserve the original data. + axis : {0 or 'index', 1 or 'columns', None}, default None + Shift direction. + + Returns + ------- + %(klass)s + Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. + tshift : Shift the time index, using the index's frequency if + available. - Notes - ----- - If freq is specified then the index values are shifted but the data - is not realigned. That is, use freq if you would like to extend the - index when shifting and preserve the original data. 
- - Returns - ------- - shifted : %(klass)s + Examples + -------- + >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45], + ... 'Col2': [13, 23, 18, 33, 48], + ... 'Col3': [17, 27, 22, 37, 52]}) + + >>> df.shift(periods=3) + Col1 Col2 Col3 + 0 NaN NaN NaN + 1 NaN NaN NaN + 2 NaN NaN NaN + 3 10.0 13.0 17.0 + 4 20.0 23.0 27.0 + + >>> df.shift(periods=1, axis='columns') + Col1 Col2 Col3 + 0 NaN 10.0 13.0 + 1 NaN 20.0 23.0 + 2 NaN 15.0 18.0 + 3 NaN 30.0 33.0 + 4 NaN 45.0 48.0 """) @Appender(_shared_docs['shift'] % _shared_doc_kwargs)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ####################### Docstring (pandas.Series.shift) ####################### ################################################################################ Shift index by desired number of periods with an optional time freq. When freq is not passed, shift the index without realign the data. If freq is passed (in this case, the index must be date or datetime), the index will be increased using the periods and the freq. Parameters ---------- periods : int Number of periods to move, can be positive or negative. freq : DateOffset, timedelta, or time rule string, optional Increment to use from the tseries module or time rule (e.g. 'EOM'). See Notes. axis : int or str Shift direction. {0, 'index'}. Notes ----- If freq is specified then the index values are shifted but the data is not realigned. That is, use freq if you would like to extend the index when shifting and preserve the original data. Returns ------- shifted : Series Examples -------- >>> df = pd.DataFrame({'Col1': [10, 20, 30, 20, 15, 30, 45], ... 'Col2': [13, 23, 33, 23, 18, 33, 48], ... 
'Col3': [17, 27, 37, 27, 22, 37, 52]}) >>> print(df) Col1 Col2 Col3 0 10 13 17 1 20 23 27 2 30 33 37 3 20 23 27 4 15 18 22 5 30 33 37 6 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 5 30.0 33.0 37.0 6 20.0 23.0 27.0 >>> df.shift(periods=1, axis=1) Col1 Col2 Col3 0 NaN 10.0 13.0 1 NaN 20.0 23.0 2 NaN 30.0 33.0 3 NaN 20.0 23.0 4 NaN 15.0 18.0 5 NaN 30.0 33.0 6 NaN 45.0 48.0 >>> import datetime >>> names = ['João', 'Maria', 'Emanuel', 'Jussara', 'José'] >>> dates = [datetime.datetime(2018, 3, 1, 11, 15), ... datetime.datetime(2018, 3, 5, 11, 15), ... datetime.datetime(2018, 3, 10, 11, 15), ... datetime.datetime(2018, 3, 15, 11, 15), ... datetime.datetime(2018, 3, 20, 11, 15)] >>> df = pd.DataFrame(data={'names': names}, index=dates) >>> print(df) names 2018-03-01 11:15:00 João 2018-03-05 11:15:00 Maria 2018-03-10 11:15:00 Emanuel 2018-03-15 11:15:00 Jussara 2018-03-20 11:15:00 José >>> df.shift(periods=2, freq='D') names 2018-03-03 11:15:00 João 2018-03-07 11:15:00 Maria 2018-03-12 11:15:00 Emanuel 2018-03-17 11:15:00 Jussara 2018-03-22 11:15:00 José >>> df.shift(periods=75, freq='min') names 2018-03-01 12:30:00 João 2018-03-05 12:30:00 Maria 2018-03-10 12:30:00 Emanuel 2018-03-15 12:30:00 Jussara 2018-03-20 12:30:00 José See Also -------- slice_shift: Equivalent to shift without copying data. tshift: Shift the time index, using the index’s frequency if available. ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.shift" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20472
2018-03-23T18:54:42Z
2018-11-04T15:24:10Z
2018-11-04T15:24:10Z
2018-11-04T15:24:23Z
DOC: Fix broken dependency links
diff --git a/doc/source/install.rst b/doc/source/install.rst index 7d741c6c2c75a..c96d4fbeb4ad2 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -212,7 +212,7 @@ Recommended Dependencies ``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups. If installed, must be Version 2.4.6 or higher. -* `bottleneck <http://berkeleyanalytics.com/bottleneck>`__: for accelerating certain types of ``nan`` +* `bottleneck <https://github.com/kwgoodman/bottleneck>`__: for accelerating certain types of ``nan`` evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed, must be Version 1.0.0 or higher. @@ -233,7 +233,7 @@ Optional Dependencies * `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended. * `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended. * `Feather Format <https://github.com/wesm/feather>`__: necessary for feather-based storage, version 0.3.1 or higher. -* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.4.1) or `fastparquet <https://fastparquet.readthedocs.io/en/latest/necessary>`__ (>= 0.0.6) for parquet-based storage. The `snappy <https://pypi.python.org/pypi/python-snappy>`__ and `brotli <https://pypi.python.org/pypi/brotlipy>`__ are available for compression support. +* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.4.1) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.0.6) for parquet-based storage. The `snappy <https://pypi.python.org/pypi/python-snappy>`__ and `brotli <https://pypi.python.org/pypi/brotlipy>`__ are available for compression support. 
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are: * `psycopg2 <http://initd.org/psycopg/>`__: for PostgreSQL
- Change bottleneck link to github repository page. - Fix link to fastparquet documentation.
https://api.github.com/repos/pandas-dev/pandas/pulls/20471
2018-03-23T13:39:39Z
2018-03-25T14:32:14Z
2018-03-25T14:32:14Z
2018-03-25T14:32:16Z
TST: Fixed version comparison
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index aec561ece8573..8083a1ce69092 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -1,15 +1,14 @@ import operator -import sys import pytest +from pandas.compat import PY2, PY36 from pandas.tests.extension import base from .array import JSONArray, JSONDtype, make_data -pytestmark = pytest.mark.skipif(sys.version_info[0] == 2, - reason="Py2 doesn't have a UserDict") +pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict") @pytest.fixture @@ -81,7 +80,7 @@ def test_fillna_frame(self): class TestMethods(base.BaseMethodsTests): unhashable = pytest.mark.skip(reason="Unhashable") - unstable = pytest.mark.skipif(sys.version_info <= (3, 5), + unstable = pytest.mark.skipif(not PY36, # 3.6 or higher reason="Dictionary order unstable") @unhashable
This failed to skip for 3.5.x because the micro component made it False. closes https://github.com/pandas-dev/pandas/issues/20468
https://api.github.com/repos/pandas-dev/pandas/pulls/20469
2018-03-23T11:11:38Z
2018-03-24T01:14:11Z
2018-03-24T01:14:11Z
2018-03-24T01:14:14Z
TST: clean deprecation warnings & some parametrizing
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8ff2b6c85eeed..9b09c87689762 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2153,7 +2153,7 @@ def _verbose_repr(): lines.append(_put_str(col, space) + tmpl % (count, dtype)) def _non_verbose_repr(): - lines.append(self.columns.summary(name='Columns')) + lines.append(self.columns._summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 9f7b06ed2d61c..c4c02c0bf6f17 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -647,83 +647,83 @@ def test_value_counts_bins(self): assert s.nunique() == 0 - def test_value_counts_datetime64(self): - klasses = [Index, Series] - for klass in klasses: - # GH 3002, datetime64[ns] - # don't test names though - txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', - 'xxyyzz20100101EGG', 'xxyyww20090101EGG', - 'foofoo20080909PIE', 'foofoo20080909GUM']) - f = StringIO(txt) - df = pd.read_fwf(f, widths=[6, 8, 3], - names=["person_id", "dt", "food"], - parse_dates=["dt"]) - - s = klass(df['dt'].copy()) - s.name = None - - idx = pd.to_datetime(['2010-01-01 00:00:00Z', - '2008-09-09 00:00:00Z', - '2009-01-01 00:00:00X']) - expected_s = Series([3, 2, 1], index=idx) - tm.assert_series_equal(s.value_counts(), expected_s) - - expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z', - '2009-01-01 00:00:00Z', - '2008-09-09 00:00:00Z'], - dtype='datetime64[ns]') - if isinstance(s, Index): - tm.assert_index_equal(s.unique(), DatetimeIndex(expected)) - else: - tm.assert_numpy_array_equal(s.unique(), expected) - - assert s.nunique() == 3 - - # with NaT - s = df['dt'].copy() - s = klass([v for v in s.values] + [pd.NaT]) - - result = s.value_counts() - assert result.index.dtype == 'datetime64[ns]' - tm.assert_series_equal(result, expected_s) - - result = s.value_counts(dropna=False) - expected_s[pd.NaT] = 1 - 
tm.assert_series_equal(result, expected_s) - - unique = s.unique() - assert unique.dtype == 'datetime64[ns]' - - # numpy_array_equal cannot compare pd.NaT - if isinstance(s, Index): - exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]) - tm.assert_index_equal(unique, exp_idx) - else: - tm.assert_numpy_array_equal(unique[:3], expected) - assert pd.isna(unique[3]) - - assert s.nunique() == 3 - assert s.nunique(dropna=False) == 4 - - # timedelta64[ns] - td = df.dt - df.dt + timedelta(1) - td = klass(td, name='dt') - - result = td.value_counts() - expected_s = Series([6], index=[Timedelta('1day')], name='dt') - tm.assert_series_equal(result, expected_s) - - expected = TimedeltaIndex(['1 days'], name='dt') - if isinstance(td, Index): - tm.assert_index_equal(td.unique(), expected) - else: - tm.assert_numpy_array_equal(td.unique(), expected.values) - - td2 = timedelta(1) + (df.dt - df.dt) - td2 = klass(td2, name='dt') - result2 = td2.value_counts() - tm.assert_series_equal(result2, expected_s) + @pytest.mark.parametrize('klass', [Index, Series]) + def test_value_counts_datetime64(self, klass): + + # GH 3002, datetime64[ns] + # don't test names though + txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', + 'xxyyzz20100101EGG', 'xxyyww20090101EGG', + 'foofoo20080909PIE', 'foofoo20080909GUM']) + f = StringIO(txt) + df = pd.read_fwf(f, widths=[6, 8, 3], + names=["person_id", "dt", "food"], + parse_dates=["dt"]) + + s = klass(df['dt'].copy()) + s.name = None + + idx = pd.to_datetime(['2010-01-01 00:00:00Z', + '2008-09-09 00:00:00Z', + '2009-01-01 00:00:00Z']) + expected_s = Series([3, 2, 1], index=idx) + tm.assert_series_equal(s.value_counts(), expected_s) + + expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z', + '2009-01-01 00:00:00Z', + '2008-09-09 00:00:00Z'], + dtype='datetime64[ns]') + if isinstance(s, Index): + tm.assert_index_equal(s.unique(), DatetimeIndex(expected)) + else: + tm.assert_numpy_array_equal(s.unique(), expected) + + assert s.nunique() 
== 3 + + # with NaT + s = df['dt'].copy() + s = klass([v for v in s.values] + [pd.NaT]) + + result = s.value_counts() + assert result.index.dtype == 'datetime64[ns]' + tm.assert_series_equal(result, expected_s) + + result = s.value_counts(dropna=False) + expected_s[pd.NaT] = 1 + tm.assert_series_equal(result, expected_s) + + unique = s.unique() + assert unique.dtype == 'datetime64[ns]' + + # numpy_array_equal cannot compare pd.NaT + if isinstance(s, Index): + exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]) + tm.assert_index_equal(unique, exp_idx) + else: + tm.assert_numpy_array_equal(unique[:3], expected) + assert pd.isna(unique[3]) + + assert s.nunique() == 3 + assert s.nunique(dropna=False) == 4 + + # timedelta64[ns] + td = df.dt - df.dt + timedelta(1) + td = klass(td, name='dt') + + result = td.value_counts() + expected_s = Series([6], index=[Timedelta('1day')], name='dt') + tm.assert_series_equal(result, expected_s) + + expected = TimedeltaIndex(['1 days'], name='dt') + if isinstance(td, Index): + tm.assert_index_equal(td.unique(), expected) + else: + tm.assert_numpy_array_equal(td.unique(), expected.values) + + td2 = timedelta(1) + (df.dt - df.dt) + td2 = klass(td2, name='dt') + result2 = td2.value_counts() + tm.assert_series_equal(result2, expected_s) def test_factorize(self): for orig in self.objs:
https://api.github.com/repos/pandas-dev/pandas/pulls/20467
2018-03-23T10:18:52Z
2018-03-23T14:48:19Z
2018-03-23T14:48:19Z
2018-03-23T14:48:57Z
DOC: update the pandas.DatetimeIndex docstring
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 75f4ec4f0d341..8288fac026730 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -175,41 +175,49 @@ def _new_DatetimeIndex(cls, d): class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, Int64Index): """ + Datetime index using NumPy's datetime64 dtype. + Immutable ndarray of datetime64 data, represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and carry metadata such as frequency information. Parameters ---------- - data : array-like (1-dimensional), optional - Optional datetime-like data to construct index with - copy : bool - Make a copy of input ndarray - freq : string or pandas offset object, optional - One of pandas date offset strings or corresponding objects - start : starting value, datetime-like, optional - If data is None, start is used as the start point in generating regular - timestamp data. - periods : int, optional, > 0 + data : array-like (1-dimensional), optional + Optional datetime-like data to construct index with. + freq : str or pandas offset object, optional + One of pandas date offset strings or corresponding objects. + start : datetime-like, optional + The starting value. If `data` is None, `start` is used as the + start point in generating regular timestamp data. + end : datetime-like, optional + The end time. If `periods` is None, generated index will extend + to first conforming time on or just past end argument. + periods : int (> 0), optional Number of periods to generate, if generating index. Takes precedence - over end argument - end : end time, datetime-like, optional - If periods is none, generated index will extend to first conforming - time on or just past end argument - closed : string or None, default None + over `end` argument + copy : bool, default False + Make a copy of input ndarray. + name : object + Name to be stored in the index. 
+ tz : pytz.timezone or dateutil.tz.tzfile, optional + verify_integrity : bool, default False + Check new index for duplicates, otherwise defer until necessary. + normalize : bool + Return DatetimeIndex with times to midnight. + closed : str or None, default None Make the interval closed with respect to the given frequency to - the 'left', 'right', or both sides (None) - tz : pytz.timezone or dateutil.tz.tzfile + the 'left', 'right', or both sides (None). ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' - 'infer' will attempt to infer fall dst-transition hours based on - order + order. - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous - times) - - 'NaT' will return NaT where there are ambiguous times + times). + - 'NaT' will return NaT where there are ambiguous times. - 'raise' will raise an AmbiguousTimeError if there are ambiguous times - name : object - Name to be stored in the index + dtype : object + Specify the datatype to DataFrame constructor. 
dayfirst : bool, default False If True, parse dates in `data` with the day first order yearfirst : bool, default False @@ -274,6 +282,35 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, TimedeltaIndex : Index of timedelta64 data PeriodIndex : Index of Period data pandas.to_datetime : Convert argument to datetime + + Examples + -------- + Generate a date range with month end frequency + + >>> pd.DatetimeIndex(start='2018-3-10', freq='M', periods=4) + DatetimeIndex(['2018-03-31', '2018-04-30', + '2018-05-31', '2018-06-30'], + dtype='datetime64[ns]', freq='M') + + Hourly time range with end conforming the period + + >>> pd.DatetimeIndex(start='2015-03-01 21:00:00', end='2015-03-02', freq='H') + DatetimeIndex(['2015-03-01 21:00:00', '2015-03-01 22:00:00', + '2015-03-01 23:00:00', '2015-03-02 00:00:00'], + dtype='datetime64[ns]', freq='H') + + Generate time range with with 1 minute frequency + + >>> pd.date_range('2018-03-10', periods=4, freq='1min') + DatetimeIndex(['2018-03-10 00:00:00', '2018-03-10 00:01:00', + '2018-03-10 00:02:00', '2018-03-10 00:03:00'], + dtype='datetime64[ns]', freq='T') + + Weekly date range with closed argument to mark end date interval + + >>> pd.date_range('2018-03-10', '2018-03-31', freq='W', closed='right') + DatetimeIndex(['2018-03-11', '2018-03-18', '2018-03-25'], + dtype='datetime64[ns]', freq='W-SUN') """ _typ = 'datetimeindex'
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ ] PR title is "DOC: update the pandas.DatetimeIndex docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py pandas.DatetimeIndex` - [ ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single pandas.DatetimeIndex` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ####################### Docstring (pandas.DatetimeIndex) ####################### ################################################################################ Store Datetime index using NumPy's datetime64 dtype. Immutable ndarray of datetime64 data, represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and carry metadata such as frequency information. Parameters ---------- data : array-like (1-dimensional), optional Optional datetime-like data to construct index with. freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. start : datetime-like, optional Shapes the starting value and if data is None, start is used as the start point in generating regular timestamp data. end : datetime-like, optional Shapes the end time, if periods is none, generated index will extend to first conforming time on or just past end argument. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tz : pytz.timezone or dateutil.tz.tzfile. verify_integrity : bool, default False Check new index for duplicates, otherwise defer until necessary. normalize : bool Return DatetimeIndex with times to midnight. 
closed : str or None, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' - 'infer' will attempt to infer fall dst-transition hours based on order. - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous times). - 'NaT' will return NaT where there are ambiguous times. - 'raise' will raise an AmbiguousTimeError if there are ambiguous times dtype : object Specify the datatype to DataFrame constructor. **kwargs For compatibility. Has no effect on the result. Attributes ---------- year month day hour minute second microsecond nanosecond date time dayofyear weekofyear week dayofweek weekday quarter tz freq freqstr is_month_start is_month_end is_quarter_start is_quarter_end is_year_start is_year_end is_leap_year inferred_freq Methods ------- normalize strftime snap tz_convert tz_localize round floor ceil to_period to_perioddelta to_pydatetime to_series to_frame month_name day_name Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
See Also --------- Index : The base pandas Index type TimedeltaIndex : Index of timedelta64 data PeriodIndex : Index of Period data Examples -------- >>> # Generate a date range with month end frequency >>> pd.DatetimeIndex(start='2018-3-10', freq='M', periods=4) DatetimeIndex(['2018-03-31', '2018-04-30', '2018-05-31', '2018-06-30'], dtype='datetime64[ns]', freq='M') >>> # Hourly time range with end conforming the period >>> pd.DatetimeIndex(start='2015-03-01 21:00:00', end='2015-03-02', freq='H') DatetimeIndex(['2015-03-01 21:00:00', '2015-03-01 22:00:00', '2015-03-01 23:00:00', '2015-03-02 00:00:00'], dtype='datetime64[ns]', freq='H') >>> # Generate time range with with 1 minute frequency >>> pd.date_range('2018-03-10', periods=4, freq='1min') DatetimeIndex(['2018-03-10 00:00:00', '2018-03-10 00:01:00', '2018-03-10 00:02:00', '2018-03-10 00:03:00'], dtype='datetime64[ns]', freq='T') >>> # Weekly date range with closed argument to mark end date interval >>> pd.date_range('2018-03-10', '2018-03-31', freq='W', closed='right') DatetimeIndex(['2018-03-11', '2018-03-18', '2018-03-25'], dtype='datetime64[ns]', freq='W-SUN') ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameters {'periods', 'kwargs'} not documented Unknown parameters {'**kwargs'} ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20465
2018-03-23T07:27:59Z
2018-11-01T01:38:35Z
null
2018-11-02T14:18:05Z
DOC: update the Series.str.join docstring
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index b98fa106336fc..6796c14a18eaa 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -941,17 +941,59 @@ def str_get_dummies(arr, sep='|'): def str_join(arr, sep): """ - Join lists contained as elements in the Series/Index with - passed delimiter. Equivalent to :meth:`str.join`. + Join lists contained as elements in the Series/Index with passed delimiter. + + If the elements of a Series are lists themselves, join the content of these + lists using the delimiter passed to the function. + This function is an equivalent to :meth:`str.join`. Parameters ---------- - sep : string - Delimiter + sep : str + Delimiter to use between list entries. Returns ------- - joined : Series/Index of objects + Series/Index: object + + Notes + ----- + If any of the lists does not contain string objects the result of the join + will be `NaN`. + + See Also + -------- + str.join : Standard library version of this method. + Series.str.split : Split strings around given separator/delimiter. + + Examples + -------- + + Example with a list that contains non-string elements. + + >>> s = pd.Series([['lion', 'elephant', 'zebra'], + ... [1.1, 2.2, 3.3], + ... ['cat', np.nan, 'dog'], + ... ['cow', 4.5, 'goat'] + ... ['duck', ['swan', 'fish'], 'guppy']]) + >>> s + 0 [lion, elephant, zebra] + 1 [1.1, 2.2, 3.3] + 2 [cat, nan, dog] + 3 [cow, 4.5, goat] + 4 [duck, [swan, fish], guppy] + dtype: object + + Join all lists using an '-', the lists containing object(s) of types other + than str will become a NaN. + + >>> s.str.join('-') + 0 lion-elephant-zebra + 1 NaN + 2 NaN + 3 NaN + 4 NaN + dtype: object """ return _na_map(sep.join, arr)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ###################### Docstring (pandas.Series.str.join) ###################### ################################################################################ Join lists contained as elements in the Series/Index with passed delimiter. If the elements of a Series are lists themselves, join the content of these lists using the delimiter passed to the function. This function is an equivalent to :meth:`str.join`. Parameters ---------- sep : str Delimiter to use between list entries. Returns ------- Series/Index of objects Notes ----- If any of the lists does not contain string objects the result of the join will be `NaN`. See Also -------- str.join : Standard library version of this method. Series.str.split : Split strings around given separator/delimiter. Examples -------- Example with a list that contains non-string elements. >>> s = pd.Series([['lion', 'elephant', 'zebra'], ... [1.1, 2.2, 3.3], ... ["cat", np.nan, "dog"], ... ["cow", 4.5, "goat"]]) >>> s 0 [lion, elephant, zebra] 1 [1.1, 2.2, 3.3] 2 [cat, nan, dog] 3 [cow, 4.5, goat] dtype: object Join all lists using an '-', the list of floats will become a NaN. 
>>> s.str.join('-') 0 lion-elephant-zebra 1 NaN 2 NaN 3 NaN dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.str.join" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20463
2018-03-22T21:57:36Z
2018-03-27T12:10:37Z
2018-03-27T12:10:37Z
2018-03-27T12:10:49Z
DOC: Improving the docstring of Series.str.upper and related
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index b98fa106336fc..09eed94b36c6b 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2145,11 +2145,68 @@ def rindex(self, sub, start=0, end=None): _shared_docs['casemethods'] = (""" Convert strings in the Series/Index to %(type)s. + Equivalent to :meth:`str.%(method)s`. Returns ------- - converted : Series/Index of objects + Series/Index of objects + + See Also + -------- + Series.str.lower : Converts all characters to lowercase. + Series.str.upper : Converts all characters to uppercase. + Series.str.title : Converts first character of each word to uppercase and + remaining to lowercase. + Series.str.capitalize : Converts first character to uppercase and + remaining to lowercase. + Series.str.swapcase : Converts uppercase to lowercase and lowercase to + uppercase. + + Examples + -------- + >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']) + >>> s + 0 lower + 1 CAPITALS + 2 this is a sentence + 3 SwApCaSe + dtype: object + + >>> s.str.lower() + 0 lower + 1 capitals + 2 this is a sentence + 3 swapcase + dtype: object + + >>> s.str.upper() + 0 LOWER + 1 CAPITALS + 2 THIS IS A SENTENCE + 3 SWAPCASE + dtype: object + + >>> s.str.title() + 0 Lower + 1 Capitals + 2 This Is A Sentence + 3 Swapcase + dtype: object + + >>> s.str.capitalize() + 0 Lower + 1 Capitals + 2 This is a sentence + 3 Swapcase + dtype: object + + >>> s.str.swapcase() + 0 LOWER + 1 capitals + 2 THIS IS A SENTENCE + 3 sWaPcAsE + dtype: object """) _shared_docs['lower'] = dict(type='lowercase', method='lower') _shared_docs['upper'] = dict(type='uppercase', method='upper')
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ##################### Docstring (pandas.Series.str.upper) ##################### ################################################################################ Convert strings in the Series/Index to uppercase. Equivalent to :meth:`str.upper`. Returns ------- Series/Index of objects See Also -------- Series.str.lower : Converts all characters to lower case. Series.str.upper : Converts all characters to upper case. Series.str.title : Converts first character of each word to upper case and remaining to lower case. Series.str.capitalize : Converts first character to upper case and remaining to lower case. Series.str.swapcase : Converts upper case to lower case and lower case to upper case. 
Examples -------- >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']) >>> s.str.lower() 0 lower 1 capitals 2 this is a sentence 3 swapcase dtype: object >>> s.str.upper() 0 LOWER 1 CAPITALS 2 THIS IS A SENTENCE 3 SWAPCASE dtype: object >>> s.str.title() 0 Lower 1 Capitals 2 This Is A Sentence 3 Swapcase dtype: object >>> s.str.capitalize() 0 Lower 1 Capitals 2 This is a sentence 3 Swapcase dtype: object >>> s.str.swapcase() 0 LOWER 1 capitals 2 THIS IS A SENTENCE 3 sWaPcAsE dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.str.upper" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20462
2018-03-22T21:53:54Z
2018-03-28T16:22:12Z
2018-03-28T16:22:12Z
2018-03-28T16:22:13Z
CLN: Replacing %s with .format in pandas/core/frame.py
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f476bff4df2cd..a71ade3da87de 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -411,7 +411,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' - 'incompatible data and dtype: %s' % e) + 'incompatible data and dtype: {e}'.format(e=e)) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: @@ -520,8 +520,9 @@ def _get_axes(N, K, index=index, columns=columns): try: values = values.astype(dtype) except Exception as orig: - e = ValueError("failed to cast to '%s' (Exception was: %s)" - % (dtype, orig)) + e = ValueError("failed to cast to '{dtype}' (Exception " + "was: {orig})".format(dtype=dtype, + orig=orig)) raise_with_traceback(e) index, columns = _get_axes(*values.shape) @@ -873,8 +874,9 @@ def dot(self, other): lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: - raise ValueError('Dot product shape mismatch, %s vs %s' % - (lvals.shape, rvals.shape)) + raise ValueError('Dot product shape mismatch, ' + '{l} vs {r}'.format(l=lvals.shape, + r=rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, @@ -888,7 +890,7 @@ def dot(self, other): else: return Series(result, index=left.index) else: # pragma: no cover - raise TypeError('unsupported type: %s' % type(other)) + raise TypeError('unsupported type: {oth}'.format(oth=type(other))) def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5 """ @@ -1098,7 +1100,7 @@ def to_dict(self, orient='dict', into=dict): return into_c((t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples()) else: - raise ValueError("orient '%s' not understood" % orient) + raise ValueError("orient '{o}' not understood".format(o=orient)) def to_gbq(self, 
destination_table, project_id, chunksize=None, verbose=None, reauth=False, if_exists='fail', private_key=None, @@ -2140,7 +2142,7 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, lines.append(self.index._summary()) if len(self.columns) == 0: - lines.append('Empty %s' % type(self).__name__) + lines.append('Empty {name}'.format(name=type(self).__name__)) fmt.buffer_put_lines(buf, lines) return @@ -2166,13 +2168,15 @@ def _verbose_repr(): space = max(len(pprint_thing(k)) for k in self.columns) + 4 counts = None - tmpl = "%s%s" + tmpl = "{count}{dtype}" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover - raise AssertionError('Columns must equal counts (%d != %d)' - % (len(cols), len(counts))) - tmpl = "%s non-null %s" + raise AssertionError( + 'Columns must equal counts ' + '({cols:d} != {counts:d})'.format( + cols=len(cols), counts=len(counts))) + tmpl = "{count} non-null {dtype}" dtypes = self.dtypes for i, col in enumerate(self.columns): @@ -2183,7 +2187,8 @@ def _verbose_repr(): if show_counts: count = counts.iloc[i] - lines.append(_put_str(col, space) + tmpl % (count, dtype)) + lines.append(_put_str(col, space) + tmpl.format(count=count, + dtype=dtype)) def _non_verbose_repr(): lines.append(self.columns._summary(name='Columns')) @@ -2192,9 +2197,12 @@ def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: - return "%3.1f%s %s" % (num, size_qualifier, x) + return ("{num:3.1f}{size_q}" + "{x}".format(num=num, size_q=size_qualifier, x=x)) num /= 1024.0 - return "%3.1f%s %s" % (num, size_qualifier, 'PB') + return "{num:3.1f}{size_q} {pb}".format(num=num, + size_q=size_qualifier, + pb='PB') if verbose: _verbose_repr() @@ -2207,8 +2215,9 @@ def _sizeof_fmt(num, size_qualifier): _verbose_repr() counts = self.get_dtype_counts() - dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))] - lines.append('dtypes: %s' % ', 
'.join(dtypes)) + dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k + in sorted(compat.iteritems(counts))] + lines.append('dtypes: {types}'.format(types=', '.join(dtypes))) if memory_usage is None: memory_usage = get_option('display.memory_usage') @@ -2226,8 +2235,9 @@ def _sizeof_fmt(num, size_qualifier): self.index._is_memory_usage_qualified()): size_qualifier = '+' mem_usage = self.memory_usage(index=True, deep=deep).sum() - lines.append("memory usage: %s\n" % - _sizeof_fmt(mem_usage, size_qualifier)) + lines.append("memory usage: {mem}\n".format( + mem=_sizeof_fmt(mem_usage, size_qualifier))) + fmt.buffer_put_lines(buf, lines) def memory_usage(self, index=True, deep=False): @@ -3013,8 +3023,8 @@ def select_dtypes(self, include=None, exclude=None): # can't both include AND exclude! if not include.isdisjoint(exclude): - raise ValueError('include and exclude overlap on %s' % - (include & exclude)) + raise ValueError('include and exclude overlap on {inc_ex}'.format( + inc_ex=(include & exclude))) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) @@ -3869,7 +3879,8 @@ def set_index(self, keys, drop=True, append=False, inplace=False, if verify_integrity and not index.is_unique: duplicates = index.get_duplicates() - raise ValueError('Index has duplicate keys: %s' % duplicates) + raise ValueError('Index has duplicate keys: {dup}'.format( + dup=duplicates)) for c in to_remove: del frame[c] @@ -4241,7 +4252,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, mask = count > 0 else: if how is not None: - raise ValueError('invalid how option: %s' % how) + raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') @@ -6750,8 +6761,8 @@ def _count_level(self, level, axis=0, numeric_only=False): agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): - raise TypeError("Can only count levels on hierarchical %s." 
% - self._get_axis_name(axis)) + raise TypeError("Can only count levels on hierarchical " + "{ax}.".format(ax=self._get_axis_name(axis))) if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might @@ -6829,9 +6840,9 @@ def f(x): elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover - e = NotImplementedError("Handling exception with filter_" - "type %s not implemented." % - filter_type) + e = NotImplementedError( + "Handling exception with filter_type {f} not" + "implemented.".format(f=filter_type)) raise_with_traceback(e) with np.errstate(all='ignore'): result = f(data.values) @@ -6843,8 +6854,8 @@ def f(x): elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover - msg = ("Generating numeric_only data with filter_type %s" - "not supported." % filter_type) + msg = ("Generating numeric_only data with filter_type {f}" + "not supported.".format(f=filter_type)) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) @@ -7119,7 +7130,8 @@ def to_timestamp(self, freq=None, how='start', axis=0, copy=True): elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover - raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) + raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( + ax=axis)) return self._constructor(new_data) @@ -7150,7 +7162,8 @@ def to_period(self, freq=None, axis=0, copy=True): elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover - raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) + raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( + ax=axis)) return self._constructor(new_data) @@ -7509,8 +7522,9 @@ def _convert_object_array(content, columns, coerce_float=False, dtype=None): else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... 
- raise AssertionError('%d columns passed, passed data had %s ' - 'columns' % (len(columns), len(content))) + raise AssertionError('{col:d} columns passed, passed data had ' + '{con} columns'.format(col=len(columns), + con=len(content))) # provide soft conversion of object dtypes def convert(arr): @@ -7585,4 +7599,4 @@ def _from_nested_dict(data): def _put_str(s, space): - return ('%s' % s)[:space].ljust(space) + return u'{s}'.format(s=s)[:space].ljust(space)
- [x] Progress towards #16130 - [x] tests added / passed - [x] passes git diff upstream/master -u -- "*.py" | flake8 --diff
https://api.github.com/repos/pandas-dev/pandas/pulls/20461
2018-03-22T21:49:32Z
2018-04-17T10:31:43Z
2018-04-17T10:31:43Z
2018-04-17T10:48:55Z
ENH: fill_value argument for shift #15486
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cf41737a04ba6..64a793d7be05c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3619,9 +3619,9 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, method=method, axis=axis) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0): + def shift(self, periods=1, freq=None, axis=0, fill_value=np.nan): return super(DataFrame, self).shift(periods=periods, freq=freq, - axis=axis) + axis=axis, fill_value=fill_value) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bd1a2371315a0..24203e021a9be 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7539,15 +7539,21 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, """) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0): + def shift(self, periods=1, freq=None, axis=0, fill_value=np.nan): if periods == 0: return self block_axis = self._get_block_manager_axis(axis) + shift_kwargs = {'periods': periods, 'axis': block_axis} + if not is_categorical_dtype(self): + shift_kwargs['fill_value'] = fill_value if freq is None: - new_data = self._data.shift(periods=periods, axis=block_axis) + new_data = self._data.shift(**shift_kwargs) else: - return self.tshift(periods, freq) + tshift_kwargs = {'periods': periods, 'freq': freq} + if not is_categorical_dtype(self): + tshift_kwargs['fill_value'] = fill_value + return self.tshift(**tshift_kwargs) return self._constructor(new_data).__finalize__(self) @@ -7587,18 +7593,20 @@ def slice_shift(self, periods=1, axis=0): return new_obj.__finalize__(self) - def tshift(self, periods=1, freq=None, axis=0): + def tshift(self, periods=1, freq=None, axis=0, fill_value=np.nan): """ Shift the time index, using the index's frequency if available. 
Parameters ---------- periods : int - Number of periods to move, can be positive or negative + Number of periods to move, can be positive or negative. freq : DateOffset, timedelta, or time rule string, default None - Increment to use from the tseries module or time rule (e.g. 'EOM') + Increment to use from the tseries module or time rule (e.g. 'EOM'). axis : int or basestring - Corresponds to the axis that contains the Index + Corresponds to the axis that contains the Index. + fill_value : + Value to use to cover missing values. Notes ----- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a0e122d390240..59be34ad16133 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1279,12 +1279,12 @@ def diff(self, n, axis=1, mgr=None): new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] - def shift(self, periods, axis=0, mgr=None): + def shift(self, periods, axis=0, mgr=None, fill_value=np.nan): """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also - new_values, fill_value = maybe_upcast(self.values) + new_values, fill_value = maybe_upcast(self.values, fill_value) # make sure array sent to np.roll is c_contiguous f_ordered = new_values.flags.f_contiguous @@ -2541,7 +2541,7 @@ def _try_coerce_result(self, result): return result - def shift(self, periods, axis=0, mgr=None): + def shift(self, periods, axis=0, mgr=None, fill_value=np.nan): return self.make_block_same_class(values=self.values.shift(periods), placement=self.mgr_locs) @@ -2879,7 +2879,7 @@ def _try_coerce_result(self, result): def _box_func(self): return lambda x: tslib.Timestamp(x, tz=self.dtype.tz) - def shift(self, periods, axis=0, mgr=None): + def shift(self, periods, axis=0, mgr=None, fill_value=np.nan): """ shift the block by periods """ # think about moving this to the DatetimeIndex. 
This is a non-freq @@ -3072,7 +3072,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None, return [self.make_block_same_class(values=values, placement=self.mgr_locs)] - def shift(self, periods, axis=0, mgr=None): + def shift(self, periods, axis=0, mgr=None, fill_value=np.nan): """ shift the block by periods """ N = len(self.values.T) indexer = np.zeros(N, dtype=int) diff --git a/pandas/core/series.py b/pandas/core/series.py index 3e3600898ba7f..d8de1d8963f53 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3358,8 +3358,9 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, axis=axis) @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0): - return super(Series, self).shift(periods=periods, freq=freq, axis=axis) + def shift(self, periods=1, freq=None, axis=0, fill_value=np.nan): + return super(Series, self).shift(periods=periods, freq=freq, axis=axis, + fill_value=fill_value) def reindex_axis(self, labels, axis=0, **kwargs): """Conform Series to new index with optional filling logic. 
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index ceb6c942c81b1..c8ffde727aa94 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -308,6 +308,14 @@ def test_shift_bool(self): columns=['high', 'low']) assert_frame_equal(rs, xp) + def test_shift_bool_fillna(self): + df = DataFrame({'high': [True, False], + 'low': [False, False]}) + rs = df.shift(1, fill_value=True) + xp = DataFrame({'high': [True, True], + 'low': [True, False]}) + assert_frame_equal(rs, xp) + def test_shift_categorical(self): # GH 9416 s1 = pd.Series(['a', 'b', 'c'], dtype='category') diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 0e6e44e839464..c4c47ec9e7698 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1607,6 +1607,17 @@ def test_shift_int(self): expected = ts.astype(float).shift(1) assert_series_equal(shifted, expected) + def test_shift_fillna(self): + # ENH 15486 + ts = self.ts.astype(int) + fillval = 0 + shifted = ts.shift(1, fill_value=fillval) + # default behaviour adds nan so converts to floats + default = ts.shift(1) + default.iloc[0] = fillval + expected = default.astype(int) + assert_series_equal(shifted, expected) + def test_shift_categorical(self): # GH 9416 s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
This is a work in progress. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [X] closes #15486 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20460
2018-03-22T21:39:37Z
2018-10-11T02:00:30Z
null
2018-10-11T02:00:30Z
DOC: update the isna, isnull, notna and notnull docstring
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 01c88c269e7e0..2c8d229f9b0cb 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -29,23 +29,78 @@ def isna(obj): - """Detect missing values (NaN in numeric arrays, None/NaN in object arrays) + """ + Detect missing values for an array-like object. + + This function takes a scalar or array-like object and indictates + whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` + in object arrays, ``NaT`` in datetimelike). Parameters ---------- - arr : ndarray or object value - Object to check for null-ness + obj : scalar or array-like + Object to check for null or missing values. Returns ------- - isna : array-like of bool or bool - Array or bool indicating whether an object is null or if an array is - given which of the element is null. + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is missing. - See also + See Also + -------- + notna : boolean inverse of pandas.isna. + Series.isna : Detetct missing values in a Series. + DataFrame.isna : Detect missing values in a DataFrame. + Index.isna : Detect missing values in an Index. + + Examples -------- - pandas.notna: boolean inverse of pandas.isna - pandas.isnull: alias of isna + Scalar arguments (including strings) result in a scalar boolean. + + >>> pd.isna('dog') + False + + >>> pd.isna(np.nan) + True + + ndarrays result in an ndarray of booleans. + + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.isna(array) + array([[False, True, False], + [False, False, True]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... 
"2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.isna(index) + array([False, False, True, False]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.isna(df) + 0 1 2 + 0 False False False + 1 False True False + + >>> pd.isna(df[1]) + 0 False + 1 True + Name: 1, dtype: bool """ return _isna(obj) @@ -197,24 +252,78 @@ def _isna_ndarraylike_old(obj): def notna(obj): - """Replacement for numpy.isfinite / -numpy.isnan which is suitable for use - on object arrays. + """ + Detect non-missing values for an array-like object. + + This function takes a scalar or array-like object and indictates + whether values are valid (not missing, which is ``NaN`` in numeric + arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- - arr : ndarray or object value - Object to check for *not*-null-ness + obj : array-like or object value + Object to check for *not* null or *non*-missing values. Returns ------- - notisna : array-like of bool or bool - Array or bool indicating whether an object is *not* null or if an array - is given which of the element is *not* null. + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is valid. - See also + See Also + -------- + isna : boolean inverse of pandas.notna. + Series.notna : Detetct valid values in a Series. + DataFrame.notna : Detect valid values in a DataFrame. + Index.notna : Detect valid values in an Index. + + Examples -------- - pandas.isna : boolean inverse of pandas.notna - pandas.notnull : alias of notna + Scalar arguments (including strings) result in a scalar boolean. 
+ + >>> pd.notna('dog') + True + + >>> pd.notna(np.nan) + False + + ndarrays result in an ndarray of booleans. + + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.notna(array) + array([[ True, False, True], + [ True, True, False]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.notna(index) + array([ True, True, False, True]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.notna(df) + 0 1 2 + 0 True True True + 1 True False True + + >>> pd.notna(df[1]) + 0 True + 1 False + Name: 1, dtype: bool """ res = isna(obj) if is_scalar(res):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` The validation script did not work on this docstring ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. The validation script did not work on this docstring
https://api.github.com/repos/pandas-dev/pandas/pulls/20459
2018-03-22T21:27:35Z
2018-03-26T20:12:21Z
2018-03-26T20:12:21Z
2018-03-26T20:12:24Z
DOC: update the pandas.Series.str.startswith docstring
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index b98fa106336fc..d6a67435aeb09 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -328,19 +328,54 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): def str_startswith(arr, pat, na=np.nan): """ - Return boolean Series/``array`` indicating whether each string in the - Series/Index starts with passed pattern. Equivalent to - :meth:`str.startswith`. + Test if the start of each string element matches a pattern. + + Equivalent to :meth:`str.startswith`. Parameters ---------- - pat : string - Character sequence - na : bool, default NaN + pat : str + Character sequence. Regular expressions are not accepted. + na : object, default NaN + Object shown if element tested is not a string. Returns ------- - startswith : Series/array of boolean values + Series or Index of bool + A Series of booleans indicating whether the given pattern matches + the start of each string element. + + See Also + -------- + str.startswith : Python standard library string method. + Series.str.endswith : Same as startswith, but tests the end of string. + Series.str.contains : Tests if string element contains a pattern. + + Examples + -------- + >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan]) + >>> s + 0 bat + 1 Bear + 2 cat + 3 NaN + dtype: object + + >>> s.str.startswith('b') + 0 True + 1 False + 2 False + 3 NaN + dtype: object + + Specifying `na` to be `False` instead of `NaN`. + + >>> s.str.startswith('b', na=False) + 0 True + 1 False + 2 False + 3 False + dtype: bool """ f = lambda x: x.startswith(pat) return _na_map(f, arr, na, dtype=bool)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################### Docstring (pandas.Series.str.startswith) ################### ################################################################################ Test if the start of each string element matches a pattern. Return a Series of booleans indicating whether the given pattern matches the start of each string element. Equivalent to :meth: `str.startswith`. Parameters ---------- pat : string Character sequence. na : object, default NaN Character sequence shown if element tested is not a string. 
Returns ------- startswith : Series/array of boolean values Examples -------- >>> s = pd.Series(['bat', 'bear', 'cat']) >>> s.str.startswith('b') 0 True 1 True 2 False dtype: bool >>> s = pd.Series(['bat', 'bear', 'cat', np.nan]) >>> s.str.startswith('b', na='not_a_string') 0 True 1 True 2 False 3 not_a_string dtype: object See Also -------- endswith : same as startswith, but tests the end of string ################################################################################ ################################## Validation ################################## ################################################################################ ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20458
2018-03-22T21:21:21Z
2018-03-25T22:49:17Z
2018-03-25T22:49:17Z
2018-03-26T06:26:16Z
CLN: Removed dead GroupBy code
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 4352a001aa989..7b68ad67675ff 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -944,23 +944,6 @@ def _cumcount_array(self, ascending=True): rev[sorter] = np.arange(count, dtype=np.intp) return out[rev].astype(np.int64, copy=False) - def _index_with_as_index(self, b): - """ - Take boolean mask of index to be returned from apply, if as_index=True - - """ - # TODO perf, it feels like this should already be somewhere... - from itertools import chain - original = self._selected_obj.index - gp = self.grouper - levels = chain((gp.levels[i][gp.labels[i][b]] - for i in range(len(gp.groupings))), - (original._get_level_values(i)[b] - for i in range(original.nlevels))) - new = MultiIndex.from_arrays(list(levels)) - new.names = gp.names + original.names - return new - def _try_cast(self, result, obj, numeric_only=False): """ try to cast the result to our obj original type, @@ -2295,18 +2278,6 @@ def size(self): index=self.result_index, dtype='int64') - @cache_readonly - def _max_groupsize(self): - """ - Compute size of largest group - """ - # For many items in each group this is much faster than - # self.size().max(), in worst case marginally slower - if self.indices: - return max(len(v) for v in self.indices.values()) - else: - return 0 - @cache_readonly def groups(self): """ dict {group name -> group labels} """ @@ -2941,9 +2912,6 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, if isinstance(grouper, MultiIndex): self.grouper = grouper.values - # pre-computed - self._should_compress = True - # we have a single grouper which may be a myriad of things, # some of which are dependent on the passing in level @@ -4964,10 +4932,6 @@ def _wrap_aggregated_output(self, output, names=None): raise com.AbstractMethodError(self) -class NDArrayGroupBy(GroupBy): - pass - - # ---------------------------------------------------------------------- # Splitting / application @@ 
-5020,10 +4984,6 @@ def apply(self, f): raise com.AbstractMethodError(self) -class ArraySplitter(DataSplitter): - pass - - class SeriesSplitter(DataSplitter): def _chop(self, sdata, slice_obj):
- [X] closes #20456 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20457
2018-03-22T18:45:18Z
2018-03-22T22:48:48Z
2018-03-22T22:48:47Z
2018-12-25T06:12:49Z
DOC: update the pandas.Series.map docstring
diff --git a/pandas/core/series.py b/pandas/core/series.py index df0fa1c6c0659..2e6270e8739ae 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2951,74 +2951,25 @@ def unstack(self, level=-1, fill_value=None): def map(self, arg, na_action=None): """ - Map values of Series using input correspondence (a dict, Series, or - function). + Map values of Series according to input correspondence. + + Used for substituting each value in a Series with another value, + that may be derived from a function, a ``dict`` or + a :class:`Series`. Parameters ---------- arg : function, dict, or Series Mapping correspondence. - na_action : {None, 'ignore'} - If 'ignore', propagate NA values, without passing them to the + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- - y : Series + Series Same index as caller. - Examples - -------- - - Map inputs to outputs (both of type `Series`): - - >>> x = pd.Series([1,2,3], index=['one', 'two', 'three']) - >>> x - one 1 - two 2 - three 3 - dtype: int64 - - >>> y = pd.Series(['foo', 'bar', 'baz'], index=[1,2,3]) - >>> y - 1 foo - 2 bar - 3 baz - - >>> x.map(y) - one foo - two bar - three baz - - If `arg` is a dictionary, return a new Series with values converted - according to the dictionary's mapping: - - >>> z = {1: 'A', 2: 'B', 3: 'C'} - - >>> x.map(z) - one A - two B - three C - - Use na_action to control whether NA values are affected by the mapping - function. - - >>> s = pd.Series([1, 2, 3, np.nan]) - - >>> s2 = s.map('this is a string {}'.format, na_action=None) - 0 this is a string 1.0 - 1 this is a string 2.0 - 2 this is a string 3.0 - 3 this is a string nan - dtype: object - - >>> s3 = s.map('this is a string {}'.format, na_action='ignore') - 0 this is a string 1.0 - 1 this is a string 2.0 - 2 this is a string 3.0 - 3 NaN - dtype: object - See Also -------- Series.apply : For applying more complex functions on a Series. 
@@ -3027,20 +2978,51 @@ def map(self, arg, na_action=None): Notes ----- - When `arg` is a dictionary, values in Series that are not in the + When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used - rather than ``NaN``: - - >>> from collections import Counter - >>> counter = Counter() - >>> counter['bar'] += 1 - >>> y.map(counter) - 1 0 - 2 1 - 3 0 - dtype: int64 + rather than ``NaN``. + + Examples + -------- + >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) + >>> s + 0 cat + 1 dog + 2 NaN + 3 rabbit + dtype: object + + ``map`` accepts a ``dict`` or a ``Series``. Values that are not found + in the ``dict`` are converted to ``NaN``, unless the dict has a default + value (e.g. ``defaultdict``): + + >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) + 0 kitten + 1 puppy + 2 NaN + 3 NaN + dtype: object + + It also accepts a function: + + >>> s.map('I am a {}'.format) + 0 I am a cat + 1 I am a dog + 2 I am a nan + 3 I am a rabbit + dtype: object + + To avoid applying the function to missing values (and keep them as + ``NaN``) ``na_action='ignore'`` can be used: + + >>> s.map('I am a {}'.format, na_action='ignore') + 0 I am a cat + 1 I am a dog + 2 NaN + 3 I am a rabbit + dtype: object """ new_values = super(Series, self)._map_values( arg, na_action=na_action)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ######################## Docstring (pandas.Series.map) ######################## ################################################################################ Map values of Series according to input correspondence. Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`pandas.Series`. Parameters ---------- arg : function, dict, or Seriess Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- y : Series Same index as caller. Examples -------- Map inputs to outputs (both of type :class:`pandas.Series`): >>> x = pd.Series([1,2,3], index=['one', 'two', 'three']) >>> x one 1 two 2 three 3 dtype: int64 >>> y = pd.Series(['foo', 'bar', 'baz'], index=[1,2,3]) >>> y 1 foo 2 bar 3 baz dtype: object >>> x.map(y) one foo two bar three baz dtype: object Map a function to a :class:`pandas.Series`. >>> x.map(lambda x: x**2) one 1 two 4 three 9 dtype: int64 If ``arg`` is a dictionary, return a new :class:`pandas.Series` with values converted according to the dictionary's mapping: >>> z = {1: 'A', 2: 'B', 3: 'C'} >>> x.map(z) one A two B three C dtype: object Use ``na_action`` to control whether NA values are affected by the mapping function. 
>>> s = pd.Series([1, 2, 3, np.nan]) >>> s.map('this is a string {}'.format, na_action=None) 0 this is a string 1.0 1 this is a string 2.0 2 this is a string 3.0 3 this is a string nan dtype: object >>> s.map('this is a string {}'.format, na_action='ignore') 0 this is a string 1.0 1 this is a string 2.0 2 this is a string 3.0 3 NaN dtype: object See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When `arg` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``: >>> from collections import Counter >>> counter = Counter() >>> counter['bar'] += 1 >>> y.map(counter) 1 0 2 1 3 0 dtype: int64 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.map" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20450
2018-03-22T12:40:32Z
2018-08-18T22:22:28Z
2018-08-18T22:22:28Z
2018-08-19T10:50:51Z
DOC: general docstring formatting fixes
diff --git a/pandas/core/base.py b/pandas/core/base.py index f686975366419..b3eb9a0ae7530 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -855,6 +855,7 @@ def min(self): 'a' For a MultiIndex, the minimum is determined lexicographically. + >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.min() ('a', 1) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 081a8b39a3849..cf41737a04ba6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5465,8 +5465,6 @@ def _gotitem(self, key, ndim, subset=None): return self[key] _agg_doc = dedent(""" - Notes - ----- The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 74b760fa4e3c4..bd1a2371315a0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1910,20 +1910,20 @@ def to_hdf(self, path_or_buf, key, **kwargs): Identifier for the group in the store. mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - + - 'w': write, a new file is created (an existing file with - the same name would be deleted). + the same name would be deleted). - 'a': append, an existing file is opened for reading and - writing, and if the file does not exist it is created. + writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. format : {'fixed', 'table'}, default 'fixed' Possible values: - + - 'fixed': Fixed format. Fast writing/reading. Not-appendable, - nor searchable. + nor searchable. - 'table': Table format. Write as a PyTables Table structure - which may perform worse but allow more flexible operations - like searching / selecting subsets of the data. + which may perform worse but allow more flexible operations + like searching / selecting subsets of the data. 
append : bool, default False For Table formats, append the input data to the existing. data_columns : list of columns or True, optional @@ -5795,10 +5795,11 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, * None: (default) no fill restriction * 'inside' Only fill NaNs surrounded by valid values (interpolate). * 'outside' Only fill NaNs outside valid values (extrapolate). - .. versionadded:: 0.21.0 If limit is specified, consecutive NaNs will be filled in this direction. + + .. versionadded:: 0.21.0 inplace : bool, default False Update the NDFrame in place if possible. downcast : optional, 'infer' or None, defaults to None @@ -7717,6 +7718,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): The index values in ``truncate`` can be datetimes or string dates. + >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() @@ -7960,7 +7962,7 @@ def abs(self): 0 1 days dtype: timedelta64[ns] - Select rows with data closest to certian value using argsort (from + Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 95bfc8bfcb5c5..40f543e211f0c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,6 +1,7 @@ from datetime import datetime, timedelta import warnings import operator +from textwrap import dedent import numpy as np from pandas._libs import (lib, index as libindex, tslib as libts, @@ -2183,7 +2184,7 @@ def isna(self): mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values - (unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`). + (unless you set ``pandas.options.mode.use_inf_as_na = True``). .. 
versionadded:: 0.20.0 @@ -4700,7 +4701,7 @@ def _add_logical_methods(cls): %(outname)s : bool or array_like (if axis is specified) A single element array_like may be converted to bool.""" - _index_shared_docs['index_all'] = """ + _index_shared_docs['index_all'] = dedent(""" See Also -------- @@ -4738,9 +4739,9 @@ def _add_logical_methods(cls): >>> pd.Index([0, 0, 0]).any() False - """ + """) - _index_shared_docs['index_any'] = """ + _index_shared_docs['index_any'] = dedent(""" See Also -------- @@ -4761,7 +4762,7 @@ def _add_logical_methods(cls): >>> index = pd.Index([0, 0, 0]) >>> index.any() False - """ + """) def _make_logical_function(name, desc, f): @Substitution(outname=name, desc=desc) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index e9011a3eb912c..b906ea0f4784c 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -86,16 +86,12 @@ def strftime(self, date_format): Examples -------- - >>> import datetime - >>> data = pd.date_range(datetime.datetime(2018,3,10,19,27,52), - ... periods=4, freq='B') - >>> df = pd.DataFrame(data, columns=['date']) - >>> df.date[1] - Timestamp('2018-03-13 19:27:52') - >>> df.date[1].strftime('%d-%m-%Y') - '13-03-2018' - >>> df.date[1].strftime('%B %d, %Y, %r') - 'March 13, 2018, 07:27:52 PM' + >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), + ... 
periods=3, freq='s') + >>> rng.strftime('%B %d, %Y, %r') + Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', + 'March 10, 2018, 09:00:02 AM'], + dtype='object') """.format("https://docs.python.org/3/library/datetime.html" "#strftime-and-strptime-behavior") diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index be28f7091712f..118198ea0320d 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -51,7 +51,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` - indicate (1,2], (2,3], (3,4]. This argument is ignored when + indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as
Some general formatting fixes (+ fix example in DatetimeIndex.strftime to actually use that method and not Timestamp.strftime). Merging directly as it contains some PEP8 issues I merged in master.
https://api.github.com/repos/pandas-dev/pandas/pulls/20449
2018-03-22T10:32:17Z
2018-03-22T10:34:27Z
2018-03-22T10:34:27Z
2018-03-22T10:34:30Z
scatter plot and hexbin plot lose x-axis when colorbar is included.
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 0ca5b9cdf1d57..68f634bd5f85f 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -304,7 +304,7 @@ I/O Plotting ^^^^^^^^ -- +- Bug in :func:'DataFrame.plot.scatter' and :func:'DataFrame.plot.hexbin' caused x-axis label and ticklabels to disappear when colorbar was on in IPython inline backend (:issue:`10611` and :issue:`10678`) - - diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 8c713548d1ede..8c2ee90014302 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -833,6 +833,32 @@ def _post_plot_logic(self, ax, data): ax.set_ylabel(pprint_thing(y)) ax.set_xlabel(pprint_thing(x)) + def _plot_colorbar(self, ax, **kwds): + # Addresses issues #10611 and #10678: + # When plotting scatterplots and hexbinplots in IPython + # inline backend the colorbar axis height tends not to + # exactly match the parent axis height. + # The difference is due to small fractional differences + # in floating points with similar representation. + # To deal with this, this method forces the colorbar + # height to take the height of the parent axes. 
+ # For a more detailed description of the issue + # see the following link: + # https://github.com/ipython/ipython/issues/11215 + + img = ax.collections[0] + cbar = self.fig.colorbar(img, **kwds) + points = ax.get_position().get_points() + cbar_points = cbar.ax.get_position().get_points() + cbar.ax.set_position([cbar_points[0, 0], + points[0, 1], + cbar_points[1, 0] - cbar_points[0, 0], + points[1, 1] - points[0, 1]]) + # To see the discrepancy in axis heights uncomment + # the following two lines: + # print(points[1, 1] - points[0, 1]) + # print(cbar_points[1, 1] - cbar_points[0, 1]) + class ScatterPlot(PlanePlot): _kind = 'scatter' @@ -878,11 +904,9 @@ def _make_plot(self): scatter = ax.scatter(data[x].values, data[y].values, c=c_values, label=label, cmap=cmap, **self.kwds) if cb: - img = ax.collections[0] - kws = dict(ax=ax) if self.mpl_ge_1_3_1(): - kws['label'] = c if c_is_column else '' - self.fig.colorbar(img, **kws) + cbar_label = c if c_is_column else '' + self._plot_colorbar(ax, label=cbar_label) if label is not None: self._add_legend_handle(scatter, label) @@ -923,8 +947,7 @@ def _make_plot(self): ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds) if cb: - img = ax.collections[0] - self.fig.colorbar(img, ax=ax) + self._plot_colorbar(ax) def _make_legend(self): pass diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 101713b06df8c..8ef0cf7154b88 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1089,6 +1089,49 @@ def test_plot_scatter(self): axes = df.plot(x='x', y='y', kind='scatter', subplots=True) self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + @pytest.mark.slow + def test_if_scatterplot_colorbar_affects_xaxis_visibility(self): + # addressing issue #10611, to ensure colobar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. 
+ random_array = np.random.random((1000, 3)) + df = pd.DataFrame(random_array, + columns=['A label', 'B label', 'C label']) + + ax1 = df.plot.scatter(x='A label', y='B label') + ax2 = df.plot.scatter(x='A label', y='B label', c='C label') + + vis1 = [vis.get_visible() for vis in + ax1.xaxis.get_minorticklabels()] + vis2 = [vis.get_visible() for vis in + ax2.xaxis.get_minorticklabels()] + assert vis1 == vis2 + + vis1 = [vis.get_visible() for vis in + ax1.xaxis.get_majorticklabels()] + vis2 = [vis.get_visible() for vis in + ax2.xaxis.get_majorticklabels()] + assert vis1 == vis2 + + assert (ax1.xaxis.get_label().get_visible() == + ax2.xaxis.get_label().get_visible()) + + @pytest.mark.slow + def test_if_hexbin_xaxis_label_is_visible(self): + # addressing issue #10678, to ensure colobar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. + random_array = np.random.random((1000, 3)) + df = pd.DataFrame(random_array, + columns=['A label', 'B label', 'C label']) + + ax = df.plot.hexbin('A label', 'B label', gridsize=12) + assert all([vis.get_visible() for vis in + ax.xaxis.get_minorticklabels()]) + assert all([vis.get_visible() for vis in + ax.xaxis.get_majorticklabels()]) + assert ax.xaxis.get_label().get_visible() + @pytest.mark.slow def test_plot_scatter_with_categorical_data(self): # GH 16199
closes https://github.com/pandas-dev/pandas/issues/10611 closes https://github.com/pandas-dev/pandas/issues/10678. The x-axis for scatter plot and hexbin plot disappears when colorbar is included. This seems to be because colorbar axis is looped through in `_handle_shared_axes`: https://github.com/pandas-dev/pandas/blob/8a5830305d2d9a087fbe46bd968218104ffdfc49/pandas/plotting/_core.py#L426 after discussing with @TomAugspurger (issue https://github.com/pandas-dev/pandas/issues/10611), we decided to try adding the attribute `__is_pandas_colorbar` to colorbar axis object and skipping it during handling of shared axes. I've done some tests that seem to fix the issue. But we may need more tests: ``` %matplotlib inline import matplotlib.pylab as pl from mypandas import pandas as pd import numpy as np random_array = np.random.random((1000,3)) df = pd.DataFrame(random_array,columns=['A label','B label','C label']) df.plot.scatter('A label','B label',c='C label', PATCH_MODE_FLAG = False);pl.title('pandas current version'); df.plot.scatter('A label','B label',c='C label', PATCH_MODE_FLAG = True);pl.title('patch fixing x-axis'); df.plot.hexbin('A label','B label', gridsize=25, PATCH_MODE_FLAG = False);pl.title('pandas current version'); df.plot.hexbin('A label','B label', gridsize=25, PATCH_MODE_FLAG = True);pl.title('patch fixing x-axis'); ``` ![image](https://user-images.githubusercontent.com/26352146/37747996-49642266-2d58-11e8-8c6f-c382267e79db.png) ![image](https://user-images.githubusercontent.com/26352146/37747999-4d88d986-2d58-11e8-8445-f1348c35d892.png) ![image](https://user-images.githubusercontent.com/26352146/37748001-5109d93e-2d58-11e8-9f6f-73a68329f731.png) ![image](https://user-images.githubusercontent.com/26352146/37748005-54beb590-2d58-11e8-8233-4e26b135fdad.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/20446
2018-03-22T02:47:27Z
2018-07-04T12:59:42Z
2018-07-04T12:59:41Z
2018-07-04T13:01:54Z
API: Preserve int columns in to_dict('index')
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1d60febe29b4a..c9653f3b7a3bd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -715,6 +715,7 @@ Other API Changes - :func:`Series.str.replace` now takes an optional `regex` keyword which, when set to ``False``, uses literal string replacement rather than regex replacement (:issue:`16808`) - :func:`DatetimeIndex.strftime` and :func:`PeriodIndex.strftime` now return an ``Index`` instead of a numpy array to be consistent with similar accessors (:issue:`20127`) - Constructing a Series from a list of length 1 no longer broadcasts this list when a longer index is specified (:issue:`19714`, :issue:`20391`). +- :func:`DataFrame.to_dict` with ``orient='index'`` no longer casts int columns to float for a DataFrame with only int and float columns (:issue:`18580`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cf41737a04ba6..d3eda29eceac8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1102,7 +1102,8 @@ def to_dict(self, orient='dict', into=dict): for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): - return into_c((k, v.to_dict(into)) for k, v in self.iterrows()) + return into_c((t[0], dict(zip(self.columns, t[1:]))) + for t in self.itertuples()) else: raise ValueError("orient '%s' not understood" % orient) diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 024de8bc13f72..82dadacd5b1ac 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -5,6 +5,7 @@ import pytest import pytz import collections +from collections import OrderedDict, defaultdict import numpy as np from pandas import compat @@ -288,3 +289,29 @@ def test_frame_to_dict_tz(self): ] tm.assert_dict_equal(result[0], expected[0]) tm.assert_dict_equal(result[1], expected[1]) + + 
@pytest.mark.parametrize('into, expected', [ + (dict, {0: {'int_col': 1, 'float_col': 1.0}, + 1: {'int_col': 2, 'float_col': 2.0}, + 2: {'int_col': 3, 'float_col': 3.0}}), + (OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}), + (1, {'int_col': 2, 'float_col': 2.0}), + (2, {'int_col': 3, 'float_col': 3.0})])), + (defaultdict(list), defaultdict(list, + {0: {'int_col': 1, 'float_col': 1.0}, + 1: {'int_col': 2, 'float_col': 2.0}, + 2: {'int_col': 3, 'float_col': 3.0}})) + ]) + def test_to_dict_index_dtypes(self, into, expected): + # GH 18580 + # When using to_dict(orient='index') on a dataframe with int + # and float columns only the int columns were cast to float + + df = DataFrame({'int_col': [1, 2, 3], + 'float_col': [1.0, 2.0, 3.0]}) + + result = df.to_dict(orient='index', into=into) + cols = ['int_col', 'float_col'] + result = DataFrame.from_dict(result, orient='index')[cols] + expected = DataFrame.from_dict(expected, orient='index')[cols] + tm.assert_frame_equal(result, expected)
- [x] closes #18580 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20444
2018-03-21T22:58:59Z
2018-03-25T14:10:27Z
2018-03-25T14:10:27Z
2018-03-25T16:09:09Z