title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: Refactor open into context manager in io tests
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index cfac77291803d..0b1c1ca178762 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -128,9 +128,8 @@ def test_string_io(self): with ensure_clean(self.path) as p: s = df.to_msgpack() - fh = open(p, 'wb') - fh.write(s) - fh.close() + with open(p, 'wb') as fh: + fh.write(s) result = read_msgpack(p) tm.assert_frame_equal(result, df) diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 0b0d4334c86a3..e369dfda6deac 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -75,9 +75,8 @@ def test_from_csv(self): series_h = self.read_csv(path, header=0) assert series_h.name == "series" - outfile = open(path, "w") - outfile.write("1998-01-01|1.0\n1999-01-01|2.0") - outfile.close() + with open(path, "w") as outfile: + outfile.write("1998-01-01|1.0\n1999-01-01|2.0") series = self.read_csv(path, sep="|") check_series = Series({datetime(1998, 1, 1): 1.0,
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Building on #21105 by @WillAyd, I cleaned up some more `open` statements.
https://api.github.com/repos/pandas-dev/pandas/pulls/21139
2018-05-19T23:21:01Z
2018-05-21T10:37:53Z
2018-05-21T10:37:53Z
2018-05-21T12:00:47Z
DOC: Improve the docstring of Timedelta.delta redux
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7bb6c1dbb304..3f0b4db87e5ed 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -760,7 +760,32 @@ cdef class _Timedelta(timedelta): @property def delta(self): - """ return out delta in ns (for internal compat) """ + """ + Return the timedelta in nanoseconds (ns), for internal compatibility. + + Returns + ------- + int + Timedelta in nanoseconds. + + Examples + -------- + >>> td = pd.Timedelta('1 days 42 ns') + >>> td.delta + 86400000000042 + + >>> td = pd.Timedelta('3 s') + >>> td.delta + 3000000000 + + >>> td = pd.Timedelta('3 ms 5 us') + >>> td.delta + 3005000 + + >>> td = pd.Timedelta(42, unit='ns') + >>> td.delta + 42 + """ return self.value @property
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry rather than trying to pick and choose items from my previous PR #21135 (which somehow had a number of other items sucked into it) ... gonna start from scratch here and close out the old PR. tried to incorporate changes noted in previous conversations with @WillAyd and @jreback (thanks for the guidance!) ``` ################################################################################ ###################### Docstring (pandas.Timedelta.delta) ###################### ################################################################################ Return the timedelta in nanoseconds (ns), for internal compatibility. Returns ------- int Timedelta in nanoseconds. Examples -------- >>> td = pd.Timedelta('1 days 42 ns') >>> td.delta 86400000000042 >>> td = pd.Timedelta('3 s') >>> td.delta 3000000000 >>> td = pd.Timedelta('3 ms 5 us') >>> td.delta 3005000 >>> td = pd.Timedelta(42, unit='ns') >>> td.delta 42 ################################################################################ ################################## Validation ################################## ################################################################################ ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21138
2018-05-19T20:12:03Z
2018-05-21T11:00:05Z
2018-05-21T11:00:04Z
2018-06-08T17:11:44Z
BUG: Should not raise error in concatenating Series with numpy scalar and tuple names (GH21015)
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9382d74f95295..5973ad2ebf43f 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -82,7 +82,7 @@ Plotting Reshaping ^^^^^^^^^ -- +- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) - Categorical diff --git a/pandas/core/common.py b/pandas/core/common.py index b9182bfd2cbe2..1de8269c9a0c6 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -55,8 +55,11 @@ def flatten(l): def _consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: - if obj.name != name: - return None + try: + if obj.name != name: + name = None + except ValueError: + name = None return name diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index f5e58fa70e1c4..dea305d4b3fee 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2487,3 +2487,14 @@ def test_concat_aligned_sort_does_not_raise(): columns=[1, 'a']) result = pd.concat([df, df], ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("s1name,s2name", [ + (np.int64(190), (43, 0)), (190, (43, 0))]) +def test_concat_series_name_npscalar_tuple(s1name, s2name): + # GH21015 + s1 = pd.Series({'a': 1, 'b': 2}, name=s1name) + s2 = pd.Series({'c': 5, 'd': 6}, name=s2name) + result = pd.concat([s1, s2]) + expected = pd.Series({'a': 1, 'b': 2, 'c': 5, 'd': 6}) + tm.assert_series_equal(result, expected)
- [x] closes #21015 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21132
2018-05-19T13:26:57Z
2018-05-21T10:41:20Z
2018-05-21T10:41:20Z
2018-06-08T17:11:01Z
Spell check
diff --git a/pandas/core/base.py b/pandas/core/base.py index aa051c6f5eaef..c331ead8d2fef 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -114,7 +114,7 @@ def _reset_cache(self, key=None): def __sizeof__(self): """ - Generates the total memory usage for a object that returns + Generates the total memory usage for an object that returns either a value or Series of values """ if hasattr(self, 'memory_usage'):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21130
2018-05-19T04:24:56Z
2018-05-19T17:46:10Z
2018-05-19T17:46:10Z
2018-06-08T17:09:19Z
DEPR: Add deprecated index attribute names to deprecation list
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9382d74f95295..c8a2076064c02 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -44,6 +44,8 @@ Documentation Changes Bug Fixes ~~~~~~~~~ +- tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) + Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c638b9e4ea117..7a853d575aa69 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -12,7 +12,8 @@ class DirNamesMixin(object): _accessors = frozenset([]) - _deprecations = frozenset(['asobject']) + _deprecations = frozenset( + ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides']) def _dir_deletions(self): """ delete unwanted __dir__ for this object """ diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f4fa547574b9e..1e4dd2921b3f5 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2088,6 +2088,17 @@ def test_get_duplicates_deprecated(self): with tm.assert_produces_warning(FutureWarning): index.get_duplicates() + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip('IPython', minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; idx = pd.Index([1, 2])" + ip.run_code(code) + with tm.assert_produces_warning(None): + with provisionalcompleter('ignore'): + list(ip.Completer.completions('idx.', 4)) + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ
In ipython, when you press tab (e.g. ``idx.<tab>``) a long list of deprecations shows up: ``` C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.base is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.data is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.flags is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.itemsize is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.strides is deprecated and will be removed in a future version getattr(obj, name) ``` With this PR we avoid getting that list of deprecations in iPython. I'm not sure if/where this should go in the whatsnew document.
https://api.github.com/repos/pandas-dev/pandas/pulls/21125
2018-05-18T20:57:34Z
2018-05-21T10:44:49Z
2018-05-21T10:44:49Z
2018-10-27T08:16:31Z
Resolution docstring
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 76849f2116123..a76ebc8000e54 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -834,7 +834,46 @@ cdef class _Timedelta(timedelta): @property def resolution(self): - """ return a string representing the lowest resolution that we have """ + """ + Return a string representing the lowest timedelta resolution. + + Each timedelta has a defined resolution that represents the lowest OR + most granular level of precision. Each level of resolution is + represented by a short string as defined below: + + Resolution: Return value + + * Days: 'D' + * Hours: 'H' + * Minutes: 'T' + * Seconds: 'S' + * Milliseconds: 'L' + * Microseconds: 'U' + * Nanoseconds: 'N' + + Returns + ------- + str + Timedelta resolution. + + Examples + -------- + >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.resolution + 'N' + + >>> td = pd.Timedelta('1 days 2 min 3 us') + >>> td.resolution + 'U' + + >>> td = pd.Timedelta('2 min 3 s') + >>> td.resolution + 'S' + + >>> td = pd.Timedelta(36, unit='us') + >>> td.resolution + 'U' + """ self._ensure_components() if self._ns:
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ``` ################################################################################ ################### Docstring (pandas.Timedelta.resolution) ################### ################################################################################ Return a string representing the lowest (i.e. smallest) time resolution. Each timedelta has a defined resolution that represents the lowest OR most granular level of precision. Each level of resolution is represented by a short string as defined below: ============ ============ Resolution Return value ============ ============ Days ``'D'`` Hours ``'H'`` Minutes ``'T'`` Seconds ``'S'`` Milliseconds ``'L'`` Microseconds ``'U'`` Nanoseconds ``'N'`` ============ ============ Returns ------- str Time resolution. Examples -------- **Using string input** >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') >>> td.resolution 'N' >>> td = pd.Timedelta('1 days 2 min 3 us') >>> td.resolution 'U' >>> td = pd.Timedelta('2 min 3 s') >>> td.resolution 'S' **Using integer input** >>> td = pd.Timedelta(36, unit='us') >>> td.resolution 'U' ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21122
2018-05-18T16:16:56Z
2018-07-08T12:58:40Z
2018-07-08T12:58:40Z
2018-07-08T12:58:49Z
Idx droplevel
diff --git a/doc/source/api.rst b/doc/source/api.rst index d00e5511f1100..4faec93490fde 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1459,7 +1459,6 @@ Modifying and Computations Index.is_floating Index.is_integer Index.is_interval - Index.is_lexsorted_for_tuple Index.is_mixed Index.is_numeric Index.is_object @@ -1471,11 +1470,19 @@ Modifying and Computations Index.where Index.take Index.putmask - Index.set_names Index.unique Index.nunique Index.value_counts +Compatibility with MultiIndex +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Index.set_names + Index.is_lexsorted_for_tuple + Index.droplevel + Missing Values ~~~~~~~~~~~~~~ .. autosummary:: diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9382d74f95295..0071f315851df 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -65,6 +65,7 @@ Indexing ^^^^^^^^ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) +- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`) - I/O diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 77a67c048a48d..0986ed289e603 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4096,9 +4096,8 @@ def _maybe_casted_values(index, labels=None): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if isinstance(self.index, MultiIndex): - if len(level) < self.index.nlevels: - new_index = self.index.droplevel(level) + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index df39eb5fd8312..f79288c167356 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3158,6 +3158,60 @@ def 
_get_level_values(self, level): get_level_values = _get_level_values + def droplevel(self, level=0): + """ + Return index with requested level(s) removed. If resulting index has + only 1 level left, the result will be of Index type, not MultiIndex. + + .. versionadded:: 0.23.1 (support for non-MultiIndex) + + Parameters + ---------- + level : int, str, or list-like, default 0 + If a string is given, must be the name of a level + If list-like, elements must be names or indexes of levels. + + Returns + ------- + index : Index or MultiIndex + """ + if not isinstance(level, (tuple, list)): + level = [level] + + levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] + + if len(level) == 0: + return self + if len(level) >= self.nlevels: + raise ValueError("Cannot remove {} levels from an index with {} " + "levels: at least one level must be " + "left.".format(len(level), self.nlevels)) + # The two checks above guarantee that here self is a MultiIndex + + new_levels = list(self.levels) + new_labels = list(self.labels) + new_names = list(self.names) + + for i in levnums: + new_levels.pop(i) + new_labels.pop(i) + new_names.pop(i) + + if len(new_levels) == 1: + + # set nan if needed + mask = new_labels[0] == -1 + result = new_levels[0].take(new_labels[0]) + if mask.any(): + result = result.putmask(mask, np.nan) + + result.name = new_names[0] + return result + else: + from .multi import MultiIndex + return MultiIndex(levels=new_levels, labels=new_labels, + names=new_names, verify_integrity=False) + _index_shared_docs['get_indexer'] = """ Compute indexer and mask for new index given the current index. 
The indexer should be then used as an input to ndarray.take to align the diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fbcf06a28c1e5..ea0fab7e17648 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1761,52 +1761,6 @@ def _drop_from_level(self, labels, level): return self[mask] - def droplevel(self, level=0): - """ - Return Index with requested level removed. If MultiIndex has only 2 - levels, the result will be of Index type not MultiIndex. - - Parameters - ---------- - level : int/level name or list thereof - - Notes - ----- - Does not check if result index is unique or not - - Returns - ------- - index : Index or MultiIndex - """ - levels = level - if not isinstance(levels, (tuple, list)): - levels = [level] - - new_levels = list(self.levels) - new_labels = list(self.labels) - new_names = list(self.names) - - levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1] - - for i in levnums: - new_levels.pop(i) - new_labels.pop(i) - new_names.pop(i) - - if len(new_levels) == 1: - - # set nan if needed - mask = new_labels[0] == -1 - result = new_levels[0].take(new_labels[0]) - if mask.any(): - result = result.putmask(mask, np.nan) - - result.name = new_names[0] - return result - else: - return MultiIndex(levels=new_levels, labels=new_labels, - names=new_names, verify_integrity=False) - def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. 
diff --git a/pandas/core/series.py b/pandas/core/series.py index 6d396e845219e..7947ce576dc6f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1199,9 +1199,8 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if isinstance(self.index, MultiIndex): - if len(level) < self.index.nlevels: - new_index = self.index.droplevel(level) + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if inplace: self.index = new_index @@ -3177,7 +3176,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): # handle ufuncs and lambdas if kwds or args and not isinstance(func, np.ufunc): - f = lambda x: func(x, *args, **kwds) + def f(x): + return func(x, *args, **kwds) else: f = func diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f4fa547574b9e..7fc00ed8f5411 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -245,6 +245,25 @@ def test_constructor_int_dtype_nan(self): result = Index(data, dtype='float') tm.assert_index_equal(result, expected) + def test_droplevel(self, indices): + # GH 21115 + if isinstance(indices, MultiIndex): + # Tested separately in test_multi.py + return + + assert indices.droplevel([]).equals(indices) + + for level in indices.name, [indices.name]: + if isinstance(indices.name, tuple) and level is indices.name: + # GH 21121 : droplevel with tuple name + continue + with pytest.raises(ValueError): + indices.droplevel(level) + + for level in 'wrong', ['wrong']: + with pytest.raises(KeyError): + indices.droplevel(level) + @pytest.mark.parametrize("dtype", ['int64', 'uint64']) def test_constructor_int_dtype_nan_raises(self, dtype): # see gh-15187 diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 37f70090c179f..c9f6bc9151d00 100644 --- 
a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -99,7 +99,8 @@ def test_where_array_like(self): cond = [False, True] for klass in klasses: - f = lambda: i.where(klass(cond)) + def f(): + return i.where(klass(cond)) pytest.raises(NotImplementedError, f) def test_repeat(self): @@ -2078,7 +2079,7 @@ def test_droplevel_with_names(self): expected = index.droplevel(1) assert dropped.equals(expected) - def test_droplevel_multiple(self): + def test_droplevel_list(self): index = MultiIndex( levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( @@ -2089,6 +2090,16 @@ def test_droplevel_multiple(self): expected = index[:2].droplevel(2).droplevel(0) assert dropped.equals(expected) + dropped = index[:2].droplevel([]) + expected = index[:2] + assert dropped.equals(expected) + + with pytest.raises(ValueError): + index[:2].droplevel(['one', 'two', 'three']) + + with pytest.raises(KeyError): + index[:2].droplevel(['one', 'four']) + def test_drop_not_lexsorted(self): # GH 12078 @@ -2405,7 +2416,8 @@ def check(nlevels, with_nulls): # with a dup if with_nulls: - f = lambda a: np.insert(a, 1000, a[0]) + def f(a): + return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) index = MultiIndex(levels=levels, labels=labels) else:
- [x] closes #21115 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Collateral change: if ``mi`` is a ``MultiIndex``, ``mi.droplevel([])`` will now return ``mi`` itself, not a copy of it. Given that these are immutable objects, seems OK to me - but if a copy is preferred, the change is trivial.
https://api.github.com/repos/pandas-dev/pandas/pulls/21116
2018-05-18T08:18:02Z
2018-05-21T23:13:49Z
2018-05-21T23:13:49Z
2018-06-29T08:41:52Z
DOC: Add linspace range behavior to the timeseries/timedeltas/interval docs
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index c81842d3d9212..ec517d3e07bdf 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -924,6 +924,55 @@ bins, with ``NaN`` representing a missing value similar to other dtypes. pd.cut([0, 3, 5, 1], bins=c.categories) + +Generating Ranges of Intervals +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If we need intervals on a regular frequency, we can use the :func:`interval_range` function +to create an ``IntervalIndex`` using various combinations of ``start``, ``end``, and ``periods``. +The default frequency for ``interval_range`` is a 1 for numeric intervals, and calendar day for +datetime-like intervals: + +.. ipython:: python + + pd.interval_range(start=0, end=5) + + pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4) + + pd.interval_range(end=pd.Timedelta('3 days'), periods=3) + +The ``freq`` parameter can used to specify non-default frequencies, and can utilize a variety +of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like intervals: + +.. ipython:: python + + pd.interval_range(start=0, periods=5, freq=1.5) + + pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W') + + pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H') + +Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals +are closed on. Intervals are closed on the right side by default. + +.. ipython:: python + + pd.interval_range(start=0, end=4, closed='both') + + pd.interval_range(start=0, end=4, closed='neither') + +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements +in the resulting ``IntervalIndex``: + +.. 
ipython:: python + + pd.interval_range(start=0, end=6, periods=4) + + pd.interval_range(pd.Timestamp('2018-01-01'), pd.Timestamp('2018-02-28'), periods=3) + Miscellaneous indexing FAQ -------------------------- diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 5f3a01f0725d4..745810704f665 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -352,8 +352,8 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the TimedeltaIndex -------------- -To generate an index with time delta, you can use either the ``TimedeltaIndex`` or -the ``timedelta_range`` constructor. +To generate an index with time delta, you can use either the :class:`TimedeltaIndex` or +the :func:`timedelta_range` constructor. Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``, or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent missing values. @@ -363,13 +363,47 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss pd.TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(2,'D'), datetime.timedelta(days=2,seconds=2)]) -Similarly to ``date_range``, you can construct regular ranges of a ``TimedeltaIndex``: +Generating Ranges of Time Deltas +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Similar to :func:`date_range`, you can construct regular ranges of a ``TimedeltaIndex`` +using :func:`timedelta_range`. The default frequency for ``timedelta_range`` is +calendar day: + +.. ipython:: python + + pd.timedelta_range(start='1 days', periods=5) + +Various combinations of ``start``, ``end``, and ``periods`` can be used with +``timedelta_range``: + +.. ipython:: python + + pd.timedelta_range(start='1 days', end='5 days') + + pd.timedelta_range(end='10 days', periods=4) + +The ``freq`` parameter can passed a variety of :ref:`frequency aliases <timeseries.offset_aliases>`: .. 
ipython:: python - pd.timedelta_range(start='1 days', periods=5, freq='D') pd.timedelta_range(start='1 days', end='2 days', freq='30T') + pd.timedelta_range(start='1 days', periods=5, freq='2D5H') + + +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +timedeltas from ``start`` to ``end`` inclusively, with ``periods`` number of elements +in the resulting ``TimedeltaIndex``: + +.. ipython:: python + + pd.timedelta_range('0 days', '4 days', periods=5) + + pd.timedelta_range('0 days', '4 days', periods=10) + Using the TimedeltaIndex ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 73e3e721aad71..1b0cf86995a39 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -393,6 +393,18 @@ of those specified will not be generated: pd.bdate_range(start=start, periods=20) +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +dates from ``start`` to ``end`` inclusively, with ``periods`` number of elements in the +resulting ``DatetimeIndex``: + +.. ipython:: python + + pd.date_range('2018-01-01', '2018-01-05', periods=5) + + pd.date_range('2018-01-01', '2018-01-05', periods=10) + .. _timeseries.custom-freq-ranges: Custom Frequency Ranges
Follow-up to #21009 `timeseries.rst`: - Added documentation for linspace behavior of `date_range` `timedeltas.rst`: - Cleaned up `timedelta_range` documentation for the standard behavior - Added documentation for linspace behavior of `timedelta_range` `advanced.rst`: - Added documentation for the standard behavior of `interval_range` - Added documentation for linspace behavior of `interval_range`
https://api.github.com/repos/pandas-dev/pandas/pulls/21114
2018-05-18T04:43:06Z
2018-05-22T07:15:04Z
2018-05-22T07:15:04Z
2018-06-22T17:14:57Z
BUG: to_clipboard fails to format output for Excel
diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index 987f171878d0b..1ca693755b3c6 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -57,6 +57,7 @@ Fixed Regressions - Bug in both :meth:`DataFrame.first_valid_index` and :meth:`Series.first_valid_index` raised for a row index having duplicate values (:issue:`21441`) - Fixed regression in unary negative operations with object dtype (:issue:`21380`) - Bug in :meth:`Timestamp.ceil` and :meth:`Timestamp.floor` when timestamp is a multiple of the rounding frequency (:issue:`21262`) +- Fixed regression in :func:`to_clipboard` that defaulted to copying dataframes with space delimited instead of tab delimited (:issue:`21104`) .. _whatsnew_0232.performance: diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index dcc221ce978b3..b3f40b3a2429c 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,6 +1,7 @@ """ io on the clipboard """ from pandas import compat, get_option, option_context, DataFrame -from pandas.compat import StringIO, PY2 +from pandas.compat import StringIO, PY2, PY3 +import warnings def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover @@ -32,7 +33,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover # try to decode (if needed on PY3) # Strange. linux py33 doesn't complain, win py33 does - if compat.PY3: + if PY3: try: text = compat.bytes_to_str( text, encoding=(kwargs.get('encoding') or @@ -55,11 +56,27 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover counts = {x.lstrip().count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: - sep = r'\t' + sep = '\t' + # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get('delim_whitespace') is None: sep = r'\s+' + # Regex separator currently only works with python engine. 
+ # Default to python if separator is multi-character (regex) + if len(sep) > 1 and kwargs.get('engine') is None: + kwargs['engine'] = 'python' + elif len(sep) > 1 and kwargs.get('engine') == 'c': + warnings.warn('read_clipboard with regex separator does not work' + ' properly with c engine') + + # In PY2, the c table reader first encodes text with UTF-8 but Python + # table reader uses the format of the passed string. For consistency, + # encode strings for python engine so that output from python and c + # engines produce consistent results + if kwargs.get('engine') == 'python' and PY2: + text = text.encode('utf-8') + return read_table(StringIO(text), sep=sep, **kwargs) @@ -99,7 +116,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover if excel: try: if sep is None: - sep = r'\t' + sep = '\t' buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) @@ -108,8 +125,11 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover text = text.decode('utf-8') clipboard_set(text) return - except: - pass + except TypeError: + warnings.warn('to_clipboard in excel mode requires a single ' + 'character separator.') + elif sep is not None: + warnings.warn('to_clipboard with excel=False ignores the sep argument') if isinstance(obj, DataFrame): # str(df) has various unhelpful defaults, like truncation diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 80fddd50fc9a8..a6b331685e72a 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -88,8 +88,6 @@ def check_round_trip_frame(self, data, excel=None, sep=None, tm.assert_frame_equal(data, result, check_dtype=False) # Test that default arguments copy as tab delimited - @pytest.mark.xfail(reason='to_clipboard defaults to space delim. 
' - 'Issue in #21104, Fixed in #21111') def test_round_trip_frame(self, df): self.check_round_trip_frame(df) @@ -99,10 +97,6 @@ def test_round_trip_frame_sep(self, df, sep): self.check_round_trip_frame(df, sep=sep) # Test white space separator - @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " - "aren't handled correctly in default c engine. Fixed " - "in #21111 by defaulting to python engine for " - "whitespace separator") def test_round_trip_frame_string(self, df): df.to_clipboard(excel=False, sep=None) result = read_clipboard() @@ -111,21 +105,17 @@ def test_round_trip_frame_string(self, df): # Two character separator is not supported in to_clipboard # Test that multi-character separators are not silently passed - @pytest.mark.xfail(reason="Not yet implemented. Fixed in #21111") def test_excel_sep_warning(self, df): with tm.assert_produces_warning(): df.to_clipboard(excel=True, sep=r'\t') # Separator is ignored when excel=False and should produce a warning - @pytest.mark.xfail(reason="Not yet implemented. Fixed in #21111") def test_copy_delim_warning(self, df): with tm.assert_produces_warning(): df.to_clipboard(excel=False, sep='\t') # Tests that the default behavior of to_clipboard is tab # delimited and excel="True" - @pytest.mark.xfail(reason="to_clipboard defaults to space delim. Issue in " - "#21104, Fixed in #21111") @pytest.mark.parametrize('sep', ['\t', None, 'default']) @pytest.mark.parametrize('excel', [True, None, 'default']) def test_clipboard_copy_tabs_default(self, sep, excel, df): @@ -139,10 +129,6 @@ def test_clipboard_copy_tabs_default(self, sep, excel, df): assert clipboard_get() == df.to_csv(sep='\t') # Tests reading of white space separated tables - @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " - "aren't handled correctly. in default c engine. 
Fixed " - "in #21111 by defaulting to python engine for " - "whitespace separator") @pytest.mark.parametrize('sep', [None, 'default']) @pytest.mark.parametrize('excel', [False]) def test_clipboard_copy_strings(self, sep, excel, df): @@ -193,8 +179,6 @@ def test_invalid_encoding(self, df): with pytest.raises(NotImplementedError): pd.read_clipboard(encoding='ascii') - @pytest.mark.xfail(reason='to_clipboard defaults to space delim. ' - 'Issue in #21104, Fixed in #21111') @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) def test_round_trip_valid_encodings(self, enc, df): self.check_round_trip_frame(df, encoding=enc)
`DataFrame.to_clipboard` has been broken for pasting to excel. Tables are copied with spaces as delimiters instead of tabs (#21104). This issue originated in https://github.com/pandas-dev/pandas/commit/e1d5a2738235fec22f3cfad4814e09e3e3786f8c#diff-3f25860d9237143c1952a1f93c3aae18R102 which I've partially reverted. By setting the delimiter to `r'\t'`, a 2 character string, obj.to_csv raised an error, but is was caught and passed silently. I reverted the separator to `'\t'`. Similar issue in `read_clipboard` also fixed. - [x] closes #21104
https://api.github.com/repos/pandas-dev/pandas/pulls/21111
2018-05-17T23:18:20Z
2018-06-29T12:22:17Z
2018-06-29T12:22:16Z
2018-07-02T15:44:24Z
DOC: Add sphinx spelling extension
diff --git a/Makefile b/Makefile index c79175cd3c401..4a82566cf726e 100644 --- a/Makefile +++ b/Makefile @@ -23,3 +23,4 @@ doc: cd doc; \ python make.py clean; \ python make.py html + python make.py spellcheck diff --git a/doc/make.py b/doc/make.py index 4967f30453fd1..4d54a2415a194 100755 --- a/doc/make.py +++ b/doc/make.py @@ -224,8 +224,9 @@ def _sphinx_build(self, kind): -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html') """ - if kind not in ('html', 'latex'): - raise ValueError('kind must be html or latex, not {}'.format(kind)) + if kind not in ('html', 'latex', 'spelling'): + raise ValueError('kind must be html, latex or ' + 'spelling, not {}'.format(kind)) self._run_os('sphinx-build', '-j{}'.format(self.num_jobs), @@ -304,6 +305,18 @@ def zip_html(self): '-q', *fnames) + def spellcheck(self): + """Spell check the documentation.""" + self._sphinx_build('spelling') + output_location = os.path.join('build', 'spelling', 'output.txt') + with open(output_location) as output: + lines = output.readlines() + if lines: + raise SyntaxError( + 'Found misspelled words.' + ' Check pandas/doc/build/spelling/output.txt' + ' for more details.') + def main(): cmds = [method for method in dir(DocBuilder) if not method.startswith('_')] diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index c81842d3d9212..19d745121ce17 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -342,7 +342,7 @@ As usual, **both sides** of the slicers are included as this is label indexing. columns=micolumns).sort_index().sort_index(axis=1) dfmi -Basic multi-index slicing using slices, lists, and labels. +Basic MultiIndex slicing using slices, lists, and labels. .. 
ipython:: python @@ -990,7 +990,7 @@ On the other hand, if the index is not monotonic, then both slice bounds must be KeyError: 'Cannot get right slice bound for non-unique label: 3' :meth:`Index.is_monotonic_increasing` and :meth:`Index.is_monotonic_decreasing` only check that -an index is weakly monotonic. To check for strict montonicity, you can combine one of those with +an index is weakly monotonic. To check for strict monotonicity, you can combine one of those with :meth:`Index.is_unique` .. ipython:: python diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 8d09f1fc04c1f..d4efa8a28f6c5 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -593,7 +593,7 @@ categorical columns: frame = pd.DataFrame({'a': ['Yes', 'Yes', 'No', 'No'], 'b': range(4)}) frame.describe() -This behaviour can be controlled by providing a list of types as ``include``/``exclude`` +This behavior can be controlled by providing a list of types as ``include``/``exclude`` arguments. The special value ``all`` can also be used: .. ipython:: python diff --git a/doc/source/conf.py b/doc/source/conf.py index d516e67b947ba..97081bec863b7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -73,10 +73,14 @@ 'sphinx.ext.ifconfig', 'sphinx.ext.linkcode', 'nbsphinx', + 'sphinxcontrib.spelling' ] exclude_patterns = ['**.ipynb_checkpoints'] +spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt'] +spelling_ignore_pypi_package_names = True + with open("index.rst") as f: index_rst_lines = f.readlines() diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index e9939250052f1..6ae93ba46fa5c 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -436,6 +436,25 @@ the documentation are also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__, see also the :ref:`Continuous Integration <contributing.ci>` section. 
+Spell checking documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When contributing to documentation to **pandas** it's good to check if your work +contains any spelling errors. Sphinx provides an easy way to spell check documentation +and docstrings. + +Running the spell check is easy. Just navigate to your local ``pandas/doc/`` directory and run:: + + python make.py spellcheck + +The spellcheck will take a few minutes to run (between 1 to 6 minutes). Sphinx will alert you +with warnings and misspelt words - these misspelt words will be added to a file called +``output.txt`` and you can find it on your local directory ``pandas/doc/build/spelling/``. + +The Sphinx spelling extension uses an EN-US dictionary to correct words, what means that in +some cases you might need to add a word to this dictionary. You can do so by adding the word to +the bag-of-words file named ``spelling_wordlist.txt`` located in the folder ``pandas/doc/``. + .. _contributing.code: Contributing to the code base diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst index f80bfd9253764..6b2ecfe66d5e2 100644 --- a/doc/source/contributing_docstring.rst +++ b/doc/source/contributing_docstring.rst @@ -103,7 +103,7 @@ left before or after the docstring. The text starts in the next line after the opening quotes. The closing quotes have their own line (meaning that they are not at the end of the last sentence). -In rare occasions reST styles like bold text or itallics will be used in +In rare occasions reST styles like bold text or italics will be used in docstrings, but is it common to have inline code, which is presented between backticks. It is considered inline code: @@ -706,7 +706,7 @@ than 5, to show the example with the default values. If doing the ``mean``, we could use something like ``[1, 2, 3]``, so it is easy to see that the value returned is the mean. 
-For more complex examples (groupping for example), avoid using data without +For more complex examples (grouping for example), avoid using data without interpretation, like a matrix of random numbers with columns A, B, C, D... And instead use a meaningful example, which makes it easier to understand the concept. Unless required by the example, use names of animals, to keep examples @@ -877,7 +877,7 @@ be tricky. Here are some attention points: the actual error only the error name is sufficient. * If there is a small part of the result that can vary (e.g. a hash in an object - represenation), you can use ``...`` to represent this part. + representation), you can use ``...`` to represent this part. If you want to show that ``s.plot()`` returns a matplotlib AxesSubplot object, this will fail the doctest :: diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 893642410af02..fdc3b38cfdebc 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -286,7 +286,7 @@ New Columns df = pd.DataFrame( {'AAA' : [1,1,1,2,2,2,3,3], 'BBB' : [2,1,3,4,5,1,2,3]}); df -Method 1 : idxmin() to get the index of the mins +Method 1 : idxmin() to get the index of the minimums .. ipython:: python @@ -307,7 +307,7 @@ MultiIndexing The :ref:`multindexing <advanced.hierarchical>` docs. -`Creating a multi-index from a labeled frame +`Creating a MultiIndex from a labeled frame <http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__ .. ipython:: python @@ -330,7 +330,7 @@ The :ref:`multindexing <advanced.hierarchical>` docs. Arithmetic ********** -`Performing arithmetic with a multi-index that needs broadcasting +`Performing arithmetic with a MultiIndex that needs broadcasting <http://stackoverflow.com/questions/19501510/divide-entire-pandas-multiindex-dataframe-by-dataframe-variable/19502176#19502176>`__ .. 
ipython:: python @@ -342,7 +342,7 @@ Arithmetic Slicing ******* -`Slicing a multi-index with xs +`Slicing a MultiIndex with xs <http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__ .. ipython:: python @@ -363,7 +363,7 @@ To take the cross section of the 1st level and 1st axis the index: df.xs('six',level=1,axis=0) -`Slicing a multi-index with xs, method #2 +`Slicing a MultiIndex with xs, method #2 <http://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__ .. ipython:: python @@ -386,13 +386,13 @@ To take the cross section of the 1st level and 1st axis the index: df.loc[(All,'Math'),('Exams')] df.loc[(All,'Math'),(All,'II')] -`Setting portions of a multi-index with xs +`Setting portions of a MultiIndex with xs <http://stackoverflow.com/questions/19319432/pandas-selecting-a-lower-level-in-a-dataframe-to-do-a-ffill>`__ Sorting ******* -`Sort by specific column or an ordered list of columns, with a multi-index +`Sort by specific column or an ordered list of columns, with a MultiIndex <http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__ .. ipython:: python @@ -664,7 +664,7 @@ The :ref:`Pivot <reshaping.pivot>` docs. `Plot pandas DataFrame with year over year data <http://stackoverflow.com/questions/30379789/plot-pandas-data-frame-with-year-over-year-data>`__ -To create year and month crosstabulation: +To create year and month cross tabulation: .. ipython:: python @@ -677,7 +677,7 @@ To create year and month crosstabulation: Apply ***** -`Rolling Apply to Organize - Turning embedded lists into a multi-index frame +`Rolling Apply to Organize - Turning embedded lists into a MultiIndex frame <http://stackoverflow.com/questions/17349981/converting-pandas-dataframe-with-categorical-values-into-binary-values>`__ .. 
ipython:: python @@ -1029,8 +1029,8 @@ Skip row between header and data 01.01.1990 05:00;21;11;12;13 """ -Option 1: pass rows explicitly to skiprows -"""""""""""""""""""""""""""""""""""""""""" +Option 1: pass rows explicitly to skip rows +""""""""""""""""""""""""""""""""""""""""""" .. ipython:: python diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index ca6cefac9e842..b5b56fc6815c9 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -1014,7 +1014,7 @@ Deprecate Panel Over the last few years, pandas has increased in both breadth and depth, with new features, datatype support, and manipulation routines. As a result, supporting efficient indexing and functional routines for ``Series``, ``DataFrame`` and ``Panel`` has contributed to an increasingly fragmented and -difficult-to-understand codebase. +difficult-to-understand code base. The 3-D structure of a ``Panel`` is much less common for many types of data analysis, than the 1-D of the ``Series`` or the 2-D of the ``DataFrame``. Going forward it makes sense for @@ -1023,7 +1023,7 @@ pandas to focus on these areas exclusively. Oftentimes, one can simply use a MultiIndex ``DataFrame`` for easily working with higher dimensional data. In addition, the ``xarray`` package was built from the ground up, specifically in order to -support the multi-dimensional analysis that is one of ``Panel`` s main usecases. +support the multi-dimensional analysis that is one of ``Panel`` s main use cases. `Here is a link to the xarray panel-transition documentation <http://xarray.pydata.org/en/stable/pandas.html#panel-transition>`__. .. 
ipython:: python diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 30cdb06b28487..8631ec7878af5 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -184,8 +184,8 @@ and metadata disseminated in `SDMX <http://www.sdmx.org>`_ 2.1, an ISO-standard widely used by institutions such as statistics offices, central banks, and international organisations. pandaSDMX can expose datasets and related -structural metadata including dataflows, code-lists, -and datastructure definitions as pandas Series +structural metadata including data flows, code-lists, +and data structure definitions as pandas Series or multi-indexed DataFrames. `fredapi <https://github.com/mortada/fredapi>`__ @@ -260,7 +260,7 @@ Data validation `Engarde <http://engarde.readthedocs.io/en/latest/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Engarde is a lightweight library used to explicitly state your assumptions abour your datasets +Engarde is a lightweight library used to explicitly state your assumptions about your datasets and check that they're *actually* true. .. _ecosystem.extensions: diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index b786b1d0c134a..979d025111df1 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -32,7 +32,7 @@ Cython (Writing C extensions for pandas) ---------------------------------------- For many use cases writing pandas in pure Python and NumPy is sufficient. In some -computationally heavy applications however, it can be possible to achieve sizeable +computationally heavy applications however, it can be possible to achieve sizable speed-ups by offloading work to `cython <http://cython.org/>`__. This tutorial assumes you have refactored as much as possible in Python, for example @@ -806,7 +806,7 @@ truncate any strings that are more than 60 characters in length. 
Second, we can't pass ``object`` arrays to ``numexpr`` thus string comparisons must be evaluated in Python space. -The upshot is that this *only* applies to object-dtype'd expressions. So, if +The upshot is that this *only* applies to object-dtype expressions. So, if you have an expression--for example .. ipython:: python diff --git a/doc/source/extending.rst b/doc/source/extending.rst index f665b219a7bd1..431c69bc0b6b5 100644 --- a/doc/source/extending.rst +++ b/doc/source/extending.rst @@ -167,7 +167,7 @@ you can retain subclasses through ``pandas`` data manipulations. There are 3 constructor properties to be defined: -- ``_constructor``: Used when a manipulation result has the same dimesions as the original. +- ``_constructor``: Used when a manipulation result has the same dimensions as the original. - ``_constructor_sliced``: Used when a manipulation result has one lower dimension(s) as the original, such as ``DataFrame`` single columns slicing. - ``_constructor_expanddim``: Used when a manipulation result has one higher dimension as the original, such as ``Series.to_frame()`` and ``DataFrame.to_panel()``. diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index da13a34cccfea..1c4c3f93726a9 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -994,7 +994,7 @@ is only interesting over one column (here ``colname``), it may be filtered Handling of (un)observed Categorical values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When using a ``Categorical`` grouper (as a single grouper, or as part of multipler groupers), the ``observed`` keyword +When using a ``Categorical`` grouper (as a single grouper, or as part of multiple groupers), the ``observed`` keyword controls whether to return a cartesian product of all possible groupers values (``observed=False``) or only those that are observed groupers (``observed=True``). 
@@ -1010,7 +1010,7 @@ Show only the observed values: pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], categories=['a', 'b']), observed=True).count() -The returned dtype of the grouped will *always* include *all* of the catergories that were grouped. +The returned dtype of the grouped will *always* include *all* of the categories that were grouped. .. ipython:: python diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index e834efd1cb6d1..2b9fcf874ef22 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -700,7 +700,7 @@ Current Behavior Reindexing ~~~~~~~~~~ -The idiomatic way to achieve selecting potentially not-found elmenents is via ``.reindex()``. See also the section on :ref:`reindexing <basics.reindexing>`. +The idiomatic way to achieve selecting potentially not-found elements is via ``.reindex()``. See also the section on :ref:`reindexing <basics.reindexing>`. .. ipython:: python diff --git a/doc/source/install.rst b/doc/source/install.rst index 6054be112f52c..e655136904920 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -31,7 +31,7 @@ PyPI and through conda. Starting **January 1, 2019**, all releases will be Python 3 only. If there are people interested in continued support for Python 2.7 past December -31, 2018 (either backporting bugfixes or funding) please reach out to the +31, 2018 (either backporting bug fixes or funding) please reach out to the maintainers on the issue tracker. For more information, see the `Python 3 statement`_ and the `Porting to Python 3 guide`_. @@ -199,7 +199,7 @@ Running the test suite ---------------------- pandas is equipped with an exhaustive set of unit tests, covering about 97% of -the codebase as of this writing. To run it on your machine to verify that +the code base as of this writing. 
To run it on your machine to verify that everything is working (and that you have all of the dependencies, soft and hard, installed), make sure you have `pytest <http://doc.pytest.org/en/latest/>`__ and run: diff --git a/doc/source/internals.rst b/doc/source/internals.rst index b120e3a98db7f..caf5790fb24c6 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -41,7 +41,7 @@ There are functions that make the creation of a regular index easy: - ``date_range``: fixed frequency date range generated from a time rule or DateOffset. An ndarray of Python datetime objects - ``period_range``: fixed frequency date range generated from a time rule or - DateOffset. An ndarray of ``Period`` objects, representing Timespans + DateOffset. An ndarray of ``Period`` objects, representing timespans The motivation for having an ``Index`` class in the first place was to enable different implementations of indexing. This means that it's possible for you, diff --git a/doc/source/io.rst b/doc/source/io.rst index aa2484b0cb5c3..7bd56d52b3492 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -116,7 +116,7 @@ header : int or list of ints, default ``'infer'`` existing names. The header can be a list of ints that specify row locations - for a multi-index on the columns e.g. ``[0,1,3]``. Intervening rows + for a MultiIndex on the columns e.g. ``[0,1,3]``. Intervening rows that are not specified will be skipped (e.g. 2 in this example is skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so header=0 denotes the first @@ -503,7 +503,7 @@ This matches the behavior of :meth:`Categorical.set_categories`. converted using the :func:`to_numeric` function, or as appropriate, another converter such as :func:`to_datetime`. 
- When ``dtype`` is a ``CategoricalDtype`` with homogenous ``categories`` ( + When ``dtype`` is a ``CategoricalDtype`` with homogeneous ``categories`` ( all numeric, all datetimes, etc.), the conversion is done automatically. .. ipython:: python @@ -554,7 +554,7 @@ If the header is in a row other than the first, pass the row number to Default behavior is to infer the column names: if no names are passed the behavior is identical to ``header=0`` and column names - are inferred from the first nonblank line of the file, if column + are inferred from the first non-blank line of the file, if column names are passed explicitly then the behavior is identical to ``header=None``. @@ -868,7 +868,7 @@ data columns: df .. note:: - If a column or index contains an unparseable date, the entire column or + If a column or index contains an unparsable date, the entire column or index will be returned unaltered as an object data type. For non-standard datetime parsing, use :func:`to_datetime` after ``pd.read_csv``. @@ -1644,7 +1644,7 @@ over the string representation of the object. All arguments are optional: argument and returns a formatted string; to be applied to floats in the ``DataFrame``. - ``sparsify`` default True, set to False for a ``DataFrame`` with a hierarchical - index to print every multiindex key at each row. + index to print every MultiIndex key at each row. - ``index_names`` default True, will print the names of the indices - ``index`` default True, will print the index (ie, row labels) - ``header`` default True, will print the column labels @@ -2178,7 +2178,7 @@ A few notes on the generated table schema: - The ``schema`` object contains a ``pandas_version`` field. This contains the version of pandas' dialect of the schema, and will be incremented with each revision. -- All dates are converted to UTC when serializing. Even timezone naïve values, +- All dates are converted to UTC when serializing. 
Even timezone naive values, which are treated as UTC with an offset of 0. .. ipython:: python @@ -2245,7 +2245,7 @@ A few notes on the generated table schema: .. versionadded:: 0.23.0 ``read_json`` also accepts ``orient='table'`` as an argument. This allows for -the preserveration of metadata such as dtypes and index names in a +the preservation of metadata such as dtypes and index names in a round-trippable manner. .. ipython:: python @@ -2356,7 +2356,7 @@ Read a URL and match a table that contains specific text: Specify a header row (by default ``<th>`` or ``<td>`` elements located within a ``<thead>`` are used to form the column index, if multiple rows are contained within -``<thead>`` then a multiindex is created); if specified, the header row is taken +``<thead>`` then a multi-index is created); if specified, the header row is taken from the data minus the parsed header elements (``<th>`` elements). .. code-block:: python @@ -3141,7 +3141,7 @@ any pickled pandas object (or any other pickled object) from file: .. warning:: - Several internal refactorings have been done while still preserving + Several internal refactoring have been done while still preserving compatibility with pickles created with older versions of pandas. However, for such cases, pickled ``DataFrames``, ``Series`` etc, must be read with ``pd.read_pickle``, rather than ``pickle.load``. @@ -4721,7 +4721,7 @@ writes ``data`` to the database in batches of 1000 rows at a time: .. note:: - The function :func:`~pandas.DataFrame.to_sql` will perform a multivalue + The function :func:`~pandas.DataFrame.to_sql` will perform a multi-value insert if the engine dialect ``supports_multivalues_insert``. This will greatly speed up the insert in some cases. 
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 1161656731f88..0de6b871712a3 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -1310,7 +1310,7 @@ For this, use the :meth:`~DataFrame.combine_first` method: Note that this method only takes values from the right ``DataFrame`` if they are missing in the left ``DataFrame``. A related method, :meth:`~DataFrame.update`, -alters non-NA values inplace: +alters non-NA values in place: .. ipython:: python :suppress: diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 3950e4c80749b..e4b5578af15f0 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -105,7 +105,7 @@ Datetimes For datetime64[ns] types, ``NaT`` represents missing values. This is a pseudo-native sentinel value that can be represented by NumPy in a singular dtype (datetime64[ns]). -pandas objects provide intercompatibility between ``NaT`` and ``NaN``. +pandas objects provide compatibility between ``NaT`` and ``NaN``. .. ipython:: python @@ -349,7 +349,7 @@ Interpolation The ``limit_area`` keyword argument was added. Both Series and DataFrame objects have :meth:`~DataFrame.interpolate` -that, by default, performs linear interpolation at missing datapoints. +that, by default, performs linear interpolation at missing data points. .. 
ipython:: python :suppress: diff --git a/doc/source/names_wordlist.txt b/doc/source/names_wordlist.txt new file mode 100644 index 0000000000000..032883b7febf6 --- /dev/null +++ b/doc/source/names_wordlist.txt @@ -0,0 +1,1652 @@ +Critchley +Villanova +del +Hohmann +Rychyk +Buchkovsky +Lenail +Schade +datetimeindex +Aly +Sivji +Költringer +Bui +András +Novoszáth +Anh +Anil +Pallekonda +Pitrou +Linde +Quinonez +Varshokar +Artem +Bogachev +Avi +Azeez +Oluwafemi +Auffarth +Thiel +Bhavesh +Poddar +Haffner +Naul +Guinta +Moreira +García +Márquez +Cheuk +Chitrank +Dixit +Catalfo +Mazzullo +Chwala +Cihan +Ceyhan +Brunner +Riemenschneider +Dixey +Garrido +Sakuma +Hirschfeld +Adrián +Cañones +Castellano +Arcos +Hoese +Stansby +Kamau +Niederhut +Dror +Atariah +Chea +Kisslinger +Retkowski +Sar +Maeztu +Gianpaolo +Macario +Giftlin +Rajaiah +Olimpio +Gjelt +Inggs +Grzegorz +Konefał +Guilherme +Beltramini +Pitkeathly +Mashkoor +Ferchland +Haochen +Hissashi +Sharaf +Ignasi +Fosch +Alves +Shelvinskyi +Imanflow +Ingolf +Saeta +Pérez +Koevska +Jakub +Nowacki +Werkmann +Zoutkamp +Bandlow +Jaume +Bonet +Alammar +Reback +Jing +Qiang +Goh +Miralles +Nothman +Joeun +Metz +Mease +Schulze +Jongwony +Jordi +Contestí +Joris +Bossche +José +Fonseca +Jovixe +Jörg +Döpfert +Ittoku +Surta +Kuhl +Krzysztof +Chomski +Ksenia +Ksenia +Bobrova +Kunal +Gosar +Kerstein +Laksh +Arora +Geffert +Licht +Takeuchi +Liudmila +Villalba +Manan +Singh +Manraj +Singh +Hemken +Bibiloni +Corchero +Woodbridge +Journois +Gallo +Heikkilä +Braymer +Maybeno +Rocklin +Roeschke +Bussonnier +Mikhaylov +Veksler +Roos +Maximiliano +Greco +Penkov +Röttger +Selik +Waskom +Mie +Kutzma +Mitar +Negus +Münst +Mortada +Mehyar +Braithwaite +Chmura +Karagiannakis +Nipun +Sadvilkar +Martensen +Noémi +Éltető +Bilodeau +Ondrej +Kokes +Onno +Ganssle +Mannino +Reidy +Oliveira +Hoffmann +Ngo +Battiston +Pranav +Suri +Priyanka +Ojha +Pulkit +Maloo +Magliocchetti +Ridhwan +Luthra +Kiplang'at +Rohan +Pandit +Rok +Mihevc +Rouz +Azari +Ryszard 
+Kaleta +Samir +Musali +Sinayoko +Sangwoong +Yoon +Sharad +Vijalapuram +Shubham +Chaudhary +Sietse +Brouwer +Delprete +Cianciulli +Childs +Stijn +Hoey +Talitha +Pumar +Tarbo +Fukazawa +Petrou +Caswell +Hoffmann +Swast +Augspurger +Tulio +Casagrande +Tushar +Tushar +Mittal +Upkar +Lidder +Vinícius +Figueiredo +Vipin +WBare +Wenhuan +Ayd +Xbar +Yaroslav +Halchenko +Yee +Mey +Yeongseon +Choe +Yian +Yimeng +Zhang +Zihao +Zhao +adatasetaday +akielbowicz +akosel +alinde +amuta +bolkedebruin +cbertinato +cgohlke +charlie +chris +csfarkas +dajcs +deflatSOCO +derestle +htwg +discort +dmanikowski +donK +elrubio +fivemok +fjdiod +fjetter +froessler +gabrielclow +gfyoung +ghasemnaddaf +vetinari +himanshu +awasthi +ignamv +jayfoad +jazzmuesli +jbrockmendel +jjames +joaoavf +joders +jschendel +juan +huguet +luzpaz +mdeboc +miguelmorin +miker +miquelcamprodon +orereta +ottiP +peterpanmj +rafarui +raph +readyready +rmihael +samghelms +scriptomation +sfoo +stefansimik +stonebig +tmnhat +tomneep +tv +verakai +xpvpc +zhanghui +API +Mazzullo +Riemenschneider +Hirschfeld +Stansby +Dror +Atariah +Kisslinger +Ingolf +Werkmann +Reback +Joris +Bossche +Jörg +Döpfert +Kuhl +Krzysztof +Chomski +Licht +Takeuchi +Manraj +Singh +Braymer +Waskom +Mie +Hoffmann +Sietse +Brouwer +Swast +Augspurger +Ayd +Yee +Mey +bolkedebruin +cgohlke +derestle +htwg +fjdiod +gabrielclow +gfyoung +ghasemnaddaf +jbrockmendel +jschendel +miker +pypy +Gleave +Liaw +Velasco +Yee +Marchenko +Amol +Winkler +亮 +André +Jonasson +Sweger +Berkay +Haffner +Tu +Chankey +Pathak +Billington +Filo +Gorgolewski +Mazzullo +Prinoth +Stade +Schuldt +Moehl +Himmelstein +Willmer +Niederhut +Wieser +Fredriksen +Kint +Giftlin +Giftlin +Rajaiah +Guilherme +Beltramini +Guillem +Borrell +Hanmin +Qin +Makait +Hussain +Tamboli +Miholic +Novotný +Helie +Schiratti +Deschenes +Knupp +Reback +Tratner +Nothman +Crall +Mease +Helmus +Joris +Bossche +Bochi +Kuhlmann +Brabandere +Keeton +Keiron +Pizzey +Kernc +Licht +Takeuchi +Kushner +Jelloul 
+Makarov +Malgorzata +Turzanska +Sy +Roeschke +Picus +Mehmet +Akmanalp +Gasvoda +Penkov +Eubank +Shteynbuk +Tillmann +Pankaj +Pandey +Luo +O'Melveny +Reidy +Quackenbush +Yanovich +Haessig +Battiston +Pradyumna +Reddy +Chinthala +Prasanjit +Prakash +Sangwoong +Yoon +Sudeep +Telt +Caswell +Swast +Augspurger +Tuan +Utkarsh +Upadhyay +Vivek +Aiyong +WBare +Yi +Liu +Yosuke +Nakabayashi +aaron +abarber +gh +aernlund +agustín +méndez +andymaheshw +aviolov +bpraggastis +cbertinato +cclauss +chernrick +chris +dkamm +dwkenefick +faic +fding +gfyoung +guygoldberg +hhuuggoo +huashuai +ian +iulia +jaredsnyder +jbrockmendel +jdeschenes +jebob +jschendel +keitakurita +kernc +kiwirob +kjford +linebp +lloydkirk +louispotok +majiang +manikbhandari +matthiashuschle +mattip +maxwasserman +mjlove +nmartensen +parchd +philipphanemann +rdk +reidy +ri +ruiann +rvernica +weigand +scotthavard +skwbc +tobycheese +tsdlovell +ysau +zzgao +cov +abaldenko +adrian +stepien +Saxena +Akash +Tandon +Aleksey +Bilogur +alexandercbooth +Amol +Kahat +Winkler +Kittredge +Anthonios +Partheniou +Arco +Ashish +Singal +atbd +bastewart +Baurzhan +Muftakhidinov +Kandel +bmagnusson +carlosdanielcsantos +Souza +chaimdemulder +chris +Aycock +Gohlke +Paulik +Warth +Brunner +Himmelstein +Willmer +Krych +dickreuter +Dimitris +Spathis +discort +Dmitry +Suria +Wijaya +Stanczak +dr +leo +dubourg +dwkenefick +Andrade +Ennemoser +Francesc +Alted +Fumito +Hamamura +funnycrab +gfyoung +Ferroni +goldenbull +Jeffries +Guilherme +Beltramini +Guilherme +Samora +Hao +Harshit +Patni +Ilya +Schurov +Iván +Vallés +Pérez +Leng +Jaehoon +Hwang +Goppert +Santucci +Reback +Crist +Jevnik +Nothman +Zwinck +jojomdt +Whitmore +Mease +Mease +Joost +Kranendonk +Joris +Bossche +Bradt +Santander +Julien +Marrec +Solinsky +Kacawi +Kamal +Kamalaldin +Shedden +Kernc +Keshav +Ramaswamy +Ren +linebp +Pedersen +Cestaro +Scarabello +Lukasz +paramstyle +Lababidi +Unserialized +manu +manuels +Roeschke +mattip +Picus +Roeschke +maxalbert +Roos 
+mcocdawc +Lamparski +Michiel +Mikolaj +Chwalisz +Miroslav +Šedivý +Mykola +Golubyev +Rud +Halen +Chmura +nuffe +Pankaj +Pandey +paul +mannino +Pawel +Kordek +pbreach +Csizsek +Petio +Petrov +Ruffwind +Battiston +Chromiec +Prasanjit +Prakash +Forgione +Rouz +Azari +Sahil +Dua +sakkemo +Sami +Salonen +Sarma +Tangirala +scls +Gsänger +Sébastien +Menten +Heide +Shyam +Saladi +sinhrks +Sinhrks +Rauch +stijnvanhoey +Adiseshan +themrmax +Thiago +Serafim +Thoralf +Thrasibule +Gustafsson +Augspurger +tomrod +Shen +tzinckgraf +Uwe +wandersoncferreira +watercrossing +wcwagner +Wiktor +Tomczak +xgdgsc +Yaroslav +Halchenko +Yimeng +Zhang +yui +knk +Saxena +Kandel +Aycock +Himmelstein +Willmer +gfyoung +hesham +shabana +Reback +Jevnik +Joris +Bossche +Santander +Shedden +Keshav +Ramaswamy +Scarabello +Picus +Roeschke +Roos +Mykola +Golubyev +Halen +Pawel +Kordek +Battiston +sinhrks +Adiseshan +Augspurger +wandersoncferreira +Yaroslav +Halchenko +Chainz +Anthonios +Partheniou +Arash +Rouhani +Kandel +chris +Warth +Krych +dubourg +gfyoung +Iván +Vallés +Pérez +Reback +Jevnik +Mease +Joris +Bossche +Keshav +Ramaswamy +Ren +mattrijk +paul +mannino +Chromiec +Sinhrks +Thiago +Serafim +adneu +agraboso +Alekseyev +Vig +Riddell +Amol +Amol +Agrawal +Anthonios +Partheniou +babakkeyvani +Kandel +Baxley +Camilo +Cota +chris +Grinolds +Hudon +Aycock +Warth +cmazzullo +cr +Siladji +Drewrey +Lupton +dsm +Blancas +Marsden +Marczinowski +O'Donovan +Gábor +Lipták +Geraint +gfyoung +Ferroni +Haleemur +harshul +Hassan +Shamim +iamsimha +Iulius +Nazarov +jackieleng +Reback +Crist +Jevnik +Liekezer +Zwinck +Erenrich +Joris +Bossche +Howes +Brandys +Kamil +Sindi +Ka +Wo +Shedden +Kernc +Brucher +Roos +Scherer +Mortada +Mehyar +mpuels +Haseeb +Tariq +Bonnotte +Virtanen +Mestemaker +Pawel +Kordek +Battiston +pijucha +Jucha +priyankjain +Nimmi +Gieseke +Keyes +Sahil +Dua +Sanjiv +Lobo +Sašo +Stanovnik +Heide +sinhrks +Sinhrks +Kappel +Choi +Sudarshan +Konge +Caswell +Augspurger +Uwe +Hoffmann +wcwagner 
+Xiang +Zhang +Yadunandan +Yaroslav +Halchenko +YG +Riku +Yuichiro +Kaneko +yui +knk +zhangjinjie +znmean +颜发才 +Yan +Facai +Fiore +Gartland +Bastiaan +Benoît +Vinot +Fustin +Freitas +Ter +Livschitz +Gábor +Lipták +Hassan +Kibirige +Iblis +Saeta +Pérez +Wolosonovich +Reback +Jevnik +Joris +Bossche +Storck +Ka +Wo +Shedden +Kieran +O'Mahony +Lababidi +Maoyuan +Liu +Wittmann +MaxU +Roos +Droettboom +Eubank +Bonnotte +Virtanen +Battiston +Prabhjot +Singh +Augspurger +Aiyong +Winand +Xbar +Yan +Facai +adneu +ajenkins +cargometrics +behzad +nouri +chinskiy +gfyoung +jeps +jonaslb +kotrfa +nileracecrew +onesandzeroes +sinhrks +tsdlovell +Alekseyev +Rosenfeld +Anthonios +Partheniou +Sipos +Carroux +Aycock +Scanlin +Da +Dorozhko +O'Donovan +Cleary +Gianluca +Jeffries +Horel +Schwabacher +Deschenes +Reback +Jevnik +Fremlin +Hoersch +Joris +Bossche +Joris +Vankerschaver +Ka +Wo +Keming +Zhang +Shedden +Farrugia +Lurie +Roos +Mayank +Asthana +Mortada +Mehyar +Moussa +Taifi +Navreet +Bonnotte +Reiners +Gura +Battiston +Carnevale +Rinoc +Rishipuri +Sangmin +Lasley +Sereger +Seabold +Thierry +Moisan +Caswell +Augspurger +Hauck +Varun +Yoong +Kang +Lim +Yoshiki +Vázquez +Baeza +Joong +Younggun +Yuval +Langer +argunov +behzad +nouri +boombard +brian +pantano +chromy +daniel +dgram +gfyoung +hcontrast +jfoo +kaustuv +deolal +llllllllll +ranarag +rockg +scls +seales +sinhrks +srib +surveymedia +tworec +Drozd +Anthonios +Partheniou +Berendt +Piersall +Hamed +Saljooghinejad +Iblis +Deschenes +Reback +Callin +Joris +Bossche +Ka +Wo +Loïc +Séguin +Luo +Yicheng +Magnus +Jöud +Leonhardt +Roos +Bonnotte +Pastafarianist +Chong +Schaf +Philipp +deCarvalho +Khomenko +Rémy +Léone +Thierry +Moisan +Augspurger +Varun +Hoffmann +Winterflower +Younggun +ajcr +azuranski +behzad +nouri +cel +emilydolson +hironow +lexual +llllllllll +rockg +silentquasar +sinhrks +taeold +unparseable +Rothberg +Bedini +Rosenfeld +Anthonios +Partheniou +Artemy +Kolchinsky +Willers +Gohlke +Clearfield +Ringwalt +Cottrell 
+Gagne +Schettino +Panfilov +Araujo +Gianluca +Poulin +Nisar +Henriksen +Hoegen +Jaidev +Deshpande +Swails +Reback +Buyl +Joris +Bossche +Joris +Vankerschaver +Julien +Danjou +Ka +Wo +Kehoe +Jordahl +Shedden +Buitinck +Gambogi +Savoie +Roos +D'Agostino +Mortada +Mehyar +Eubank +Nipun +Batra +Ondřej +Čertík +Pratap +Vardhan +Rafal +Skolasinski +Rinoc +Gieseke +Safia +Abdalla +Saumitra +Shahapure +Pölsterl +Rubbert +Sinhrks +Siu +Kwan +Seabold +Carrucciu +Hoyer +Pascoe +Santegoeds +Grainger +Tjerk +Santegoeds +Augspurger +Winterflower +Yaroslav +Halchenko +agijsberts +ajcr +behzad +nouri +cel +cyrusmaher +davidovitch +ganego +jreback +juricast +larvian +maximilianr +msund +rekcahpassyla +robertzk +scls +seth +sinhrks +springcoil +terrytangyuan +tzinckgraf +Rosenfeld +Artemy +Kolchinsky +Willers +Christer +der +Meeren +Hudon +Lasiman +Brundu +Gaëtan +Menten +Hiebert +Reback +Joris +Bossche +Ka +Wo +Mortada +Mehyar +Grainger +Ajamian +Augspurger +Yoshiki +Vázquez +Baeza +Younggun +austinc +behzad +nouri +jreback +lexual +rekcahpassyla +scls +sinhrks +Artemy +Kolchinsky +Gilmer +Grinolds +Birken +Hirschfeld +Dunné +Hatem +Nassrat +Sperr +Herter +Blackburne +Reback +Crist +Abernot +Joris +Bossche +Shedden +Razoumov +Riel +Mortada +Mehyar +Eubank +Grisel +Battiston +Hyunjin +Zhang +Hoyer +Tiago +Antao +Ajamian +Augspurger +Tomaz +Berisa +Shirgur +Filimonov +Hogman +Yasin +Younggun +behzad +nouri +dsm +floydsoft +gfr +jnmclarty +jreback +ksanghai +lucas +mschmohl +ptype +rockg +scls +sinhrks +Toth +Amici +Artemy +Kolchinsky +Ashwini +Chaudhary +Letson +Chau +Hoang +Christer +der +Meeren +Cottrell +Ehsan +Azarnasab +Torcasso +Sexauer +Reback +Joris +Bossche +Joschka +zur +Jacobsmühlen +Bochi +Junya +Hayashi +Shedden +Kieran +O'Mahony +Kodi +Arfer +Airas +Mortada +Mehyar +Lasley +Lasley +Pascual +Seabold +Hoyer +Grainger +Augspurger +Filimonov +Vyomkesh +Tripathi +Holmgren +Yulong +behzad +nouri +bertrandhaut +bjonen +cel +clham +hsperr +ischwabacher +jnmclarty +josham 
+jreback +omtinez +roch +sinhrks +unutbu +Angelos +Evripiotis +Artemy +Kolchinsky +Pointet +Jacobowski +Charalampos +Papaloizou +Warth +Zanini +Francesc +Kleynhans +Reback +Tratner +Joris +Bossche +Suggit +Lasley +Hoyer +Sylvain +Corlay +Grainger +Tiago +Antao +Hauck +Chaves +Salgado +Bhandoh +Aiyong +Holmgren +behzad +nouri +broessli +charalampos +papaloizou +immerrr +jnmclarty +jreback +mgilbert +onesandzeroes +peadarcoyle +rockg +seth +sinhrks +unutbu +wavedatalab +Åsmund +Hjulstad +Rosenfeld +Sipos +Artemy +Kolchinsky +Letson +Horel +Reback +Joris +Bossche +Sanghee +Hoyer +Aiyong +behzad +nouri +immerrr +jnmclarty +jreback +pallav +fdsi +unutbu +Greenhall +Artemy +Kolchinsky +behzad +nouri +Sauer +benjamin +Thyreau +bjonen +Stoafer +dlovell +dsm +Herrero +Hsiaoming +Huan +hunterowens +Hyungtae +immerrr +Slavitt +ischwabacher +Schaer +Tratner +Farnham +jmorris +jnmclarty +Bradish +Joerg +Rittinger +Joris +Bossche +jreback +klonuo +lexual +mcjcode +Schatzow +Mortada +Mehyar +mtrbean +Typanski +onesandzeroes +Masurel +Battiston +rockg +Petchler +seth +Shahul +Hameed +Shashank +Agarwal +sinhrks +someben +stahlous +stas +sl +Hoyer +thatneat +alcorn +Augspurger +unutbu +Yevgeniy +Grechka +Yoshiki +VÃ +zquez +Baeza +zachcp +Rosenfeld +Quistorff +Wignall +bwignall +clham +Waeber +Bew +dsm +helger +immerrr +Schaer +jaimefrio +Reaver +Joris +Bossche +jreback +Julien +Danjou +lexual +Wittmann +Mortada +Mehyar +onesandzeroes +rockg +sanguineturtle +Schaer +seth +sinhrks +Hoyer +Kluyver +yelite +hexbin +Acanthostega +agijsberts +akittredge +Gaudio +Rothberg +Rosenfeld +ankostis +anomrake +Mazières +anton +bashtage +Sauer +benjamin +Buran +bwignall +cgohlke +chebee +clham +Birken +danielballan +Waeber +Drapala +Gouthaman +Balaraman +Poulin +hshimizu +hugo +immerrr +ischwabacher +Schaer +jaimefrio +Sexauer +Reback +Tratner +Reaver +Joris +Bossche +jreback +jsexauer +Júlio +kdiether +Jordahl +Wittmann +Grender +Gruen +michaelws +mikebailey +Nipun +Batra +ojdo +onesandzeroes 
+phaebz +Battiston +Carnevale +ribonoous +Gibboni +rockg +sinhrks +Seabold +Hoyer +Cera +Augspurger +unutbu +westurner +Yaroslav +Halchenko +lexual +danbirken +travis +Billington +Cobzarenco +Gamboa +Cavazos +Gaudecker +Gerigk +Yaroslav +Halchenko +sharey +Vytautas +Jancauskas +Hammerbacher +Hilboll +Luc +Kesters +JanSchulz +Negusse +Wouter +Overmeire +Reeson +Aman +Thakral +Uga +Vandenbussche +Pinxing +astype +Buglet +Beltrame +Hilboll +Jev +Kuznetsov +Wouter +Overmeire +Reyfman +Joon +Ro +Uga +Vandenbussche +setupegg +Hammerbacher +Jev +Kuznetsov +Wouter +Overmeire +Aman +Thakral +Uga +Vandenbussche +carljv +rsamson +newaxis +Fortunov +Aman +Thakral +Beltrame +Wouter +Overmeire +rsamson +Laserson +Pentreath +Joon +Ro +Uga +Fortunov +Berka +Vandenbussche +krogh +akima +BPoly +isna +kurt diff --git a/doc/source/options.rst b/doc/source/options.rst index 48247eb48baaf..697cc0682e39a 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -149,7 +149,7 @@ More information can be found in the `ipython documentation Frequently Used Options ----------------------- -The following is a walkthrough of the more frequently used display options. +The following is a walk-through of the more frequently used display options. ``display.max_rows`` and ``display.max_columns`` sets the maximum number of rows and columns displayed when a frame is pretty-printed. Truncated diff --git a/doc/source/release.rst b/doc/source/release.rst index 32db2ff5ebb24..04c499ff6797b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2429,7 +2429,7 @@ New Features - ``plot(kind='kde')`` now accepts the optional parameters ``bw_method`` and ``ind``, passed to scipy.stats.gaussian_kde() (for scipy >= 0.11.0) to set - the bandwidth, and to gkde.evaluate() to specify the indicies at which it + the bandwidth, and to gkde.evaluate() to specify the indices at which it is evaluated, respectively. See scipy docs. 
(:issue:`4298`) - Added ``isin`` method to DataFrame (:issue:`4211`) - ``df.to_clipboard()`` learned a new ``excel`` keyword that let's you @@ -2540,7 +2540,7 @@ Improvements to existing features - ``read_json`` now raises a (more informative) ``ValueError`` when the dict contains a bad key and ``orient='split'`` (:issue:`4730`, :issue:`4838`) - ``read_stata`` now accepts Stata 13 format (:issue:`4291`) -- ``ExcelWriter`` and ``ExcelFile`` can be used as contextmanagers. +- ``ExcelWriter`` and ``ExcelFile`` can be used as context managers. (:issue:`3441`, :issue:`4933`) - ``pandas`` is now tested with two different versions of ``statsmodels`` (0.4.3 and 0.5.0) (:issue:`4981`). @@ -2553,7 +2553,7 @@ Improvements to existing features that cannot be concatenated (:issue:`4608`). - Add ``halflife`` option to exponentially weighted moving functions (PR :issue:`4998`) -- ``to_dict`` now takes ``records`` as a possible outtype. Returns an array +- ``to_dict`` now takes ``records`` as a possible out type. Returns an array of column-keyed dictionaries. 
(:issue:`4936`) - ``tz_localize`` can infer a fall daylight savings transition based on the structure of unlocalized data (:issue:`4230`) @@ -2664,13 +2664,13 @@ API Changes - ``select_as_coordinates`` will now return an ``Int64Index`` of the resultant selection set - support ``timedelta64[ns]`` as a serialization type (:issue:`3577`) - - store `datetime.date` objects as ordinals rather then timetuples to avoid + - store `datetime.date` objects as ordinals rather then time-tuples to avoid timezone issues (:issue:`2852`), thanks @tavistmorph and @numpand - ``numexpr`` 2.2.2 fixes incompatibility in PyTables 2.4 (:issue:`4908`) - ``flush`` now accepts an ``fsync`` parameter, which defaults to ``False`` (:issue:`5364`) - ``unicode`` indices not supported on ``table`` formats (:issue:`5386`) - - pass thru store creation arguments; can be used to support in-memory stores + - pass through store creation arguments; can be used to support in-memory stores - ``JSON`` - added ``date_unit`` parameter to specify resolution of timestamps. @@ -2736,7 +2736,7 @@ API Changes created when passing floating values in index creation. This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the same. Indexing on other index types - are preserved (and positional fallback for ``[],ix``), with the exception, + are preserved (and positional fall back for ``[],ix``), with the exception, that floating point slicing on indexes on non ``Float64Index`` will raise a ``TypeError``, e.g. ``Series(range(5))[3.5:4.5]`` (:issue:`263`,:issue:`5375`) - Make Categorical repr nicer (:issue:`4368`) @@ -2765,7 +2765,7 @@ API Changes (:issue:`5339`) - default for `display.max_seq_len` is now 100 rather then `None`. This activates truncated display ("...") of long sequences in various places. 
(:issue:`3391`) -- **All** division with ``NDFrame`` - likes is now truedivision, regardless +- **All** division with ``NDFrame`` - likes is now true division, regardless of the future import. You can use ``//`` and ``floordiv`` to do integer division. @@ -2787,7 +2787,7 @@ API Changes dtype: float64 - raise/warn ``SettingWithCopyError/Warning`` exception/warning when setting of a - copy thru chained assignment is detected, settable via option ``mode.chained_assignment`` + copy through chained assignment is detected, settable via option ``mode.chained_assignment`` - test the list of ``NA`` values in the csv parser. add ``N/A``, ``#NA`` as independent default na values (:issue:`5521`) - The refactoring involving``Series`` deriving from ``NDFrame`` breaks ``rpy2<=2.3.8``. an Issue @@ -2888,7 +2888,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` (datetime/timedelta/time etc.) into a separate, cleaned up wrapper class. (:issue:`4613`) - Complex compat for ``Series`` with ``ndarray``. (:issue:`4819`) -- Removed unnecessary ``rwproperty`` from codebase in favor of builtin +- Removed unnecessary ``rwproperty`` from code base in favor of builtin property. (:issue:`4843`) - Refactor object level numeric methods (mean/sum/min/max...) from object level modules to ``core/generic.py`` (:issue:`4435`). @@ -3014,7 +3014,7 @@ Bug Fixes - Fix boolean indexing on an empty series loses index names (:issue:`4235`), infer_dtype works with empty arrays. - Fix reindexing with multiple axes; if an axes match was not replacing the - current axes, leading to a possible lazay frequency inference issue + current axes, leading to a possible lazy frequency inference issue (:issue:`3317`) - Fixed issue where ``DataFrame.apply`` was reraising exceptions incorrectly (causing the original stack trace to be truncated). @@ -3036,7 +3036,7 @@ Bug Fixes (:issue:`4727`) - Fix some inconsistencies with ``Index.rename`` and ``MultiIndex.rename``, etc. 
(:issue:`4718`, :issue:`4628`) -- Bug in using ``iloc/loc`` with a cross-sectional and duplicate indicies +- Bug in using ``iloc/loc`` with a cross-sectional and duplicate indices (:issue:`4726`) - Bug with using ``QUOTE_NONE`` with ``to_csv`` causing ``Exception``. (:issue:`4328`) @@ -3171,7 +3171,7 @@ Bug Fixes - Fixed bug in Excel writers where frames with duplicate column names weren't written correctly. (:issue:`5235`) - Fixed issue with ``drop`` and a non-unique index on Series (:issue:`5248`) -- Fixed seg fault in C parser caused by passing more names than columns in +- Fixed segfault in C parser caused by passing more names than columns in the file. (:issue:`5156`) - Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`) - C and Python Parser can now handle the more common multi-index column @@ -3377,7 +3377,7 @@ API Changes - more consistency in the to_datetime return types (give string/array of string inputs) (:issue:`3888`) - The internal ``pandas`` class hierarchy has changed (slightly). The previous ``PandasObject`` now is called ``PandasContainer`` and a new - ``PandasObject`` has become the baseclass for ``PandasContainer`` as well + ``PandasObject`` has become the base class for ``PandasContainer`` as well as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` provides string methods (from ``StringMixin``). 
(:issue:`4090`, :issue:`4092`) @@ -3729,7 +3729,7 @@ Bug Fixes - Bug in value_counts of ``datetime64[ns]`` Series (:issue:`3002`) - Fixed printing of ``NaT`` in an index - Bug in idxmin/idxmax of ``datetime64[ns]`` Series with ``NaT`` (:issue:`2982`) -- Bug in ``icol, take`` with negative indicies was producing incorrect return +- Bug in ``icol, take`` with negative indices was producing incorrect return values (see :issue:`2922`, :issue:`2892`), also check for out-of-bounds indices (:issue:`3029`) - Bug in DataFrame column insertion when the column creation fails, existing frame is left in an irrecoverable state (:issue:`3010`) @@ -3752,7 +3752,7 @@ Bug Fixes - Fix upsampling bug with closed='left' and daily to daily data (:issue:`3020`) - Fixed missing tick bars on scatter_matrix plot (:issue:`3063`) - Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (:issue:`2993`) -- series.plot(kind='bar') now respects pylab color schem (:issue:`3115`) +- series.plot(kind='bar') now respects pylab color scheme (:issue:`3115`) - Fixed bug in reshape if not passed correct input, now raises TypeError (:issue:`2719`) - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (:issue:`3282`) - Fix NameError issue on RESO_US (:issue:`2787`) @@ -3790,7 +3790,7 @@ Bug Fixes a simple index (:issue:`2893`) - Fix Python ASCII file parsing when integer falls outside of floating point spacing (:issue:`3258`) -- fixed pretty priniting of sets (:issue:`3294`) +- fixed pretty printing of sets (:issue:`3294`) - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (:issue:`3303`) - DataFrame where with a datetimelike incorrectly selecting (:issue:`3311`) - Ensure index casts work even in Int64Index @@ -3837,7 +3837,7 @@ Improvements to existing features keyword to append - support automagic indexing via ``index`` keyword to append - support ``expectedrows`` keyword in append to inform ``PyTables`` about - the expected tablesize 
+ the expected table size - support ``start`` and ``stop`` keywords in select to limit the row selection space - added ``get_store`` context manager to automatically import with pandas @@ -3908,7 +3908,7 @@ Bug Fixes - Fix setitem on a Series with a boolean key and a non-scalar as value (:issue:`2686`) - Box datetime64 values in Series.apply/map (:issue:`2627`, :issue:`2689`) -- Upconvert datetime + datetime64 values when concatenating frames (:issue:`2624`) +- Up convert datetime + datetime64 values when concatenating frames (:issue:`2624`) - Raise a more helpful error message in merge operations when one DataFrame has duplicate columns (:issue:`2649`) - Fix partial date parsing issue occurring only when code is run at EOM @@ -4115,7 +4115,7 @@ Bug Fixes datetime64 when calling DataFrame.apply. (:issue:`2374`) - Raise exception when calling to_panel on non uniquely-indexed frame (:issue:`2441`) - Improved detection of console encoding on IPython zmq frontends (:issue:`2458`) -- Preserve time zone when .append-ing two time series (:issue:`2260`) +- Preserve time zone when .appending two time series (:issue:`2260`) - Box timestamps when calling reset_index on time-zone-aware index rather than creating a tz-less datetime64 column (:issue:`2262`) - Enable searching non-string columns in DataFrame.filter(like=...) 
(:issue:`2467`) @@ -4359,7 +4359,7 @@ Bug Fixes - Fix DatetimeIndex.isin to function properly (:issue:`1763`) - Fix conversion of array of tz-aware datetime.datetime to DatetimeIndex with right time zone (:issue:`1777`) -- Fix DST issues with generating ancxhored date ranges (:issue:`1778`) +- Fix DST issues with generating anchored date ranges (:issue:`1778`) - Fix issue calling sort on result of Series.unique (:issue:`1807`) - Fix numerical issue leading to square root of negative number in rolling_std (:issue:`1840`) @@ -4612,14 +4612,14 @@ New Features - Add keys() method on DataFrame (:issue:`1240`) - Add new ``match`` function to API (similar to R) (:issue:`502`) - Add dayfirst option to parsers (:issue:`854`) -- Add ``method`` argument to ``align`` method for forward/backward fillin +- Add ``method`` argument to ``align`` method for forward/backward filling (:issue:`216`) - Add Panel.transpose method for rearranging axes (:issue:`695`) - Add new ``cut`` function (patterned after R) for discretizing data into equal range-length bins or arbitrary breaks of your choosing (:issue:`415`) - Add new ``qcut`` for cutting with quantiles (:issue:`1378`) - Add ``value_counts`` top level array method (:issue:`1392`) -- Added Andrews curves plot tupe (:issue:`1325`) +- Added Andrews curves plot type (:issue:`1325`) - Add lag plot (:issue:`1440`) - Add autocorrelation_plot (:issue:`1425`) - Add support for tox and Travis CI (:issue:`1382`) @@ -4690,7 +4690,7 @@ API Changes - Remove deprecated DataMatrix name - Default merge suffixes for overlap now have underscores instead of periods to facilitate tab completion, etc. 
(:issue:`1239`) -- Deprecation of offset, time_rule timeRule parameters throughout codebase +- Deprecation of offset, time_rule timeRule parameters throughout code base - Series.append and DataFrame.append no longer check for duplicate indexes by default, add verify_integrity parameter (:issue:`1394`) - Refactor Factor class, old constructor moved to Factor.from_array @@ -4879,7 +4879,7 @@ Bug Fixes - Fix combineAdd NotImplementedError for SparseDataFrame (:issue:`887`) - Fix DataFrame.to_html encoding and columns (:issue:`890`, :issue:`891`, :issue:`909`) - Fix na-filling handling in mixed-type DataFrame (:issue:`910`) -- Fix to DataFrame.set_value with non-existant row/col (:issue:`911`) +- Fix to DataFrame.set_value with non-existent row/col (:issue:`911`) - Fix malformed block in groupby when excluding nuisance columns (:issue:`916`) - Fix inconsistent NA handling in dtype=object arrays (:issue:`925`) - Fix missing center-of-mass computation in ewmcov (:issue:`862`) @@ -4935,7 +4935,7 @@ Bug Fixes - Fix indexing operation for floating point values (:issue:`780`, :issue:`798`) - Fix groupby case resulting in malformed dataframe (:issue:`814`) - Fix behavior of reindex of Series dropping name (:issue:`812`) -- Improve on redudant groupby computation (:issue:`775`) +- Improve on redundant groupby computation (:issue:`775`) - Catch possible NA assignment to int/bool series with exception (:issue:`839`) pandas 0.7.0 @@ -5116,7 +5116,7 @@ Bug Fixes - Raise exception in out-of-bounds indexing of Series instead of seg-faulting, regression from earlier releases (:issue:`495`) - Fix error when joining DataFrames of different dtypes within the same - typeclass (e.g. float32 and float64) (:issue:`486`) + type class (e.g. 
float32 and float64) (:issue:`486`) - Fix bug in Series.min/Series.max on objects like datetime.datetime (GH :issue:`487`) - Preserve index names in Index.union (:issue:`501`) @@ -5162,7 +5162,7 @@ Bug Fixes - Format floats to default to same number of digits (:issue:`395`) - Added decorator to copy docstring from one function to another (:issue:`449`) - Fix error in monotonic many-to-one left joins -- Fix __eq__ comparison between DateOffsets with different relativedelta +- Fix __eq__ comparison between DateOffsets with different relative delta keywords passed - Fix exception caused by parser converter returning strings (:issue:`583`) - Fix MultiIndex formatting bug with integer names (:issue:`601`) @@ -5461,7 +5461,7 @@ Improvements to existing features `Series.map` significantly when passed elementwise Python function, motivated by :issue:`355` - Cythonized `cache_readonly`, resulting in substantial micro-performance - enhancements throughout the codebase (:issue:`361`) + enhancements throughout the code base (:issue:`361`) - Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (:issue:`309`) - Add `raw` option to `DataFrame.apply` for getting better performance when @@ -5751,7 +5751,7 @@ pandas 0.4.3 **Release date:** 10/9/2011 -This is largely a bugfix release from 0.4.2 but also includes a handful of new +This is largely a bug fix release from 0.4.2 but also includes a handful of new and enhanced features. Also, pandas can now be installed and used on Python 3 (thanks Thomas Kluyver!). @@ -5803,7 +5803,7 @@ Bug Fixes - Fix Python ndarray access in Cython code for sparse blocked index integrity check - Fix bug writing Series to CSV in Python 3 (:issue:`209`) -- Miscellaneous Python 3 bugfixes +- Miscellaneous Python 3 bug fixes Thanks ~~~~~~ @@ -5828,7 +5828,7 @@ New Features int64-based time series (e.g. 
using NumPy's datetime64 one day) and also faster operations on DataFrame objects storing record array-like data. - Refactored `Index` classes to have a `join` method and associated data - alignment routines throughout the codebase to be able to leverage optimized + alignment routines throughout the code base to be able to leverage optimized joining / merging routines. - Added `Series.align` method for aligning two series with choice of join method @@ -6164,7 +6164,7 @@ API Changes - Removed `pandas.core.pytools` module. Code has been moved to `pandas.core.common` - Tacked on `groupName` attribute for groups in GroupBy renamed to `name` -- Panel/LongPanel `dims` attribute renamed to `shape` to be more conformant +- Panel/LongPanel `dims` attribute renamed to `shape` to be more conforming - Slicing a `Series` returns a view now - More Series deprecations / renaming: `toCSV` to `to_csv`, `asOf` to `asof`, `merge` to `map`, `applymap` to `apply`, `toDict` to `to_dict`, diff --git a/doc/source/spelling_wordlist.txt b/doc/source/spelling_wordlist.txt new file mode 100644 index 0000000000000..4c355a1b9c435 --- /dev/null +++ b/doc/source/spelling_wordlist.txt @@ -0,0 +1,916 @@ +IPython +ipython +numpy +NumPy +Reindexing +reindexing +ga +fe +reindexed +automagic +Histogramming +histogramming +concat +resampling +iterables +sparsified +df +loc +gc +Timeseries +ndarrays +ndarray +dtype +dtypes +dtyped +reindex +sliceable +timedelta +Timedeltas +timedeltas +subpackages +subpackage +filepath +io +nthreads +kwargs +kwarg +arg +args +Datetimelike +datetime +datetimes +tz +builtin +NaN +nan +behaviour +quantiling +aggregators +aggregator +Dtypes +groupby +GroupBy +Tablewise +Elementwise +ufunc +ufuncs +dict +namedtuples +namedtuple +iterrows +upcasted +upcasting +upcast +searchsorted +downcasting +Likert +categoricals +Groupby +Unioning +csv +Upcase +resampling +Upcase +Lowcase +Propcase +Interop +Stata +stata +bysort +Spearman +Wikipedia +debiasing +docstrings +docstring 
+Docstrings +autosummary +linting +toolchain +Appveyor +Akogun +online +pdf +reStructuredText +reST +backticks +cpus +str +idxmin +mins +agg +DataFrame +dataframes +NaT +len +Statsmodels +Bokeh +Protovis +Seaborn +Wickham +shareability +apps +app +Plotly +Spyder +Fama +Eurostat +organisations +Geopandas +Dask +Scikit +backends +Engarde +Cyberpandas +Accessor +Numba +optimising +Cython +cython +cythonizing +cythonized +Vectorize +ol +subclassing +IPv +iteritems +itertuples +dt +upcast +subsetting +programmatically +stderr +scipy +SparseArray +doctests +nd +refactored +Jit +stdout +Typeclass +Pythonic +zscore +SQL +broadcastable +resample +resamples +groupbys +metaprogramming +upcast +un +dropna +ints +int +boxplot +groupwise +indices +pre +datetimelike +dev +gd +colname +intemname +nd +isin +backporting +admin +Debian +Ubuntu +Centos +RHEL +xlsx +xz +ftp +impl +timespans +pre +Regex +regex +sortedness +delim +usecols +skipinitialspace +skiprows +skipfooter +nrows +na +iso +dayfirst +chunksize +gz +bz +lineterminator +quotechar +doublequote +escapechar +tupleize +prepended +colspecs +NONNUMERIC +serializer +localhost +json +strtod +deserialization +Hadoop +ns +stringified +xclip +xsel +gtk +gtpy +Msgpacks +msgpack +msgpacks +foo +ptrepack +sqlalchemy +sqlite +Sqlite +dta +bdat +netCDF +backend +deserialising +deserializing +qtpy +indexables +itemsize +de +sas +Miniconda +itemname +ndims +ndim +mergands +Timeseries +timeseries +asof +Nans +DataFrames +fillna +ffill +bfill +alignable +sim +py +ipy +colheader +yearfirst +repr +EngFormatter +frontends +frontend +longtable +multirow +cline +clines +colwidth +Sparsify +html +pprint +mathjax +Jupyter +xls +xlsm +hdf +numexpr +matplotlib +timedeltas +lexual +danbirken +isnull +Timestamp +np +xs +locs +datelike +dups +recarray +setitem +rhs +gaussian +kde +gkde +fwf +iNf +astyping +vbench +lgautier +jnothman +roundtrip +xlrd +buf +jtratner +tavistmorph +numpand +unserialiable +tseries +mul +completers +refactor +Refactor 
+subclassed +consolidatable +setitem +DataFrame +klass +jtratner +bs +lxml +rockg +inplace +pyt +tslib +vals +pos +cparser +locs +repr'd +cumsum +cumprod +rhs +datetimeindex +reraising +iloc +setitem +lhs +ticklocs +ticklabels +immerrr +np +kwds +travis +ci +yarikoptic +setitem +delitem +cpcloud +pprinting +hoechenberger +Faq +FAQ +faq +mtkini +spearman +SleepingPills +astypes +cov +timedeltalike +weekmasks +Weekmasks +xlrd +unioning +uint +iget +applymap +stonebig +recarrays +tdsmith +tokenization +google +xN +sharex +famafrench +strptime +stephenwlin +nans +diff +ohlc +util +seg +getitem +queryables +Dataframe +idxmax +putmasking +argsort +unsampling +pylab +fromordinal +andrews +strftime +wb +gzipped +gzip +aggfunc +multithreading +unicode +bork +tokenizer +sortlevel +Scikits +isnull +ndpanel +notnul +ctor +tzinfo +tzoffset +endianness +Upsampling +upsampling +upsampled +locators +locator +astimezone +iget +qcut +ewma +icol +printoption +quantileTS +UTC +utc +bool +init +OLS +Isnull +nansum +Cythonize +extlinks +utcoffset +khash +kendall +tolist +unhandled +downsampling +dayofyear +setops +discretizing +klib +ylabel +bday +BDay +timeRule +unmergeable +navar +pyplot +multiindex +combineAdd +ewmcov +algos +unpickling +MultiIndex +Memoize +Unbox +nanops +vectorize +DataFame +fallback +sharey +xlabel +notnull +asfreq +crit +rpy +nanvar +ddof +ols +printoptions +rankdata +pyo +camelCased +cacheable +unindexed +reduceat +blosc +aggregatable +idx +tradeoff +nPeriods +camelCasing +camelCased +LongPanel +truediv +px +parseCSV +unpivoted +extractall +weekofyear +dayofweek +CDay +Nano +parameterised +sunday +monday +tuesday +friday +upsample +resampled +tzfile +bools +xlsxwriter +ggplot +Colormaps +colormaps +trippable +callables +pivotting +GBQ +intersphinx +hashable +compat +Compat +rollforward +seekable +endian +subrecords +readonly +orderedness +eval +datetimelikes +pytables +argmax +argmin +utf +segfault +segfaults +xlims +CPython +MultiIndexed +blosc +blosclz +hc +lz 
+zlib +zstd +tput +boxplot +UInt +unioned +hashtable +saslib +resampled +dicts +datetimetz +ascii +evals +Compat +lexsorted +errstate +incompat +boxplots +honour +UTF +subclasse +ungrouped +xport +writeable +unencodable +serialising +serialise +Segfault +ceiled +xarray +jupyter +ified +isoformat +downsample +upsample +aggregator +ascii +compat +src +ness +unencoded +submethods +gbq +vectorised +nanos +Bigquery +complib +overridable +xlabels +xticklabels +listlike +jobComplete +cummin +cummax +undeprecated +triang +errored +unpickle +ngroups +multiindexes +xticks +yticks +errorbars +barplots +rcParams +dfs +nw +Openpyxl +barh +timestamp +inv +Welford +tarball +hdfstore +Pandonic +Perf +factorizer +sharey +yyyy +dd +xxx +bdays +nfrequencies +XYZ +Vytautas +Jancauskas +rankdata +Astype +astyped +mergesort +nano +unpickled +dataframe +serialised +serialisation +numpies +deserialize +hashtables +unpivoting +cubehelix +unparsable +fu +Unpivots +rownames +retbins +objs +sep +stubnames +expr +func +skipna +halflife +cond +ceil +fillchar +swapcased +deletechars +figsize +bw +xlabelsize +ftypes +ge +Unpivots +lsuffix +fname +fo +ftypes +rsuffix +sparsifying +tup +cls +nonunique +xrange +periodIndex +pytz +ctime +dst +localtime +proleptic +tzname +stddev +resampler +Resampler +searchpath +cmap +visualising +figsize +desc +Iterable +da +ta +CategoricalIndex +specialised +takeable +iter +upcase +Outlier +fontsize +pearson +corrwith +eq +ewm +floordiv +ftype +iat +typeR +slinear +krogh +akima +BPoly +isna +kurt +le +lt +ne +notna +nsmallest +Deutsche +Colormap +colorbar +silverman +gridsize +radd +rdiv +regexes +rfloordiv +rmod +rmul +rpow +rsub +rtruediv +RandomState +sem +quicksort +heapsort +organised +swapaxes +swaplevel +OAuth +defaultdict +tablename +HDFStore +appendable +searchable +serialisable +lzo +usepackage +booktabs +coereced +spellcheck +misspelt +rcl +multicolumns +gfc +automagically +fastparquet +brotli +sql +nullable +performant +lexsorted +tw +latin +StrL 
+tshift +basestring +DatetimeIndex +periodIndex +pydatetime +perioddelta +ExcelFile +noqa +deepcopy +Discretize +hasnans +nbytes +nlevels +DateOffset +stringr +orderable +IntervalIndex +versionadded +lexsort +droplevel +swaplevel +kurt +IGNORECASE +findall +isalnum +isalpha +isdecimal +isdigit +islower +isnumeric +isspace +istitle +isupper +ljust +lstrip +rfind +rindex +rpartition +rsplit +rstrip +startswith +deletechars +whitespaces +insecable +stringr +zfill +tshift +SparseSeries +isoweekday +isocalendar +fromtimestamp +dateutil +utcfromtimestamp +utcnow +utctimetuple +api +ExtensionArray +nbytes +abc +ABCMeta +Typecode +ExtensionDtype +biufcmMOSUV +accessor +CategoricalDtype +DataFrameGroupBy +Weekmask +walkthrough +wieldy +stubnames +unix +asian +Eg +recomputation +useQueryCache +LocalPath +fspath +params +datatypes +connectable +multirows +sparsify +parseable +TimedeltaIndex +baz +pathlib +radviz +axvline +xtick +unpivot +StataWriter +StataReader +IndexSlice +uuid +cellstyle +tablewise +rowwise +columnwise +env +fba +Regexp +sparsify +multiline +UnsupportedFunctionCall +UnsortedIndexError +PerformanceWarning +ParserWarning +ParserError +OutOfBoundsDatetime +EmptyDataError +DtypeWarning +crosstab +SeriesGroupBy +nunique +nlargest +Truthy +cumcount +ngroup +bdate +toordinal +julian +timetz +timetuple +freqstr +daysinmonth +asm +TimedeltaIndex +pytimedelta +autodetect +coords +endswith +SparseDataFrame +spmatrix +swapcase +rjust +ndarrary +regexs +ptp +imag +gca +keywors +intercalary +daysinmonth +divmod +autocorr +asobject +Argsorts +xrot +RangeIndex +PeriodIndex +qyear +timeries +scikits +fromDict +levshape +putmask +asi +repl \ No newline at end of file diff --git a/doc/source/text.rst b/doc/source/text.rst index 4af64d9f791cc..34bb1a07dfc08 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -55,8 +55,8 @@ Since ``df.columns`` is an Index object, we can use the ``.str`` accessor df.columns.str.lower() These string methods can then be used to clean 
up the columns as needed. -Here we are removing leading and trailing whitespaces, lowercasing all names, -and replacing any remaining whitespaces with underscores: +Here we are removing leading and trailing white spaces, lower casing all names, +and replacing any remaining white spaces with underscores: .. ipython:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 73e3e721aad71..f1011f7c5c3c6 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1738,7 +1738,7 @@ If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ... ValueError: Input has different freq from Period(freq=H) -If ``Period`` has other freqs, only the same ``offsets`` can be added. Otherwise, ``ValueError`` will be raised. +If ``Period`` has other frequencies, only the same ``offsets`` can be added. Otherwise, ``ValueError`` will be raised. .. ipython:: python diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 09a52ee527cb5..17197b805e86a 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -1061,7 +1061,7 @@ Plot Formatting Setting the plot style ~~~~~~~~~~~~~~~~~~~~~~ -From version 1.5 and up, matplotlib offers a range of preconfigured plotting styles. Setting the +From version 1.5 and up, matplotlib offers a range of pre-configured plotting styles. Setting the style can be used to easily give plots the general look that you want. Setting the style is as easy as calling ``matplotlib.style.use(my_plot_style)`` before creating your plot. 
For example you could write ``matplotlib.style.use('ggplot')`` for ggplot-style diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt index 3fc05158b7fe7..3a269e53a2404 100644 --- a/doc/source/whatsnew/v0.10.0.txt +++ b/doc/source/whatsnew/v0.10.0.txt @@ -370,7 +370,7 @@ Updated PyTables Support df1.get_dtype_counts() - performance improvements on table writing -- support for arbitrarily indexed dimensions +- support for arbitrary indexed dimensions - ``SparseSeries`` now has a ``density`` property (:issue:`2384`) - enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument to strip arbitrary characters (:issue:`2411`) diff --git a/doc/source/whatsnew/v0.10.1.txt b/doc/source/whatsnew/v0.10.1.txt index 2d5843101dec2..bb405c283ba24 100644 --- a/doc/source/whatsnew/v0.10.1.txt +++ b/doc/source/whatsnew/v0.10.1.txt @@ -149,7 +149,7 @@ combined result, by using ``where`` on a selector table. `nan`. - You can pass ``index`` to ``append``. This defaults to ``True``. This will - automagically create indicies on the *indexables* and *data columns* of the + automagically create indices on the *indexables* and *data columns* of the table - You can pass ``chunksize=an integer`` to ``append``, to change the writing @@ -157,7 +157,7 @@ combined result, by using ``where`` on a selector table. on writing. - You can pass ``expectedrows=an integer`` to the first ``append``, to set the - TOTAL number of expectedrows that ``PyTables`` will expected. This will + TOTAL number of expected rows that ``PyTables`` will expected. This will optimize read/write performance. - ``Select`` now supports passing ``start`` and ``stop`` to provide selection @@ -191,7 +191,7 @@ combined result, by using ``where`` on a selector table. 
levels with a very large number of combinatorial values (:issue:`2684`) - Fixed bug that causes plotting to fail when the index is a DatetimeIndex with a fixed-offset timezone (:issue:`2683`) -- Corrected businessday subtraction logic when the offset is more than 5 bdays +- Corrected business day subtraction logic when the offset is more than 5 bdays and the starting date is on a weekend (:issue:`2680`) - Fixed C file parser behavior when the file has more columns than data (:issue:`2668`) diff --git a/doc/source/whatsnew/v0.11.0.txt b/doc/source/whatsnew/v0.11.0.txt index b90a597815ec5..3c9cfda49aebd 100644 --- a/doc/source/whatsnew/v0.11.0.txt +++ b/doc/source/whatsnew/v0.11.0.txt @@ -33,7 +33,7 @@ three types of multi-axis indexing. See more at :ref:`Selection by Label <indexing.label>` -- ``.iloc`` is strictly integer position based (from ``0`` to ``length-1`` of the axis), will raise ``IndexError`` when the requested indicies are out of bounds. Allowed inputs are: +- ``.iloc`` is strictly integer position based (from ``0`` to ``length-1`` of the axis), will raise ``IndexError`` when the requested indices are out of bounds. Allowed inputs are: - An integer e.g. ``5`` - A list or array of integers ``[4, 3, 0]`` @@ -44,7 +44,7 @@ three types of multi-axis indexing. - ``.ix`` supports mixed integer and label based access. It is primarily label based, but will fallback to integer positional access. ``.ix`` is the most general and will support any of the inputs to ``.loc`` and ``.iloc``, as well as support for floating point label schemes. ``.ix`` is especially useful when dealing with mixed positional and label - based hierarchial indexes. + based hierarchical indexes. 
As using integer slices with ``.ix`` have different behavior depending on whether the slice is interpreted as position based or label based, it's usually better to be @@ -211,7 +211,7 @@ Astype conversion on ``datetime64[ns]`` to ``object``, implicitly converts ``NaT API changes ~~~~~~~~~~~ - - Added to_series() method to indicies, to facilitate the creation of indexers + - Added to_series() method to indices, to facilitate the creation of indexers (:issue:`3275`) - ``HDFStore`` diff --git a/doc/source/whatsnew/v0.12.0.txt b/doc/source/whatsnew/v0.12.0.txt index ad33c49792d9f..69483b18a5490 100644 --- a/doc/source/whatsnew/v0.12.0.txt +++ b/doc/source/whatsnew/v0.12.0.txt @@ -73,7 +73,7 @@ API changes e.g. a boolean Series, even with integer labels, will raise. Since ``iloc`` is purely positional based, the labels on the Series are not alignable (:issue:`3631`) - This case is rarely used, and there are plently of alternatives. This preserves the + This case is rarely used, and there are plenty of alternatives. This preserves the ``iloc`` API to be *purely* positional based. .. ipython:: python @@ -166,7 +166,7 @@ API changes - The internal ``pandas`` class hierarchy has changed (slightly). The previous ``PandasObject`` now is called ``PandasContainer`` and a new - ``PandasObject`` has become the baseclass for ``PandasContainer`` as well + ``PandasObject`` has become the base class for ``PandasContainer`` as well as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` provides string methods (from ``StringMixin``). (:issue:`4090`, :issue:`4092`) @@ -296,7 +296,7 @@ Other Enhancements df.replace(regex=r'\s*\.\s*', value=np.nan) to replace all occurrences of the string ``'.'`` with zero or more - instances of surrounding whitespace with ``NaN``. + instances of surrounding white space with ``NaN``. Regular string replacement still works as expected. 
For example, you can do @@ -403,7 +403,7 @@ Bug Fixes :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to numeric arrays if possible so that you can still plot, for example, an object array with floats. This happens before any drawing takes place which - elimnates any spurious plots from showing up. + eliminates any spurious plots from showing up. - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is a list or tuple. diff --git a/doc/source/whatsnew/v0.13.0.txt b/doc/source/whatsnew/v0.13.0.txt index 02ddc362255ec..94cd451196ead 100644 --- a/doc/source/whatsnew/v0.13.0.txt +++ b/doc/source/whatsnew/v0.13.0.txt @@ -414,7 +414,7 @@ HDFStore API Changes - add the keyword ``dropna=True`` to ``append`` to change whether ALL nan rows are not written to the store (default is ``True``, ALL nan rows are NOT written), also settable via the option ``io.hdf.dropna_table`` (:issue:`4625`) -- pass thru store creation arguments; can be used to support in-memory stores +- pass through store creation arguments; can be used to support in-memory stores DataFrame repr Changes ~~~~~~~~~~~~~~~~~~~~~~ @@ -443,7 +443,7 @@ Enhancements - Clipboard functionality now works with PySide (:issue:`4282`) - Added a more informative error message when plot arguments contain overlapping color and style arguments (:issue:`4402`) -- ``to_dict`` now takes ``records`` as a possible outtype. Returns an array +- ``to_dict`` now takes ``records`` as a possible out type. Returns an array of column-keyed dictionaries. 
(:issue:`4936`) - ``NaN`` handing in get_dummies (:issue:`4446`) with `dummy_na` diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt index 92c699017fc13..4408470c52feb 100644 --- a/doc/source/whatsnew/v0.14.0.txt +++ b/doc/source/whatsnew/v0.14.0.txt @@ -78,10 +78,10 @@ API changes - ``df.iloc[len(df)::-1]`` now enumerates all elements in reverse - The :meth:`DataFrame.interpolate` keyword ``downcast`` default has been changed from ``infer`` to - ``None``. This is to preseve the original dtype unless explicitly requested otherwise (:issue:`6290`). + ``None``. This is to preserve the original dtype unless explicitly requested otherwise (:issue:`6290`). - When converting a dataframe to HTML it used to return `Empty DataFrame`. This special case has been removed, instead a header with the column names is returned (:issue:`6062`). -- ``Series`` and ``Index`` now internall share more common operations, e.g. ``factorize(),nunique(),value_counts()`` are +- ``Series`` and ``Index`` now internally share more common operations, e.g. ``factorize(),nunique(),value_counts()`` are now supported on ``Index`` types as well. The ``Series.weekday`` property from is removed from Series for API consistency. Using a ``DatetimeIndex/PeriodIndex`` method on a Series will now raise a ``TypeError``. (:issue:`4551`, :issue:`4056`, :issue:`5519`, :issue:`6380`, :issue:`7206`). @@ -294,7 +294,7 @@ Display Changes Text Parsing API Changes ~~~~~~~~~~~~~~~~~~~~~~~~ -:func:`read_csv`/:func:`read_table` will now be noiser w.r.t invalid options rather than falling back to the ``PythonParser``. +:func:`read_csv`/:func:`read_table` will now be noisier w.r.t invalid options rather than falling back to the ``PythonParser``. - Raise ``ValueError`` when ``sep`` specified with ``delim_whitespace=True`` in :func:`read_csv`/:func:`read_table` @@ -714,7 +714,7 @@ Deprecations Use the `percentiles` keyword instead, which takes a list of percentiles to display. 
The default output is unchanged. -- The default return type of :func:`boxplot` will change from a dict to a matpltolib Axes +- The default return type of :func:`boxplot` will change from a dict to a matplotlib Axes in a future release. You can use the future behavior now by passing ``return_type='axes'`` to boxplot. @@ -781,7 +781,7 @@ Enhancements noon, January 1, 4713 BC. Because nanoseconds are used to define the time in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) - ``DataFrame.to_stata`` will now check data for compatibility with Stata data types - and will upcast when needed. When it is not possible to losslessly upcast, a warning + and will upcast when needed. When it is not possible to lossless upcast, a warning is issued (:issue:`6327`) - ``DataFrame.to_stata`` and ``StataWriter`` will accept keyword arguments time_stamp and data_label which allow the time stamp and dataset label to be set when creating a @@ -881,7 +881,7 @@ Bug Fixes - Prevent segfault due to MultiIndex not being supported in HDFStore table format (:issue:`1848`) - Bug in ``pd.DataFrame.sort_index`` where mergesort wasn't stable when ``ascending=False`` (:issue:`6399`) -- Bug in ``pd.tseries.frequencies.to_offset`` when argument has leading zeroes (:issue:`6391`) +- Bug in ``pd.tseries.frequencies.to_offset`` when argument has leading zeros (:issue:`6391`) - Bug in version string gen. for dev versions with shallow clones / install from tarball (:issue:`6127`) - Inconsistent tz parsing ``Timestamp`` / ``to_datetime`` for current year (:issue:`5958`) - Indexing bugs with reordered indexes (:issue:`6252`, :issue:`6254`) @@ -922,7 +922,7 @@ Bug Fixes - Bug in ``Series.reindex`` when specifying a ``method`` with some nan values was inconsistent (noted on a resample) (:issue:`6418`) - Bug in :meth:`DataFrame.replace` where nested dicts were erroneously depending on the order of dictionary keys and values (:issue:`5338`). 
-- Perf issue in concatting with empty objects (:issue:`3259`) +- Performance issue in concatenating with empty objects (:issue:`3259`) - Clarify sorting of ``sym_diff`` on ``Index`` objects with ``NaN`` values (:issue:`6444`) - Regression in ``MultiIndex.from_product`` with a ``DatetimeIndex`` as input (:issue:`6439`) - Bug in ``str.extract`` when passed a non-default index (:issue:`6348`) @@ -966,8 +966,8 @@ Bug Fixes - Bug in downcasting inference with empty arrays (:issue:`6733`) - Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) - Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) -- Bug in ``DataFrame.replace()`` where regex metacharacters were being treated - as regexs even when ``regex=False`` (:issue:`6777`). +- Bug in ``DataFrame.replace()`` where regex meta characters were being treated + as regex even when ``regex=False`` (:issue:`6777`). - Bug in timedelta ops on 32-bit platforms (:issue:`6808`) - Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) - Bug in expressions.py where numexpr would try to evaluate arithmetic ops @@ -983,7 +983,7 @@ Bug Fixes would only replace the first occurrence of a value (:issue:`6689`) - Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) - Bug in ``Series.__unicode__`` when ``max_rows=None`` and the Series has more than 1000 rows. 
(:issue:`6863`) -- Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) +- Bug in ``groupby.get_group`` where a datelike wasn't always accepted (:issue:`5267`) - Bug in ``groupBy.get_group`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`6914`) - Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` converting ``NaT`` incorrectly (:issue:`5546`) - Bug in arithmetic operations affecting ``NaT`` (:issue:`6873`) @@ -994,7 +994,7 @@ Bug Fixes - Bug in ``DataFrame.plot`` and ``Series.plot``, where the legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) - Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) -- Bug in C parser with leading whitespace (:issue:`3374`) +- Bug in C parser with leading white space (:issue:`3374`) - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines - Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt index 32a2391c75531..f7f69218e0ef5 100644 --- a/doc/source/whatsnew/v0.14.1.txt +++ b/doc/source/whatsnew/v0.14.1.txt @@ -172,7 +172,7 @@ Bug Fixes - Bug in Panel indexing with a multi-index axis (:issue:`7516`) - Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (:issue:`7523`) - Bug in setitem with list-of-lists and single vs mixed types (:issue:`7551`:) -- Bug in timeops with non-aligned Series (:issue:`7500`) +- Bug in time ops with non-aligned Series (:issue:`7500`) - Bug in timedelta inference when assigning an incomplete Series (:issue:`7592`) - Bug in groupby ``.nth`` with a Series and 
integer-like column name (:issue:`7559`) - Bug in ``Series.get`` with a boolean accessor (:issue:`7407`) @@ -209,7 +209,7 @@ Bug Fixes - Bug in inferred_freq results in None for eastern hemisphere timezones (:issue:`7310`) - Bug in ``Easter`` returns incorrect date when offset is negative (:issue:`7195`) - Bug in broadcasting with ``.div``, integer dtypes and divide-by-zero (:issue:`7325`) -- Bug in ``CustomBusinessDay.apply`` raiases ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) +- Bug in ``CustomBusinessDay.apply`` raises ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) - Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) - Bug in ``.loc`` with a list of indexers on a single-multi index level (that is not nested) (:issue:`7349`) - Bug in ``Series.map`` when mapping a dict with tuple keys of different lengths (:issue:`7333`) diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index 0f1a8c324de54..94093b2cfb16c 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -44,7 +44,7 @@ users upgrade to this version. .. warning:: - The refactorings in :class:`~pandas.Categorical` changed the two argument constructor from + The refactoring in :class:`~pandas.Categorical` changed the two argument constructor from "codes/labels and levels" to "values and levels (now called 'categories')". This can lead to subtle bugs. If you use :class:`~pandas.Categorical` directly, please audit your code before updating to this pandas version and change it to use the :meth:`~pandas.Categorical.from_codes` constructor. See more on ``Categorical`` :ref:`here <whatsnew_0150.cat>` @@ -139,7 +139,7 @@ This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a The arguments to ``pd.to_timedelta`` are now ``(arg,unit='ns',box=True,coerce=False)``, previously were ``(arg,box=True,unit='ns')`` as these are more logical. 
-Consruct a scalar +Construct a scalar .. ipython:: python @@ -794,7 +794,7 @@ Other notable API changes: .. _whatsnew_0150.blanklines: - Made both the C-based and Python engines for `read_csv` and `read_table` ignore empty lines in input as well as - whitespace-filled lines, as long as ``sep`` is not whitespace. This is an API change + white space-filled lines, as long as ``sep`` is not white space. This is an API change that can be controlled by the keyword parameter ``skip_blank_lines``. See :ref:`the docs <io.skiplines>` (:issue:`4466`) - A timeseries/index localized to UTC when inserted into a Series/DataFrame will preserve the UTC timezone @@ -940,7 +940,7 @@ Enhancements Enhancements in the importing/exporting of Stata files: -- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`) +- Added support for bool, uint8, uint16 and uint32 data types in ``to_stata`` (:issue:`7097`, :issue:`7365`) - Added conversion option when importing Stata files (:issue:`8527`) - ``DataFrame.to_stata`` and ``StataWriter`` check string length for compatibility with limitations imposed in dta files where fixed-width @@ -988,7 +988,7 @@ Other: - Added ``split`` as an option to the ``orient`` argument in ``pd.DataFrame.to_dict``. (:issue:`7840`) - The ``get_dummies`` method can now be used on DataFrames. By default only - catagorical columns are encoded as 0's and 1's, while other columns are + categorical columns are encoded as 0's and 1's, while other columns are left untouched. .. 
ipython:: python @@ -1070,7 +1070,7 @@ Other: idx.duplicated() idx.drop_duplicates() -- add ``copy=True`` argument to ``pd.concat`` to enable pass thru of complete blocks (:issue:`8252`) +- add ``copy=True`` argument to ``pd.concat`` to enable pass through of complete blocks (:issue:`8252`) - Added support for numpy 1.8+ data types (``bool_``, ``int_``, ``float_``, ``string_``) for conversion to R dataframe (:issue:`8400`) diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt index 345fc9f1b5da7..918eab3a9763e 100644 --- a/doc/source/whatsnew/v0.15.1.txt +++ b/doc/source/whatsnew/v0.15.1.txt @@ -72,7 +72,7 @@ API changes df.groupby(ts, as_index=False).max() -- ``groupby`` will not erroneously exclude columns if the column name conflics +- ``groupby`` will not erroneously exclude columns if the column name conflicts with the grouper name (:issue:`8112`): .. ipython:: python diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt index f1dfab0f57ed3..16a57676c89c0 100644 --- a/doc/source/whatsnew/v0.15.2.txt +++ b/doc/source/whatsnew/v0.15.2.txt @@ -165,7 +165,7 @@ Other enhancements: - Added support for ``utcfromtimestamp()``, ``fromtimestamp()``, and ``combine()`` on `Timestamp` class (:issue:`5351`). - Added Google Analytics (`pandas.io.ga`) basic documentation (:issue:`8835`). See `here <http://pandas.pydata.org/pandas-docs/version/0.15.2/remote_data.html#remote-data-ga>`__. - ``Timedelta`` arithmetic returns ``NotImplemented`` in unknown cases, allowing extensions by custom classes (:issue:`8813`). -- ``Timedelta`` now supports arithemtic with ``numpy.ndarray`` objects of the appropriate dtype (numpy 1.8 or newer only) (:issue:`8884`). +- ``Timedelta`` now supports arithmetic with ``numpy.ndarray`` objects of the appropriate dtype (numpy 1.8 or newer only) (:issue:`8884`). - Added ``Timedelta.to_timedelta64()`` method to the public API (:issue:`8884`). 
- Added ``gbq.generate_bq_schema()`` function to the gbq module (:issue:`8325`). - ``Series`` now works with map objects the same way as generators (:issue:`8909`). @@ -173,7 +173,7 @@ Other enhancements: - ``to_datetime`` gains an ``exact`` keyword to allow for a format to not require an exact match for a provided format string (if its ``False``). ``exact`` defaults to ``True`` (meaning that exact matching is still the default) (:issue:`8904`) - Added ``axvlines`` boolean option to parallel_coordinates plot function, determines whether vertical lines will be printed, default is True - Added ability to read table footers to read_html (:issue:`8552`) -- ``to_sql`` now infers datatypes of non-NA values for columns that contain NA values and have dtype ``object`` (:issue:`8778`). +- ``to_sql`` now infers data types of non-NA values for columns that contain NA values and have dtype ``object`` (:issue:`8778`). .. _whatsnew_0152.performance: @@ -215,7 +215,7 @@ Bug Fixes - ``io.data.Options`` now raises ``RemoteDataError`` when no expiry dates are available from Yahoo and when it receives no data from Yahoo (:issue:`8761`), (:issue:`8783`). - Fix: The font size was only set on x axis if vertical or the y axis if horizontal. (:issue:`8765`) - Fixed division by 0 when reading big csv files in python 3 (:issue:`8621`) -- Bug in outputting a Multindex with ``to_html,index=False`` which would add an extra column (:issue:`8452`) +- Bug in outputting a MultiIndex with ``to_html,index=False`` which would add an extra column (:issue:`8452`) - Imported categorical variables from Stata files retain the ordinal information in the underlying data (:issue:`8836`). - Defined ``.size`` attribute across ``NDFrame`` objects to provide compat with numpy >= 1.9.1; buggy with ``np.array_split`` (:issue:`8846`) - Skip testing of histogram plots for matplotlib <= 1.2 (:issue:`8648`). 
@@ -230,11 +230,11 @@ Bug Fixes - Bug where index name was still used when plotting a series with ``use_index=False`` (:issue:`8558`). - Bugs when trying to stack multiple columns, when some (or all) of the level names are numbers (:issue:`8584`). - Bug in ``MultiIndex`` where ``__contains__`` returns wrong result if index is not lexically sorted or unique (:issue:`7724`) -- BUG CSV: fix problem with trailing whitespace in skipped rows, (:issue:`8679`), (:issue:`8661`), (:issue:`8983`) +- BUG CSV: fix problem with trailing white space in skipped rows, (:issue:`8679`), (:issue:`8661`), (:issue:`8983`) - Regression in ``Timestamp`` does not parse 'Z' zone designator for UTC (:issue:`8771`) - Bug in `StataWriter` the produces writes strings with 244 characters irrespective of actual size (:issue:`8969`) - Fixed ValueError raised by cummin/cummax when datetime64 Series contains NaT. (:issue:`8965`) -- Bug in Datareader returns object dtype if there are missing values (:issue:`8980`) +- Bug in DataReader returns object dtype if there are missing values (:issue:`8980`) - Bug in plotting if sharex was enabled and index was a timeseries, would show labels on multiple axes (:issue:`3964`). - Bug where passing a unit to the TimedeltaIndex constructor applied the to nano-second conversion twice. (:issue:`9011`). - Bug in plotting of a period-like array (:issue:`9012`) diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 48af06d124f2e..214a08ef0bbff 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -133,7 +133,7 @@ from a ``scipy.sparse.coo_matrix``: String Methods Enhancements ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- Following new methods are accesible via ``.str`` accessor to apply the function to each values. This is intended to make it more consistent with standard methods on strings. 
(:issue:`9282`, :issue:`9352`, :issue:`9386`, :issue:`9387`, :issue:`9439`) +- Following new methods are accessible via ``.str`` accessor to apply the function to each value. This is intended to make it more consistent with standard methods on strings. (:issue:`9282`, :issue:`9352`, :issue:`9386`, :issue:`9387`, :issue:`9439`) ============= ============= ============= =============== =============== .. .. Methods .. .. @@ -530,7 +530,7 @@ Deprecations We refer users to the external package `pandas-qt <https://github.com/datalyze-solutions/pandas-qt>`_. (:issue:`9615`) - The ``pandas.rpy`` interface is deprecated and will be removed in a future version. - Similar functionaility can be accessed thru the `rpy2 <http://rpy2.bitbucket.org/>`_ project (:issue:`9602`) + Similar functionality can be accessed through the `rpy2 <http://rpy2.bitbucket.org/>`_ project (:issue:`9602`) - Adding ``DatetimeIndex/PeriodIndex`` to another ``DatetimeIndex/PeriodIndex`` is being deprecated as a set-operation. This will be changed to a ``TypeError`` in a future version. ``.union()`` should be used for the union set operation. (:issue:`9094`) - Subtracting ``DatetimeIndex/PeriodIndex`` from another ``DatetimeIndex/PeriodIndex`` is being deprecated as a set-operation. This will be changed to an actual numeric subtraction yielding a ``TimeDeltaIndex`` in a future version. ``.difference()`` should be used for the differencing set operation. (:issue:`9094`) @@ -601,7 +601,7 @@ Bug Fixes - Bug in binary operator method (eg ``.mul()``) alignment with integer levels (:issue:`9463`). - Bug in boxplot, scatter and hexbin plot may show an unnecessary warning (:issue:`8877`) - Bug in subplot with ``layout`` kw may show unnecessary warning (:issue:`9464`) -- Bug in using grouper functions that need passed thru arguments (e.g. axis), when using wrapped function (e.g. ``fillna``), (:issue:`9221`) +- Bug in using grouper functions that need passed through arguments (e.g.
axis), when using wrapped function (e.g. ``fillna``), (:issue:`9221`) - ``DataFrame`` now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) - Bug in ``read_csv`` when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - ``isnull`` now detects ``NaT`` in ``PeriodIndex`` (:issue:`9129`) @@ -613,7 +613,7 @@ Bug Fixes - Fixed division by zero error for ``Series.kurt()`` when all values are equal (:issue:`9197`) - Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' format to cells if no other format was applied. This prevented other row or column formatting being applied. (:issue:`9167`) - Fixes issue with ``index_col=False`` when ``usecols`` is also specified in ``read_csv``. (:issue:`9082`) -- Bug where ``wide_to_long`` would modify the input stubnames list (:issue:`9204`) +- Bug where ``wide_to_long`` would modify the input stub names list (:issue:`9204`) - Bug in ``to_sql`` not storing float64 values using double precision. (:issue:`9009`) - ``SparseSeries`` and ``SparsePanel`` now accept zero argument constructors (same as their non-sparse counterparts) (:issue:`9272`). - Regression in merging ``Categorical`` and ``object`` dtypes (:issue:`9426`) @@ -624,7 +624,7 @@ Bug Fixes - Fixed bug with reading CSV files from Amazon S3 on python 3 raising a TypeError (:issue:`9452`) - Bug in the Google BigQuery reader where the 'jobComplete' key may be present but False in the query results (:issue:`8728`) - Bug in ``Series.values_counts`` with excluding ``NaN`` for categorical type ``Series`` with ``dropna=True`` (:issue:`9443`) -- Fixed mising numeric_only option for ``DataFrame.std/var/sem`` (:issue:`9201`) +- Fixed missing numeric_only option for ``DataFrame.std/var/sem`` (:issue:`9201`) - Support constructing ``Panel`` or ``Panel4D`` with scalar data (:issue:`8285`) - ``Series`` text representation disconnected from `max_rows`/`max_columns` (:issue:`7508`). 
diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt index 5c716f6ad45c1..e2da12fc94b58 100644 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -133,7 +133,7 @@ groupby operations on the index will preserve the index nature as well reindexing operations, will return a resulting index based on the type of the passed indexer, meaning that passing a list will return a plain-old-``Index``; indexing with a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories -of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with +of the PASSED ``Categorical`` dtype. This allows one to arbitrarily index these even with values NOT in the categories, similarly to how you can reindex ANY pandas index. .. code-block:: ipython @@ -237,7 +237,7 @@ enhancements make string operations easier and more consistent with standard pyt idx.str.startswith('a') s[s.index.str.startswith('a')] -- The following new methods are accesible via ``.str`` accessor to apply the function to each values. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`) +- The following new methods are accessible via ``.str`` accessor to apply the function to each value. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`) ================ =============== =============== =============== ================ .. .. Methods .. .. @@ -348,7 +348,7 @@ Deprecations Index Representation ~~~~~~~~~~~~~~~~~~~~ -The string representation of ``Index`` and its sub-classes have now been unified. These will show a single-line display if there are few values; a wrapped multi-line display for a lot of values (but less than ``display.max_seq_items``; if lots of items (> ``display.max_seq_items``) will show a truncated display (the head and tail of the data). The formatting for ``MultiIndex`` is unchanges (a multi-line wrapped display).
The display width responds to the option ``display.max_seq_items``, which is defaulted to 100. (:issue:`6482`) +The string representation of ``Index`` and its sub-classes have now been unified. These will show a single-line display if there are few values; a wrapped multi-line display for a lot of values (but less than ``display.max_seq_items``; if lots of items (> ``display.max_seq_items``) will show a truncated display (the head and tail of the data). The formatting for ``MultiIndex`` is unchanged (a multi-line wrapped display). The display width responds to the option ``display.max_seq_items``, which is defaulted to 100. (:issue:`6482`) Previous Behavior @@ -437,8 +437,8 @@ Bug Fixes - Bug in ``to_msgpack`` and ``read_msgpack`` zlib and blosc compression support (:issue:`9783`) - Bug ``GroupBy.size`` doesn't attach index name properly if grouped by ``TimeGrouper`` (:issue:`9925`) - Bug causing an exception in slice assignments because ``length_of_indexer`` returns wrong results (:issue:`9995`) -- Bug in csv parser causing lines with initial whitespace plus one non-space character to be skipped. (:issue:`9710`) -- Bug in C csv parser causing spurious NaNs when data started with newline followed by whitespace. (:issue:`10022`) +- Bug in csv parser causing lines with initial white space plus one non-space character to be skipped. (:issue:`9710`) +- Bug in C csv parser causing spurious NaNs when data started with newline followed by white space. 
(:issue:`10022`) - Bug causing elements with a null group to spill into the final group when grouping by a ``Categorical`` (:issue:`9603`) - Bug where .iloc and .loc behavior is not consistent on empty dataframes (:issue:`9964`) - Bug in invalid attribute access on a ``TimedeltaIndex`` incorrectly raised ``ValueError`` instead of ``AttributeError`` (:issue:`9680`) diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt index 29f6832b48aaf..047da4c94093b 100644 --- a/doc/source/whatsnew/v0.16.2.txt +++ b/doc/source/whatsnew/v0.16.2.txt @@ -125,7 +125,7 @@ Bug Fixes - Bug where ``HDFStore.select`` modifies the passed columns list (:issue:`7212`) - Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`) - Bug in ``to_json`` with certain orients and a ``CategoricalIndex`` would segfault (:issue:`10317`) -- Bug where some of the nan funcs do not have consistent return dtypes (:issue:`10251`) +- Bug where some of the nan functions do not have consistent return dtypes (:issue:`10251`) - Bug in ``DataFrame.quantile`` on checking that a valid axis was passed (:issue:`9543`) - Bug in ``groupby.apply`` aggregation for ``Categorical`` not preserving categories (:issue:`10138`) - Bug in ``to_csv`` where ``date_format`` is ignored if the ``datetime`` is fractional (:issue:`10209`) @@ -155,7 +155,7 @@ Bug Fixes - Bug in ``GroupBy.get_group`` raises ``ValueError`` when group key contains ``NaT`` (:issue:`6992`) - Bug in ``SparseSeries`` constructor ignores input data name (:issue:`10258`) - Bug in ``Categorical.remove_categories`` causing a ``ValueError`` when removing the ``NaN`` category if underlying dtype is floating-point (:issue:`10156`) -- Bug where infer_freq infers timerule (WOM-5XXX) unsupported by to_offset (:issue:`9425`) +- Bug where infer_freq infers time rule (WOM-5XXX) unsupported by to_offset (:issue:`9425`) - Bug in ``DataFrame.to_hdf()`` where table format would raise a seemingly unrelated error for 
invalid (non-string) column names. This is now explicitly forbidden. (:issue:`9057`) - Bug to handle masking empty ``DataFrame`` (:issue:`10126`). - Bug where MySQL interface could not handle numeric table/column names (:issue:`10255`) diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index ec8f318b72fef..1b98ebd0e19c5 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -308,7 +308,7 @@ See the :ref:`documentation <io.excel>` for more details. os.remove('test.xlsx') Previously, it was necessary to specify the ``has_index_names`` argument in ``read_excel``, -if the serialized data had index names. For version 0.17.0 the ouptput format of ``to_excel`` +if the serialized data had index names. For version 0.17.0 the output format of ``to_excel`` has been changed to make this keyword unnecessary - the change is shown below. **Old** @@ -1042,7 +1042,7 @@ Performance Improvements Bug Fixes ~~~~~~~~~ -- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) +- Bug in incorrect computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) - Bug in ``.isin`` on older numpies (:issue:`11232`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`) diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt index e1b561c4deacb..990f27950d982 100644 --- a/doc/source/whatsnew/v0.17.1.txt +++ b/doc/source/whatsnew/v0.17.1.txt @@ -41,7 +41,7 @@ Conditional HTML Formatting We've added *experimental* support for conditional HTML formatting: the visual styling of a DataFrame based on the data. The styling is accomplished with HTML and CSS. 
-Acesses the styler class with the :attr:`pandas.DataFrame.style`, attribute, +Accesses the styler class with the :attr:`pandas.DataFrame.style`, attribute, an instance of :class:`~pandas.core.style.Styler` with your data attached. Here's a quick example: diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt index bfd314639aa60..8dc49dbc319a6 100644 --- a/doc/source/whatsnew/v0.18.0.txt +++ b/doc/source/whatsnew/v0.18.0.txt @@ -330,7 +330,7 @@ Timedeltas t[0].round('2h') -In addition, ``.round()``, ``.floor()`` and ``.ceil()`` will be available thru the ``.dt`` accessor of ``Series``. +In addition, ``.round()``, ``.floor()`` and ``.ceil()`` will be available through the ``.dt`` accessor of ``Series``. .. ipython:: python @@ -414,7 +414,7 @@ New Behavior: df.loc[ix, 'b'] = df.loc[ix, 'b'] df.dtypes -When a DataFrame's integer slice is partially updated with a new slice of floats that could potentially be downcasted to integer without losing precision, the dtype of the slice will be set to float instead of integer. +When a DataFrame's integer slice is partially updated with a new slice of floats that could potentially be down-casted to integer without losing precision, the dtype of the slice will be set to float instead of integer. Previous Behavior: @@ -516,19 +516,19 @@ Other enhancements - ``Series`` gained an ``is_unique`` attribute (:issue:`11946`) - ``DataFrame.quantile`` and ``Series.quantile`` now accept ``interpolation`` keyword (:issue:`10174`). - Added ``DataFrame.style.format`` for more flexible formatting of cell values (:issue:`11692`) -- ``DataFrame.select_dtypes`` now allows the ``np.float16`` typecode (:issue:`11990`) +- ``DataFrame.select_dtypes`` now allows the ``np.float16`` type code (:issue:`11990`) - ``pivot_table()`` now accepts most iterables for the ``values`` parameter (:issue:`12017`) - Added Google ``BigQuery`` service account authentication support, which enables authentication on remote servers. 
(:issue:`11881`, :issue:`12572`). For further details see `here <https://pandas-gbq.readthedocs.io/en/latest/intro.html>`__ - ``HDFStore`` is now iterable: ``for k in store`` is equivalent to ``for k in store.keys()`` (:issue:`12221`). - Add missing methods/fields to ``.dt`` for ``Period`` (:issue:`8848`) -- The entire codebase has been ``PEP``-ified (:issue:`12096`) +- The entire code base has been ``PEP``-ified (:issue:`12096`) .. _whatsnew_0180.api_breaking: Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- the leading whitespaces have been removed from the output of ``.to_string(index=False)`` method (:issue:`11833`) +- the leading white spaces have been removed from the output of ``.to_string(index=False)`` method (:issue:`11833`) - the ``out`` parameter has been removed from the ``Series.round()`` method. (:issue:`11763`) - ``DataFrame.round()`` leaves non-numeric columns unchanged in its return, rather than raises. (:issue:`11885`) - ``DataFrame.head(0)`` and ``DataFrame.tail(0)`` return empty frames, rather than ``self``. (:issue:`11937`) @@ -1186,7 +1186,7 @@ Performance Improvements - Improved performance in construction of ``Categoricals`` with ``Series`` of datetimes containing ``NaT`` (:issue:`12077`) -- Improved performance of ISO 8601 date parsing for dates without separators (:issue:`11899`), leading zeros (:issue:`11871`) and with whitespace preceding the time zone (:issue:`9714`) +- Improved performance of ISO 8601 date parsing for dates without separators (:issue:`11899`), leading zeros (:issue:`11871`) and with white space preceding the time zone (:issue:`9714`) diff --git a/doc/source/whatsnew/v0.18.1.txt b/doc/source/whatsnew/v0.18.1.txt index de9a5d5d8afae..34921505a46bf 100644 --- a/doc/source/whatsnew/v0.18.1.txt +++ b/doc/source/whatsnew/v0.18.1.txt @@ -106,7 +106,7 @@ Now you can do: .. 
_whatsnew_0181.enhancements.method_chain: -Method chaininng improvements +Method chaining improvements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following methods / indexers now accept a ``callable``. It is intended to make @@ -598,14 +598,14 @@ Bug Fixes - Bug in ``.resample(...)`` with a ``PeriodIndex`` when resampling to an existing frequency (:issue:`12770`) - Bug in printing data which contains ``Period`` with different ``freq`` raises ``ValueError`` (:issue:`12615`) - Bug in ``Series`` construction with ``Categorical`` and ``dtype='category'`` is specified (:issue:`12574`) -- Bugs in concatenation with a coercable dtype was too aggressive, resulting in different dtypes in outputformatting when an object was longer than ``display.max_rows`` (:issue:`12411`, :issue:`12045`, :issue:`11594`, :issue:`10571`, :issue:`12211`) +- Bugs in concatenation with a coercible dtype was too aggressive, resulting in different dtypes in output formatting when an object was longer than ``display.max_rows`` (:issue:`12411`, :issue:`12045`, :issue:`11594`, :issue:`10571`, :issue:`12211`) - Bug in ``float_format`` option with option not being validated as a callable. 
(:issue:`12706`) - Bug in ``GroupBy.filter`` when ``dropna=False`` and no groups fulfilled the criteria (:issue:`12768`) - Bug in ``__name__`` of ``.cum*`` functions (:issue:`12021`) - Bug in ``.astype()`` of a ``Float64Inde/Int64Index`` to an ``Int64Index`` (:issue:`12881`) -- Bug in roundtripping an integer based index in ``.to_json()/.read_json()`` when ``orient='index'`` (the default) (:issue:`12866`) +- Bug in round tripping an integer based index in ``.to_json()/.read_json()`` when ``orient='index'`` (the default) (:issue:`12866`) - Bug in plotting ``Categorical`` dtypes cause error when attempting stacked bar plot (:issue:`13019`) -- Compat with >= ``numpy`` 1.11 for ``NaT`` comparions (:issue:`12969`) +- Compat with >= ``numpy`` 1.11 for ``NaT`` comparisons (:issue:`12969`) - Bug in ``.drop()`` with a non-unique ``MultiIndex``. (:issue:`12701`) - Bug in ``.concat`` of datetime tz-aware and naive DataFrames (:issue:`12467`) - Bug in correctly raising a ``ValueError`` in ``.resample(..).fillna(..)`` when passing a non-string (:issue:`12952`) @@ -673,7 +673,7 @@ Bug Fixes - Bug in ``pd.concat`` raises ``AttributeError`` when input data contains tz-aware datetime and timedelta (:issue:`12620`) - Bug in ``pd.concat`` did not handle empty ``Series`` properly (:issue:`11082`) -- Bug in ``.plot.bar`` alginment when ``width`` is specified with ``int`` (:issue:`12979`) +- Bug in ``.plot.bar`` alignment when ``width`` is specified with ``int`` (:issue:`12979`) - Bug in ``fill_value`` is ignored if the argument to a binary operator is a constant (:issue:`12723`) diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index 50d7877a9cd48..73fb124afef87 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -386,7 +386,7 @@ Google BigQuery Enhancements Fine-grained numpy errstate ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was 
imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas codebase. (:issue:`13109`, :issue:`13145`) +Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas code base. (:issue:`13109`, :issue:`13145`) After upgrading pandas, you may see *new* ``RuntimeWarnings`` being issued from your code. These are likely legitimate, and the underlying cause likely existed in the code when using previous versions of pandas that simply silenced the warning. Use `numpy.errstate <http://docs.scipy.org/doc/numpy/reference/generated/numpy.errstate.html>`__ around the source of the ``RuntimeWarning`` to control how these conditions are handled. @@ -750,7 +750,7 @@ This will now convert integers/floats with the default unit of ``ns``. Bug fixes related to ``.to_datetime()``: - Bug in ``pd.to_datetime()`` when passing integers or floats, and no ``unit`` and ``errors='coerce'`` (:issue:`13180`). -- Bug in ``pd.to_datetime()`` when passing invalid datatypes (e.g. bool); will now respect the ``errors`` keyword (:issue:`13176`) +- Bug in ``pd.to_datetime()`` when passing invalid data types (e.g. 
bool); will now respect the ``errors`` keyword (:issue:`13176`) - Bug in ``pd.to_datetime()`` which overflowed on ``int8``, and ``int16`` dtypes (:issue:`13451`) - Bug in ``pd.to_datetime()`` raise ``AttributeError`` with ``NaN`` and the other string is not valid when ``errors='ignore'`` (:issue:`12424`) - Bug in ``pd.to_datetime()`` did not cast floats correctly when ``unit`` was specified, resulting in truncated datetime (:issue:`13834`) @@ -1512,7 +1512,7 @@ Bug Fixes - Bug in ``.set_index`` raises ``AmbiguousTimeError`` if new index contains DST boundary and multi levels (:issue:`12920`) - Bug in ``.shift`` raises ``AmbiguousTimeError`` if data contains datetime near DST boundary (:issue:`13926`) - Bug in ``pd.read_hdf()`` returns incorrect result when a ``DataFrame`` with a ``categorical`` column and a query which doesn't match any values (:issue:`13792`) -- Bug in ``.iloc`` when indexing with a non lex-sorted MultiIndex (:issue:`13797`) +- Bug in ``.iloc`` when indexing with a non lexsorted MultiIndex (:issue:`13797`) - Bug in ``.loc`` when indexing with date strings in a reverse sorted ``DatetimeIndex`` (:issue:`14316`) - Bug in ``Series`` comparison operators when dealing with zero dim NumPy arrays (:issue:`13006`) - Bug in ``.combine_first`` may return incorrect ``dtype`` (:issue:`7630`, :issue:`10567`) diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt index b8afe18e0f871..1c577dddf1cd4 100644 --- a/doc/source/whatsnew/v0.19.1.txt +++ b/doc/source/whatsnew/v0.19.1.txt @@ -43,7 +43,7 @@ Bug Fixes - Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`) - Bug in ``TimedeltaIndex`` addition with a Datetime-like object where addition overflow in the negative direction was not being caught (:issue:`14068`, :issue:`14453`) - Bug in string indexing against data with ``object`` ``Index`` may raise ``AttributeError`` (:issue:`14424`) -- Corrrecly raise ``ValueError`` on empty input to ``pd.eval()`` and 
``df.query()`` (:issue:`13139`) +- Correctly raise ``ValueError`` on empty input to ``pd.eval()`` and ``df.query()`` (:issue:`13139`) - Bug in ``RangeIndex.intersection`` when result is a empty set (:issue:`14364`). - Bug in groupby-transform broadcasting that could cause incorrect dtype coercion (:issue:`14457`) - Bug in ``Series.__setitem__`` which allowed mutating read-only arrays (:issue:`14359`). diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 5fb725a76770e..bd90e371597dc 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -24,7 +24,7 @@ Highlights include: .. warning:: - Pandas has changed the internal structure and layout of the codebase. + Pandas has changed the internal structure and layout of the code base. This can affect imports that are not from the top-level ``pandas.*`` namespace, please see the changes :ref:`here <whatsnew_0200.privacy>`. Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating. @@ -324,7 +324,7 @@ this JSON Table schema representation of the Series or DataFrame if you are using IPython (or another frontend like `nteract`_ using the Jupyter messaging protocol). This gives frontends like the Jupyter notebook and `nteract`_ -more flexiblity in how they display pandas objects, since they have +more flexibility in how they display pandas objects, since they have more information about the data. You must enable this by setting the ``display.html.table_schema`` option to ``True``. @@ -462,7 +462,7 @@ Selecting via a scalar value that is contained *in* the intervals. Other Enhancements ^^^^^^^^^^^^^^^^^^ -- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closedness. 
See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`) +- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closed. See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`) - Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`. - ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`) - ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`) @@ -1389,7 +1389,7 @@ list, and a dict of column names to scalars or lists. This provides a useful syn (potentially different) aggregations. However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the result columns. This is a complicated and confusing syntax, as well as not consistent -between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionaility. +between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionality. - We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed one to ``rename`` the resulting aggregation, but this had a completely different @@ -1528,7 +1528,7 @@ Removal of prior version deprecations/changes - The ``pandas.io.ga`` module with a ``google-analytics`` interface is removed (:issue:`11308`). Similar functionality can be found in the `Google2Pandas <https://github.com/panalysis/Google2Pandas>`__ package. - ``pd.to_datetime`` and ``pd.to_timedelta`` have dropped the ``coerce`` parameter in favor of ``errors`` (:issue:`13602`) -- ``pandas.stats.fama_macbeth``, ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var``, as well as the top-level ``pandas.fama_macbeth`` and ``pandas.ols`` routines are removed. 
Similar functionaility can be found in the `statsmodels <shttp://www.statsmodels.org/dev/>`__ package. (:issue:`11898`) +- ``pandas.stats.fama_macbeth``, ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var``, as well as the top-level ``pandas.fama_macbeth`` and ``pandas.ols`` routines are removed. Similar functionality can be found in the `statsmodels <shttp://www.statsmodels.org/dev/>`__ package. (:issue:`11898`) - The ``TimeSeries`` and ``SparseTimeSeries`` classes, aliases of ``Series`` and ``SparseSeries``, are removed (:issue:`10890`, :issue:`15098`). - ``Series.is_time_series`` is dropped in favor of ``Series.index.is_all_dates`` (:issue:`15098`) @@ -1640,7 +1640,7 @@ I/O - Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`) - Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`) - Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`) -- Bug in ``pd.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`) +- Bug in ``pd.read_csv()`` with ``parse_dates`` when multi-line headers are specified (:issue:`15376`) - Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`) - Bug in ``pd.read_csv()`` when an index was specified and no values were specified as null values (:issue:`15835`) - Bug in ``pd.read_csv()`` in which certain invalid file objects caused the Python interpreter to crash (:issue:`15337`) @@ -1722,7 +1722,7 @@ Numeric - Bug in ``pd.cut()`` with a single bin on an all 0s array (:issue:`15428`) - Bug in ``pd.qcut()`` with a single quantile and an array with identical values (:issue:`15431`) - Bug in ``pandas.tools.utils.cartesian_product()`` with large input can cause overflow on windows (:issue:`15265`) -- Bug 
in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`) +- Bug in ``.eval()`` which caused multi-line evals to fail with local variables not on the first line (:issue:`15342`) Other ^^^^^ diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 2e9e616daf3a7..2c147736d79a8 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -124,7 +124,7 @@ I/O - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) - Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). - Bug in :meth:`DataFrame.to_msgpack` when serializing data of the ``numpy.bool_`` datatype (:issue:`18390`) -- Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) +- Bug in :func:`read_json` not decoding when reading line delimited JSON from S3 (:issue:`17200`) - Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) - Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) - Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`) @@ -139,7 +139,7 @@ Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`) -- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequecy is 12h or higher (:issue:`15549`) +- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequency is 12h or higher (:issue:`15549`) - Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) - Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array 
(:issue:`18430`) diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a099fb40c35a7..2430b6ac2bbd4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -243,7 +243,7 @@ Grouping by a categorical includes the unobserved categories in the output. When grouping by multiple categorical columns, this means you get the cartesian product of all the categories, including combinations where there are no observations, which can result in a large number of groups. We have added a keyword ``observed`` to control this behavior, it defaults to -``observed=False`` for backward-compatiblity. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) +``observed=False`` for backward-compatibility. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) .. ipython:: python @@ -535,8 +535,8 @@ Other Enhancements - :func:`DataFrame.replace` now supports the ``method`` parameter, which can be used to specify the replacement method when ``to_replace`` is a scalar, list or tuple and ``value`` is ``None`` (:issue:`19632`) - :meth:`Timestamp.month_name`, :meth:`DatetimeIndex.month_name`, and :meth:`Series.dt.month_name` are now available (:issue:`12805`) - :meth:`Timestamp.day_name` and :meth:`DatetimeIndex.day_name` are now available to return day names with a specified locale (:issue:`12806`) -- :meth:`DataFrame.to_sql` now performs a multivalue insert if the underlying connection supports itk rather than inserting row by row. - ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) +- :meth:`DataFrame.to_sql` now performs a multi-value insert if the underlying connection supports itk rather than inserting row by row. 
+ ``SQLAlchemy`` dialects supporting multi-value inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) - :func:`read_html` now reads all ``<tbody>`` elements in a ``<table>``, not just the first. (:issue:`20690`) - :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) @@ -836,7 +836,7 @@ Extraction of matching patterns from strings By default, extracting matching patterns from strings with :func:`str.extract` used to return a ``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was extracted). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless -``expand`` is set to ``False``. Finallay, ``None`` was an accepted value for +``expand`` is set to ``False``. Finally, ``None`` was an accepted value for the ``expand`` parameter (which was equivalent to ``False``), but now raises a ``ValueError``. (:issue:`11386`) Previous Behavior: @@ -896,7 +896,7 @@ New Behavior: Notice in the example above that the converted ``Categorical`` has retained ``ordered=True``. Had the default value for ``ordered`` remained as ``False``, the converted ``Categorical`` would have become unordered, despite ``ordered=False`` never being explicitly specified. To change the value of ``ordered``, explicitly pass it to the new dtype, e.g. ``CategoricalDtype(categories=list('cbad'), ordered=False)``. -Note that the unintenional conversion of ``ordered`` discussed above did not arise in previous versions due to separate bugs that prevented ``astype`` from doing any type of category to category conversion (:issue:`10696`, :issue:`18593`). 
These bugs have been fixed in this release, and motivated changing the default value of ``ordered``. +Note that the unintentional conversion of ``ordered`` discussed above did not arise in previous versions due to separate bugs that prevented ``astype`` from doing any type of category to category conversion (:issue:`10696`, :issue:`18593`). These bugs have been fixed in this release, and motivated changing the default value of ``ordered``. .. _whatsnew_0230.api_breaking.pretty_printing: @@ -1107,7 +1107,7 @@ Performance Improvements - Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`) - Improved performance of :func:`pandas.core.groupby.GroupBy.pct_change` (:issue:`19165`) - Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`) -- Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. This manifiested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) +- Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. 
This manifested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) - Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`) - Improved performance of :func:`pandas.core.arrays.Categorical.from_codes` (:issue:`18501`) @@ -1243,7 +1243,7 @@ Offsets - Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) -- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) +- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operations (:issue:`14774`) Numeric @@ -1329,9 +1329,9 @@ I/O - :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`, :issue:`9155`, :issue:`19900`) - Bug in :meth:`pandas.io.stata.StataReader.value_labels` raising an ``AttributeError`` when called on very old files. Now returns an empty dict (:issue:`19417`) - Bug in :func:`read_pickle` when unpickling objects with :class:`TimedeltaIndex` or :class:`Float64Index` created with pandas prior to version 0.20 (:issue:`19939`) -- Bug in :meth:`pandas.io.json.json_normalize` where subrecords are not properly normalized if any subrecords values are NoneType (:issue:`20030`) +- Bug in :meth:`pandas.io.json.json_normalize` where sub-records are not properly normalized if any sub-records values are NoneType (:issue:`20030`) - Bug in ``usecols`` parameter in :func:`read_csv` where error is not raised correctly when passing a string. 
(:issue:`20529`) -- Bug in :func:`HDFStore.keys` when reading a file with a softlink causes exception (:issue:`20523`) +- Bug in :func:`HDFStore.keys` when reading a file with a soft link causes exception (:issue:`20523`) - Bug in :func:`HDFStore.select_column` where a key which is not a valid store raised an ``AttributeError`` instead of a ``KeyError`` (:issue:`17912`) Plotting @@ -1390,7 +1390,7 @@ Reshaping - Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) - Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) -- Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`) +- Bug in :func:`concat` when concatenating sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. 
(:issue:`18914`, :issue:`18686`, and :issue:`16874`) - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - Bug in :func:`DataFrame.join` which does an ``outer`` instead of a ``left`` join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) - :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) @@ -1411,5 +1411,5 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) -- Bug in accessing a :func:`pandas.get_option`, which raised ``KeyError`` rather than ``OptionError`` when looking up a non-existant option key in some cases (:issue:`19789`) +- Bug in accessing a :func:`pandas.get_option`, which raised ``KeyError`` rather than ``OptionError`` when looking up a non-existent option key in some cases (:issue:`19789`) - Bug in :func:`testing.assert_series_equal` and :func:`testing.assert_frame_equal` for Series or DataFrames with differing unicode data (:issue:`20503`) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index b94377af770f4..1626508c3ba31 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -64,7 +64,7 @@ Performance Improvements Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ -- +- Added sphinx spelling extension, updated documentation on how to use the spell check (:issue:`21079`) - - diff --git a/doc/source/whatsnew/v0.6.0.txt b/doc/source/whatsnew/v0.6.0.txt index 55a67a75e0fd1..bd01dd0a90a59 100644 --- a/doc/source/whatsnew/v0.6.0.txt +++ b/doc/source/whatsnew/v0.6.0.txt @@ -43,7 +43,7 @@ New Features Performance Enhancements ~~~~~~~~~~~~~~~~~~~~~~~~ -- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the codebase (:issue:`361`) +- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the code 
base (:issue:`361`) - VBENCH Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (:issue:`309`) - VBENCH Improved performance of ``MultiIndex.from_tuples`` - VBENCH Special Cython matrix iterator for applying arbitrary reduction operations diff --git a/doc/source/whatsnew/v0.8.0.txt b/doc/source/whatsnew/v0.8.0.txt index b5ec5aa73ee9a..29d6fe563d047 100644 --- a/doc/source/whatsnew/v0.8.0.txt +++ b/doc/source/whatsnew/v0.8.0.txt @@ -33,7 +33,7 @@ clear of NumPy 1.6's datetime64 API functions (though limited as they are) and only interact with this data using the interface that pandas provides. See the end of the 0.8.0 section for a "porting" guide listing potential issues -for users migrating legacy codebases from pandas 0.7 or earlier to 0.8.0. +for users migrating legacy code bases from pandas 0.7 or earlier to 0.8.0. Bug fixes to the 0.7.x series for legacy NumPy < 1.6 users will be provided as they arise. There will be no more further development in 0.7.x beyond bug @@ -68,7 +68,7 @@ Time series changes and improvements :ref:`time spans <timeseries.periods>` and performing **calendar logic**, including the `12 fiscal quarterly frequencies <timeseries.quarterly>`. This is a partial port of, and a substantial enhancement to, - elements of the scikits.timeseries codebase. Support for conversion between + elements of the scikits.timeseries code base. Support for conversion between PeriodIndex and DatetimeIndex - New Timestamp data type subclasses `datetime.datetime`, providing the same interface while enabling working with nanosecond-resolution data. Also @@ -76,7 +76,7 @@ Time series changes and improvements - Enhanced support for :ref:`time zones <timeseries.timezone>`. Add `tz_convert` and ``tz_lcoalize`` methods to TimeSeries and DataFrame. All timestamps are stored as UTC; Timestamps from DatetimeIndex objects with time - zone set will be localized to localtime. 
Time zone conversions are therefore + zone set will be localized to local time. Time zone conversions are therefore essentially free. User needs to know very little about pytz library now; only time zone names as as strings are required. Time zone-aware timestamps are equal if and only if their UTC timestamps match. Operations between time diff --git a/doc/source/whatsnew/v0.9.1.txt b/doc/source/whatsnew/v0.9.1.txt index e2d6d7a275086..1f58170b30244 100644 --- a/doc/source/whatsnew/v0.9.1.txt +++ b/doc/source/whatsnew/v0.9.1.txt @@ -8,7 +8,7 @@ v0.9.1 (November 14, 2012) -------------------------- -This is a bugfix release from 0.9.0 and includes several new features and +This is a bug fix release from 0.9.0 and includes several new features and enhancements along with a large number of bug fixes. The new features include by-column sort order for DataFrame and Series, improved NA handling for the rank method, masking functions for DataFrame, and intraday time-series filtering for diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 741e5553141f7..41047d9c25c22 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -304,7 +304,7 @@ cdef class IndexEngine: """ return an indexer suitable for takng from a non unique index return the labels in the same order ast the target and a missing indexer into the targets (which correspond - to the -1 indicies in the results """ + to the -1 indices in the results """ cdef: ndarray values, x diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7bb6c1dbb304..f93748a75e609 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -943,7 +943,7 @@ class Timedelta(_Timedelta): days, seconds, microseconds, milliseconds, minutes, hours, weeks : numeric, optional Values for construction in compat with datetime.timedelta. - np ints and floats will be coereced to python ints and floats. 
+ np ints and floats will be coerced to python ints and floats. Notes ----- diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 88bc497f9f22d..63520fdd74299 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -513,7 +513,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, See Also -------- pandas.cut : Discretize continuous-valued array. - pandas.unique : Find the unique valuse in an array. + pandas.unique : Find the unique value in an array. Examples -------- @@ -558,7 +558,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, [a, c] Categories (3, object): [a, b, c] - Notice that ``'b'`` is in ``uniques.categories``, desipite not being + Notice that ``'b'`` is in ``uniques.categories``, despite not being present in ``cat.values``. For all other pandas objects, an Index of the appropriate type is @@ -576,8 +576,8 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, @Substitution( values=dedent("""\ values : sequence - A 1-D seqeunce. Sequences that aren't pandas objects are - coereced to ndarrays before factorization. + A 1-D sequence. Sequences that aren't pandas objects are + coerced to ndarrays before factorization. """), order=dedent("""\ order @@ -1457,7 +1457,7 @@ def take(arr, indices, axis=0, allow_fill=False, fill_value=None): Parameters ---------- arr : sequence - Non array-likes (sequences without a dtype) are coereced + Non array-likes (sequences without a dtype) are coerced to an ndarray. indices : sequence of integers Indices to be taken. diff --git a/pandas/core/apply.py b/pandas/core/apply.py index ac173c5182bc7..27ac5038276d6 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -113,7 +113,7 @@ def get_result(self): if isinstance(self.f, compat.string_types): # Support for `frame.transform('method')` # Some methods (shift, etc.) require the axis argument, others - # don't, so inspect and insert if nescessary. + # don't, so inspect and insert if necessary. 
func = getattr(self.obj, self.f) sig = compat.signature(func) if 'axis' in sig.args: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 1922801c30719..ce87c0a8b0c5a 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -491,7 +491,7 @@ def take(self, indices, allow_fill=False, fill_value=None): `fill_value`: a user-facing "boxed" scalar, and a low-level physical NA value. `fill_value` should be the user-facing version, and the implementation should handle translating that to the - physical version for processing the take if nescessary. + physical version for processing the take if necessary. Returns ------- @@ -510,7 +510,7 @@ def take(self, indices, allow_fill=False, fill_value=None): ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, ``iloc``, when `indices` is a sequence of values. Additionally, it's called by :meth:`Series.reindex`, or any other method - that causes realignemnt, with a `fill_value`. + that causes realignment, with a `fill_value`. See Also -------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index abcb9ae3494b5..eff8c9b4f4cbf 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -51,7 +51,7 @@ _take_msg = textwrap.dedent("""\ Interpreting negative values in 'indexer' as missing values. - In the future, this will change to meaning positional indicies + In the future, this will change to meaning positional indices from the right. Use 'allow_fill=True' to retain the previous behavior and silence this @@ -1478,7 +1478,7 @@ def argsort(self, *args, **kwargs): # TODO(PY2): use correct signature # We have to do *args, **kwargs to avoid a a py2-only signature # issue since np.argsort differs from argsort. - """Return the indicies that would sort the Categorical. + """Return the indices that would sort the Categorical. 
Parameters ---------- diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index d9dc73434f5ac..ad4588f254174 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -33,7 +33,7 @@ def isna(obj): """ Detect missing values for an array-like object. - This function takes a scalar or array-like object and indictates + This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). @@ -52,7 +52,7 @@ def isna(obj): See Also -------- notna : boolean inverse of pandas.isna. - Series.isna : Detetct missing values in a Series. + Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. @@ -260,7 +260,7 @@ def notna(obj): """ Detect non-missing values for an array-like object. - This function takes a scalar or array-like object and indictates + This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). @@ -279,7 +279,7 @@ def notna(obj): See Also -------- isna : boolean inverse of pandas.notna. - Series.notna : Detetct valid values in a Series. + Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 77a67c048a48d..5e5cde05cafbc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1136,7 +1136,7 @@ def to_gbq(self, destination_table, project_id, chunksize=None, Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False - Force Google BigQuery to reauthenticate the user. 
This is useful + Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: @@ -5922,7 +5922,7 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, -------- DataFrame.applymap: For elementwise operations DataFrame.aggregate: only perform aggregating type operations - DataFrame.transform: only perform transformating type operations + DataFrame.transform: only perform transforming type operations Examples -------- @@ -6565,7 +6565,7 @@ def cov(self, min_periods=None): See Also -------- pandas.Series.cov : compute covariance with another Series - pandas.core.window.EWM.cov: expoential weighted sample covariance + pandas.core.window.EWM.cov: exponential weighted sample covariance pandas.core.window.Expanding.cov : expanding sample covariance pandas.core.window.Rolling.cov : rolling sample covariance diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9e4eda1bc4dc7..38def81e73231 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3607,7 +3607,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, If desired, we can fill in the missing values using one of several options. - For example, to backpropagate the last valid value to fill the ``NaN`` + For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. >>> df2.reindex(date_index2, method='bfill') @@ -4541,7 +4541,7 @@ def as_matrix(self, columns=None): e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 - will result in a flot64 dtype. + will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'. 
@@ -4622,7 +4622,7 @@ def values(self): See Also -------- - pandas.DataFrame.index : Retrievie the index labels + pandas.DataFrame.index : Retrieve the index labels pandas.DataFrame.columns : Retrieving the column names """ self._consolidate_inplace() @@ -5702,7 +5702,7 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): the correct type for replacement. Compare the behavior of ``s.replace({'a': None})`` and - ``s.replace('a', None)`` to understand the pecularities + ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: >>> s = pd.Series([10, 'a', 'a', 'b', 'a']) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index df7a5dc9dc173..3bc59157055ce 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2069,7 +2069,7 @@ def shift(self, periods=1, freq=None, axis=0): @Appender(_doc_template) def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, axis=0): - """Calcuate pct_change of each value to previous entry in group""" + """Calculate pct_change of each value to previous entry in group""" if freq is not None or axis != 0: return self.apply(lambda x: x.pct_change(periods=periods, fill_method=fill_method, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index df39eb5fd8312..82147e3ad2f38 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2629,7 +2629,7 @@ def shift(self, periods=1, freq=None): def argsort(self, *args, **kwargs): """ - Return the integer indicies that would sort the index. + Return the integer indices that would sort the index. Parameters ---------- @@ -2641,7 +2641,7 @@ def argsort(self, *args, **kwargs): Returns ------- numpy.ndarray - Integer indicies that would sort the index if used as + Integer indices that would sort the index if used as an indexer. 
See also diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 83950f1d71633..bc4b729cbfe15 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2046,7 +2046,7 @@ def normalize(self): """ Convert times to midnight. - The time component of the date-timeise converted to midnight i.e. + The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fbcf06a28c1e5..a9c65b7c2c864 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2141,7 +2141,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): Notes ----- - This method only works if the MultiIndex is properly lex-sorted. So, + This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index b9e8f9028dbf7..c163e3d53e634 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -587,7 +587,7 @@ def asfreq(self, freq=None, how='E'): 'S', 'START', or 'BEGIN' for start. Whether the elements should be aligned to the end or start within pa period. January 31st ('END') vs. - Janury 1st ('START') for example. + January 1st ('START') for example. Returns ------- diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 31c489e2f8941..e9b9a734ec5f5 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -761,7 +761,7 @@ def _interp_limit(invalid, fw_limit, bw_limit): """ # handle forward first; the backward direction is the same except # 1. operate on the reversed array - # 2. subtract the returned indicies from N - 1 + # 2. 
subtract the returned indices from N - 1 N = len(invalid) f_idx = set() b_idx = set() diff --git a/pandas/core/panel.py b/pandas/core/panel.py index fe46b8a66e5ef..c4aa471b8b944 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1405,7 +1405,7 @@ def _get_join_index(self, other, how): # miscellaneous data creation @staticmethod def _extract_axes(self, data, axes, **kwargs): - """ return a list of the axis indicies """ + """ return a list of the axis indices """ return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)] @@ -1447,11 +1447,11 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): Returns ------- - dict of aligned results & indicies + dict of aligned results & indices """ result = dict() - # caller differs dict/ODict, presered type + # caller differs dict/ODict, preserved type if isinstance(frames, OrderedDict): result = OrderedDict() diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 0707cc756682e..0b0fcacc1bc48 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -425,7 +425,7 @@ def backfill(self, limit=None): appear (e.g., when the resampling frequency is higher than the original frequency). The backward fill will replace NaN values that appeared in the resampled data with the next value in the original sequence. - Missing values that existed in the orginal data will not be modified. + Missing values that existed in the original data will not be modified. Parameters ---------- @@ -529,7 +529,7 @@ def fillna(self, method, limit=None): appear (e.g., when the resampling frequency is higher than the original frequency). - Missing values that existed in the orginal data will + Missing values that existed in the original data will not be modified. 
Parameters diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index ce99d2f8c9a63..b3e3c52f6e363 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -166,7 +166,8 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more - group of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,... + group of columns with format + A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) @@ -185,7 +186,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): i : str or list-like Column(s) to use as id variable(s) j : str - The name of the subobservation variable. What you wish to name your + The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names @@ -200,7 +201,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form - Aone, Btwo,.., and you have an unrelated column Arating, you can + A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 @@ -242,7 +243,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 - With multuple id columns + With multiple id columns >>> df = pd.DataFrame({ ... 
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4d8897fb7c811..73aba4d4e044b 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -319,7 +319,7 @@ def merge_asof(left, right, on=None, - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value - (i.e., stricly less-than / strictly greater-than) + (i.e., strictly less-than / strictly greater-than) direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..c92825abf45a3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3088,7 +3088,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): -------- Series.map: For element-wise operations Series.agg: only perform aggregating type operations - Series.transform: only perform transformating type operations + Series.transform: only perform transforming type operations Examples -------- diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 81d775157cf62..cb1e8c067f537 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -207,7 +207,7 @@ def str_count(arr, pat, flags=0): Flags for the `re` module. For a complete list, `see here <https://docs.python.org/3/howto/regex.html#compilation-flags>`_. **kwargs - For compatability with other string methods. Not used. + For compatibility with other string methods. Not used. Returns ------- @@ -1358,7 +1358,7 @@ def str_split(arr, pat=None, n=None): Limit number of splits in output. ``None``, 0 and -1 will be interpreted as return all splits. expand : bool, default False - Expand the splitted strings into separate columns. + Expand the split strings into separate columns. * If ``True``, return DataFrame/MultiIndex expanding dimensionality. 
* If ``False``, return Series/Index, containing lists of strings. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 1de43116d0b49..8ecb81397edb3 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -138,7 +138,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, as dateutil). Warning: yearfirst=True is not strict, but will prefer to parse - with year first (this is a known bug, based on dateutil beahavior). + with year first (this is a known bug, based on dateutil behavior). .. versionadded:: 0.16.1 @@ -181,8 +181,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, .. versionadded:: 0.20.0 cache : boolean, default False If True, use a cache of unique, converted dates to apply the datetime - conversion. May produce sigificant speed-up when parsing duplicate date - strings, especially ones with timezone offsets. + conversion. May produce significant speed-up when parsing duplicate + date strings, especially ones with timezone offsets. .. versionadded:: 0.23.0 diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f876ceb8a26bf..5203cf036c146 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -60,7 +60,7 @@ class Styler(object): table_styles: list-like, default None list of {selector: (attr, value)} dicts; see Notes uuid: str, default None - a unique identifier to avoid CSS collisons; generated automatically + a unique identifier to avoid CSS collisions; generated automatically caption: str, default None caption to attach to the table @@ -79,7 +79,7 @@ class Styler(object): If using in the Jupyter notebook, Styler has defined a ``_repr_html_`` to automatically render itself. Otherwise call Styler.render to get - the genterated HTML. + the generated HTML. 
CSS classes are attached to the generated HTML @@ -120,7 +120,7 @@ def __init__(self, data, precision=None, table_styles=None, uuid=None, if data.ndim == 1: data = data.to_frame() if not data.index.is_unique or not data.columns.is_unique: - raise ValueError("style is not supported for non-unique indicies.") + raise ValueError("style is not supported for non-unique indices.") self.data = data self.index = data.index @@ -549,7 +549,7 @@ def _apply(self, func, axis=0, subset=None, **kwargs): def apply(self, func, axis=0, subset=None, **kwargs): """ - Apply a function column-wise, row-wise, or table-wase, + Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result. Parameters @@ -1051,7 +1051,8 @@ def _bar_center_mid(s, color, width, base): def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left'): """ - Color the background ``color`` proptional to the values in each column. + Color the background ``color`` proportional to the values in each + column. Excludes non-numeric data by default. Parameters diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 236d70609e76c..c7c16598ee432 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -56,7 +56,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, List of BigQuery column names in the desired order for results DataFrame. reauth : boolean, default False - Force Google BigQuery to reauthenticate the user. This is useful + Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. private_key : str, optional Service account private key in JSON format. 
Can be file path diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 01f7db7d68664..6f663f8ff8433 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -219,7 +219,7 @@ def build_table_schema(data, index=True, primary_key=None, version=True): ----- See `_as_json_table_type` for conversion types. Timedeltas as converted to ISO8601 duration format with - 9 decimal places after the secnods field for nanosecond precision. + 9 decimal places after the seconds field for nanosecond precision. Categoricals are converted to the `any` dtype, and use the `enum` field constraint to list the allowed values. The `ordered` attribute is included diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index daa370d0ca61a..aa39e341792c7 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -297,7 +297,7 @@ def read_hdf(path_or_buf, key=None, mode='r', **kwargs): objects. .. versionadded:: 0.19.0 support for pathlib, py.path. - .. versionadded:: 0.21.0 support for __fspath__ proptocol. + .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file @@ -3790,13 +3790,13 @@ class WORMTable(Table): table_type = u('worm') def read(self, **kwargs): - """ read the indicies and the indexing array, calculate offset rows and + """ read the indices and the indexing array, calculate offset rows and return """ raise NotImplementedError("WORMTable needs to implement read") def write(self, **kwargs): """ write in a format that we can search later on (but cannot append - to): write out the indicies and the values using _write_array + to): write out the indices and the values using _write_array (e.g. 
a CArray) create an indexing table so that we can search """ raise NotImplementedError("WORKTable needs to implement write") @@ -4694,7 +4694,7 @@ class Selection(object): ---------- table : a Table object where : list of Terms (or convertible to) - start, stop: indicies to start and/or stop selection + start, stop: indices to start and/or stop selection """ diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 87b7d13251f28..0819df97ba5fa 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1394,7 +1394,7 @@ def orientation(self): In statistics, `kernel density estimation`_ (KDE) is a non-parametric way to estimate the probability density function (PDF) of a random variable. This function uses Gaussian kernels and includes automatic - bandwith determination. + bandwidth determination. .. _kernel density estimation: https://en.wikipedia.org/wiki/Kernel_density_estimation @@ -2031,7 +2031,7 @@ def plot_series(data, kind='line', ax=None, # Series unique Tick label font size in points or as a string (e.g., `large`). rot : int or float, default 0 The rotation angle of labels (in degrees) - with respect to the screen coordinate sytem. + with respect to the screen coordinate system. grid : boolean, default True Setting this to True will show the grid. figsize : A tuple (width, height) in inches @@ -2063,7 +2063,7 @@ def plot_series(data, kind='line', ax=None, # Series unique * 'axes' : object of class matplotlib.axes.Axes * 'dict' : dict of matplotlib.lines.Line2D objects - * 'both' : a nametuple with strucure (ax, lines) + * 'both' : a namedtuple with structure (ax, lines) For data grouped with ``by``: @@ -2848,8 +2848,8 @@ def hist(self, bins=10, **kwds): >>> ax = s.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can - lead to overfitting, while using a large bandwidth value may result - in underfitting: + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: .. 
plot:: :context: close-figs @@ -3284,8 +3284,8 @@ def hist(self, by=None, bins=10, **kwds): >>> ax = df.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can - lead to overfitting, while using a large bandwidth value may result - in underfitting: + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: .. plot:: :context: close-figs @@ -3415,7 +3415,7 @@ def scatter(self, x, y, s=None, c=None, **kwds): - A sequence of color strings referred to by name, RGB or RGBA code, which will be used for each point's color recursively. For - intance ['green','yellow'] all points will be filled in green or + instance ['green','yellow'] all points will be filled in green or yellow, alternatively. - A column name or position whose values will be used to color the diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 6dd38187f7277..300e1acdea911 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1638,7 +1638,7 @@ def test_constructor_series_copy(self): def test_constructor_with_nas(self): # GH 5016 - # na's in indicies + # na's in indices def check(df): for i in range(len(df.columns)): diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index f0ba1851b28dd..a77c170221bea 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -130,7 +130,7 @@ def setup_method(self, method): setattr(self, o, d) def generate_indices(self, f, values=False): - """ generate the indicies + """ generate the indices if values is True , use the axis values is False, use the range """ diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index bfc74db73b813..49047e1da0996 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -391,13 +391,13 @@ def test_iloc_getitem_frame(self): expected = df.ix[[0, 2, 6], [0, 2]] 
tm.assert_frame_equal(result, expected) - # neg indicies + # neg indices result = df.iloc[[-1, 1, 3], [-1, 1]] with catch_warnings(record=True): expected = df.ix[[18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) - # dups indicies + # dups indices result = df.iloc[[-1, -1, 1, 3], [-1, 1]] with catch_warnings(record=True): expected = df.ix[[18, 18, 2, 6], [6, 2]] diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py index 81265c9f2941d..1085e2a61be48 100644 --- a/pandas/tests/indexing/test_panel.py +++ b/pandas/tests/indexing/test_panel.py @@ -43,12 +43,12 @@ def test_iloc_getitem_panel(self): expected = p.loc[['A', 'C']] tm.assert_panel_equal(result, expected) - # neg indicies + # neg indices result = p.iloc[[-1, 1], [-1, 1]] expected = p.loc[['D', 'B'], ['c', 'b']] tm.assert_panel_equal(result, expected) - # dups indicies + # dups indices result = p.iloc[[-1, -1, 1], [-1, 1]] expected = p.loc[['D', 'D', 'B'], ['c', 'b']] tm.assert_panel_equal(result, expected) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 381a059244858..d590cfd6b6c64 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -1484,7 +1484,7 @@ def test_append_with_data_columns(self): store.append('df', df[2:]) tm.assert_frame_equal(store['df'], df) - # check that we have indicies created + # check that we have indices created assert(store._handle.root.df.table.cols.index.is_indexed is True) assert(store._handle.root.df.table.cols.B.is_indexed is True) @@ -4511,7 +4511,7 @@ def do_copy(f, new_f=None, keys=None, keys = store.keys() assert set(keys) == set(tstore.keys()) - # check indicies & nrows + # check indices & nrows for k in tstore.keys(): if tstore.get_storer(k).is_table: new_t = tstore.get_storer(k) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 12d803a76e7f3..d95a2ad2d7f76 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ 
-1524,7 +1524,7 @@ def test_take(self): expected = self.panel.reindex(minor=['D', 'A', 'B', 'C']) assert_panel_equal(result, expected) - # neg indicies ok + # neg indices ok expected = self.panel.reindex(minor=['D', 'D', 'B', 'C']) result = self.panel.take([3, -1, 1, 2], axis=2) assert_panel_equal(result, expected)
This is my first time contributing to a project this big so I hope everything is okay I am still working on fixing bugs and polishing a few things but here is what I have done so far: - fixed typos - updated a wordlist txt file with words to be ignored by the spellchecker - added the spellcheck method to the DocBuilder class (should this be named spelling?) I've re-used the _sphix_build method to run the spelling command, to do this I had to update the _sphix_build method to include 'spelling' as a kind. I'd like to know if this is the best approach of If I should just replicate the code. I have added a few configuring options in order to get better results when using the spellcheck, the first one was to use the wordlist text file, the second was to ignore known PyPI packages names and finally to show suggestions of a misspelt word. At the moment the spellcheck will run if we type the command: `python make.py spellcheck` and the spellcheck method is called, I haven't yet figured out how to fail the build when there are exceptions like the Issue 21079 suggested. ---- - [x] closes #21079 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21109
2018-05-17T20:53:32Z
2018-06-07T13:20:33Z
2018-06-07T13:20:33Z
2018-06-12T07:58:06Z
PERF: Improve performance of CategoricalIndex.is_unique
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..d45b4e19c6aac 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -30,6 +30,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`) +- Improved performance of :meth:`CategoricalIndex.is_unique` (:issue:`21107`) - - diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 78b7ae7054248..150eca32e229d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -378,7 +378,7 @@ def _engine(self): # introspection @cache_readonly def is_unique(self): - return not self.duplicated().any() + return self._engine.is_unique @property def is_monotonic_increasing(self): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 0e630f69b1a32..a2a4170256088 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -581,6 +581,15 @@ def test_is_monotonic(self, data, non_lexsorted_data): assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing + @pytest.mark.parametrize('values, expected', [ + ([1, 2, 3], True), + ([1, 3, 1], False), + (list('abc'), True), + (list('aba'), False)]) + def test_is_unique(self, values, expected): + ci = CategoricalIndex(values) + assert ci.is_unique is expected + def test_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo')
``CategoricalIndex.is_unique`` creates an extraneous boolean array. By changing ``CategoricalIndex.is_unique`` to use ``CategoricalIndex._engine.is_unique`` instead, this array creation is avoided. We simultaneously get to set ``is_monotonic*`` for free, and therefore will save time, if that property is called later. Demonstration ========== Setup: ```python >>> n = 1_000_000 >>> ci = pd.CategoricalIndex(list('a' * n + 'b' * n + 'c' * n)) ``` Currently, ``ci.is_unique`` is about the same (disregarding``@readonly_cache``) as: ```python >>> from pandas._libs.hashtable import duplicated_int64 >>> not duplicated_int64(ci.codes.astype('int64')).any() False >>> %timeit duplicated_int64(ci.codes.astype('int64')).any() 46.7 ms ± 4.18 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) ``` Notice that the ``duplicated_int64()`` creates an boolean array, which is not needed and slows the operation down. If we instead use ``ci._engine.is_unique`` to check for uniqueness, the check is roughly similar to: ```python >>> from pandas._libs.algos import is_monotonic_int64 >>> is_monotonic_int64(ci.codes.astype('int64'), False) (True, False, False) # (is_monotonic_inc, is_monotonic_dec, is_unique) >>> %timeit is_monotonic_int64(ci.codes.astype('int64'), False) 23.3 ms ± 364 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) ``` This is faster than the other version, as the intermediate boolean array is not created in this version. Also, is it (IMO) more idiomatic, as ``index._engine`` is in general supposed to be used for this kind of index content checks.
https://api.github.com/repos/pandas-dev/pandas/pulls/21107
2018-05-17T19:14:55Z
2018-06-04T21:43:17Z
2018-06-04T21:43:17Z
2018-10-27T08:16:21Z
Replaced open with Context Mgrs in Parser Tests
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 2423ddcd9a1a0..2b7ff1f5a9879 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -54,20 +54,21 @@ def test_bad_stream_exception(self): # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. - handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') - # stream must be binary UTF8 - stream = codecs.StreamRecoder( - handle, utf8.encode, utf8.decode, codec.streamreader, - codec.streamwriter) + if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" - with tm.assert_raises_regex(UnicodeDecodeError, msg): - self.read_csv(stream) - stream.close() + + # stream must be binary UTF8 + with open(self.csv_shiftjs, "rb") as handle, codecs.StreamRecoder( + handle, utf8.encode, utf8.decode, codec.streamreader, + codec.streamwriter) as stream: + + with tm.assert_raises_regex(UnicodeDecodeError, msg): + self.read_csv(stream) def test_read_csv(self): if not compat.PY3: diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 01c6620e50d37..e84db66561c49 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -110,16 +110,15 @@ def test_read_csv_infer_compression(self): # see gh-9770 expected = self.read_csv(self.csv1, index_col=0, parse_dates=True) - inputs = [self.csv1, self.csv1 + '.gz', - self.csv1 + '.bz2', open(self.csv1)] + with open(self.csv1) as f: + inputs = [self.csv1, self.csv1 + '.gz', + self.csv1 + '.bz2', f] - for f in inputs: - df = self.read_csv(f, index_col=0, parse_dates=True, - compression='infer') - - tm.assert_frame_equal(expected, df) + for inp in inputs: + df = self.read_csv(inp, index_col=0, parse_dates=True, + compression='infer') - inputs[3].close() + tm.assert_frame_equal(expected, df) 
def test_read_csv_compressed_utf16_example(self): # GH18071 diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index ab4c14034cd20..e8d9d8b52164b 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -35,24 +35,18 @@ def setup_method(self, method): self.xls1 = os.path.join(self.dirpath, 'test.xls') def test_file_handle(self): - try: - f = open(self.csv1, 'rb') + with open(self.csv1, 'rb') as f: reader = TextReader(f) - result = reader.read() # noqa - finally: - f.close() + reader.read() def test_string_filename(self): reader = TextReader(self.csv1, header=None) reader.read() def test_file_handle_mmap(self): - try: - f = open(self.csv1, 'rb') + with open(self.csv1, 'rb') as f: reader = TextReader(f, memory_map=True, header=None) reader.read() - finally: - f.close() def test_StringIO(self): with open(self.csv1, 'rb') as f:
Maybe closes #21102 and #19984, though if not should still work as a general cleanup
https://api.github.com/repos/pandas-dev/pandas/pulls/21105
2018-05-17T16:08:22Z
2018-05-19T20:08:19Z
2018-05-19T20:08:18Z
2018-06-08T17:09:48Z
BUG: type aliasing is not allowed to be compared using isinstance()
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..9c19d4d6bbaad 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -48,6 +48,11 @@ Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) + +Strings +^^^^^^^ + +- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`) - Conversion diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 12517372fedd1..5ae22694d0da7 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -425,7 +425,7 @@ def raise_with_traceback(exc, traceback=Ellipsis): # In Python 3.7, the private re._pattern_type is removed. # Python 3.5+ have typing.re.Pattern -if PY35: +if PY36: import typing re_type = typing.re.Pattern else: diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index ead9ba1e26e2d..79d3aad493182 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -4,9 +4,10 @@ """ import pytest +import re from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap, lfilter, builtins, iterkeys, itervalues, iteritems, - next, get_range_parameters, PY2) + next, get_range_parameters, PY2, re_type) class TestBuiltinIterators(object): @@ -89,3 +90,7 @@ def test_get_range_parameters(self, start, stop, step): assert start_result == start_expected assert stop_result == stop_expected assert step_result == step_expected + + +def test_re_type(): + assert isinstance(re.compile(''), re_type)
- [X] closes #21078 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry As raised in #21078, Python 3.5.4 supports the using `isinstance()` with `typing.re.Pattern` But it does not support the same method in 3.5.2. ``` Python Python 3.5.2 (default, Nov 23 2017, 16:37:01) [GCC 5.4.0 20160609] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import re >>> import typing >>> isinstance(re.compile(''), typing.re.Pattern) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/lib/python3.5/typing.py", line 260, in __instancecheck__ raise TypeError("Type aliases cannot be used with isinstance().") TypeError: Type aliases cannot be used with isinstance(). ``` This Bugfix PR is to revert the `re_type` to be back to what it used to be before the update with the new release.
https://api.github.com/repos/pandas-dev/pandas/pulls/21098
2018-05-17T04:02:09Z
2018-05-17T20:55:14Z
2018-05-17T20:55:14Z
2018-05-19T23:47:10Z
Support for OO Optimization
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35484e34ee9eb..654b27624a6fe 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -46,8 +46,6 @@ Documentation Changes Bug Fixes ~~~~~~~~~ -- tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) - Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -101,3 +99,9 @@ Reshaping - Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) - + +Other +^^^^^ + +- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) +- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`) diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index c28e2052bd93e..c2d09c6d49e86 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -2,6 +2,8 @@ """ Testing that we work in the downstream packages """ +import subprocess + import pytest import numpy as np # noqa from pandas import DataFrame @@ -53,6 +55,11 @@ def test_xarray(df): assert df.to_xarray() is not None +def test_oo_optimizable(): + # GH 21071 + subprocess.check_call(["python", "-OO", "-c", "import pandas"]) + + @tm.network def test_statsmodels(): diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 749165f894819..c294110d89ec5 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1090,12 +1090,17 @@ def apply(self, other): class CustomBusinessMonthEnd(_CustomBusinessMonth): - __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') + # TODO(py27): Replace condition with Subsitution after dropping Py27 + if _CustomBusinessMonth.__doc__: + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') _prefix = 'CBM' class CustomBusinessMonthBegin(_CustomBusinessMonth): - __doc__ = 
_CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'beginning') + # TODO(py27): Replace condition with Subsitution after dropping Py27 + if _CustomBusinessMonth.__doc__: + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', + 'beginning') _prefix = 'CBMS' diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 624fbbbd4f05e..6b55554cdc941 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -4,7 +4,7 @@ import types import warnings from textwrap import dedent, wrap -from functools import wraps, update_wrapper +from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS def deprecate(name, alternative, version, alt_name=None, @@ -20,18 +20,18 @@ def deprecate(name, alternative, version, alt_name=None, Parameters ---------- name : str - Name of function to deprecate - alternative : str - Name of function to use instead + Name of function to deprecate. + alternative : func + Function to use instead. version : str - Version of pandas in which the method has been deprecated + Version of pandas in which the method has been deprecated. alt_name : str, optional - Name to use in preference of alternative.__name__ + Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str - The message to display in the warning. - Default is '{name} is deprecated. Use {alt_name} instead.' + The message to display in the warning. + Default is '{name} is deprecated. Use {alt_name} instead.' 
""" alt_name = alt_name or alternative.__name__ @@ -39,25 +39,26 @@ def deprecate(name, alternative, version, alt_name=None, warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name) - @wraps(alternative) + # adding deprecated directive to the docstring + msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) + msg = '\n '.join(wrap(msg, 70)) + + @Substitution(version=version, msg=msg) + @Appender(alternative.__doc__) def wrapper(*args, **kwargs): + """ + .. deprecated:: %(version)s + + %(msg)s + + """ warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) - # adding deprecated directive to the docstring - msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) - tpl = dedent(""" - .. deprecated:: {version} - - {msg} - - {rest} - """) - rest = getattr(wrapper, '__doc__', '') - docstring = tpl.format(version=version, - msg='\n '.join(wrap(msg, 70)), - rest=dedent(rest)) - wrapper.__doc__ = docstring + # Since we are using Substitution to create the required docstring, + # remove that from the attributes that should be assigned to the wrapper + assignments = tuple(x for x in WRAPPER_ASSIGNMENTS if x != '__doc__') + update_wrapper(wrapper, alternative, assigned=assignments) return wrapper
- [X] closes #21071 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21093
2018-05-16T21:26:01Z
2018-05-30T21:04:48Z
2018-05-30T21:04:48Z
2018-06-08T17:18:57Z
BUG: assert_index_equal does not raise error for check_categorical=False when comparing 2 CategoricalIndex objects
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5c9c3e2931bd9..d211a21546978 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -46,6 +46,11 @@ Bug Fixes - - +Categorical +^^^^^^^^^^^ + +- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`) + Conversion ^^^^^^^^^^ diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index d6f58d16bcf64..ab7c4fb528452 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -503,6 +503,25 @@ def test_index_equal_metadata_message(self): with tm.assert_raises_regex(AssertionError, expected): assert_index_equal(idx1, idx2) + def test_categorical_index_equality(self): + expected = """Index are different + +Attribute "dtype" are different +\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) +\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \ +ordered=False\\)""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), + pd.Index(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c']))) + + def test_categorical_index_equality_relax_categories_check(self): + assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), + pd.Index(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c'])), + check_categorical=False) + class TestAssertSeriesEqual(object): @@ -600,6 +619,25 @@ def test_series_equal_message(self): assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]), check_less_precise=True) + def test_categorical_series_equality(self): + expected = """Attributes are different + +Attribute "dtype" are different +\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) +\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', 
u?'b', u?'c'\\], \ +ordered=False\\)""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), + pd.Series(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c']))) + + def test_categorical_series_equality_relax_categories_check(self): + assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), + pd.Series(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c'])), + check_categorical=False) + class TestAssertFrameEqual(object): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index e1484a9c1b390..233eba6490937 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -778,8 +778,12 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, def _check_types(l, r, obj='Index'): if exact: - assert_class_equal(left, right, exact=exact, obj=obj) - assert_attr_equal('dtype', l, r, obj=obj) + assert_class_equal(l, r, exact=exact, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if check_categorical: + assert_attr_equal('dtype', l, r, obj=obj) + # allow string-like to have different inferred_types if l.inferred_type in ('string', 'unicode'): assert r.inferred_type in ('string', 'unicode') @@ -829,7 +833,8 @@ def _get_ilevel_values(index, level): # get_level_values may change dtype _check_types(left.levels[level], right.levels[level], obj=obj) - if check_exact: + # skip exact index checking when `check_categorical` is False + if check_exact and check_categorical: if not left.equals(right): diff = np.sum((left.values != right.values) .astype(int)) * 100.0 / len(left) @@ -950,23 +955,23 @@ def is_sorted(seq): def assert_categorical_equal(left, right, check_dtype=True, - obj='Categorical', check_category_order=True): + check_category_order=True, obj='Categorical'): """Test that Categoricals are equivalent. 
Parameters ---------- - left, right : Categorical - Categoricals to compare + left : Categorical + right : Categorical check_dtype : bool, default True Check that integer dtype of the codes are the same - obj : str, default 'Categorical' - Specify object name being compared, internally used to show appropriate - assertion message check_category_order : bool, default True Whether the order of the categories should be compared, which implies identical integer codes. If False, only the resulting values are compared. The ordered attribute is checked regardless. + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message """ _check_isinstance(left, right, Categorical) @@ -1020,7 +1025,7 @@ def raise_assert_detail(obj, message, left, right, diff=None): def assert_numpy_array_equal(left, right, strict_nan=False, check_dtype=True, err_msg=None, - obj='numpy array', check_same=None): + check_same=None, obj='numpy array'): """ Checks that 'np.ndarray' is equivalent Parameters @@ -1033,11 +1038,11 @@ def assert_numpy_array_equal(left, right, strict_nan=False, check dtype if both a and b are np.ndarray err_msg : str, default None If provided, used as assertion message + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate assertion message - check_same : None|'copy'|'same', default None - Ensure left and right refer/do not refer to the same memory area """ # instance validation
- [x] closes #19776 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry * tests added for `check_categorical` parameter in `assert_series_equal` * cleaned up `assert_categorical_equal` parameter order and docstring to match other functions (`obj` at the end) Thanks to all maintainers who helped answer questions on Gitter during the informal PyCon sprint!
https://api.github.com/repos/pandas-dev/pandas/pulls/21092
2018-05-16T18:55:14Z
2018-05-19T20:10:01Z
2018-05-19T20:10:00Z
2018-06-08T17:10:27Z
Fixed extensionarray ref [ci skip]
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index feba9d856789b..a099fb40c35a7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -202,7 +202,7 @@ for storing ip addresses. ...: ``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas -:ref:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. +:class:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. .. code-block:: ipython
[ci skip]
https://api.github.com/repos/pandas-dev/pandas/pulls/21077
2018-05-16T11:09:26Z
2018-05-16T11:09:33Z
2018-05-16T11:09:33Z
2018-05-16T11:09:36Z
DOC: Start 0.24.0 whatsnew [ci skip]
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index d61a98fe2dae4..c744e44b4c17c 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,10 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.24.0.txt + +.. include:: whatsnew/v0.23.1.txt + .. include:: whatsnew/v0.23.0.txt .. include:: whatsnew/v0.22.0.txt diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt new file mode 100644 index 0000000000000..3886b6c142305 --- /dev/null +++ b/doc/source/whatsnew/v0.24.0.txt @@ -0,0 +1,179 @@ +.. _whatsnew_0240: + +v0.24.0 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0240.enhancements: + +New features +~~~~~~~~~~~~ + +.. _whatsnew_0240.enhancements.other: + +Other Enhancements +^^^^^^^^^^^^^^^^^^ +- +- +- + +.. _whatsnew_0240.api_breaking: + + +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0240.api.other: + +Other API Changes +^^^^^^^^^^^^^^^^^ + +- +- +- + +.. _whatsnew_0240.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0240.prior_deprecations: + +Removal of prior version deprecations/changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0240.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0240.docs: + +Documentation Changes +~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. 
_whatsnew_0240.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +Categorical +^^^^^^^^^^^ + +- +- +- + +Datetimelike +^^^^^^^^^^^^ + +- +- +- + +Timedelta +^^^^^^^^^ + +- +- +- + +Timezones +^^^^^^^^^ + +- +- +- + +Offsets +^^^^^^^ + +- +- +- + +Numeric +^^^^^^^ + +- +- +- + +Strings +^^^^^^^ + +- +- +- + +Indexing +^^^^^^^^ + +- +- +- + +MultiIndex +^^^^^^^^^^ + +- +- +- + +I/O +^^^ + +- +- +- + +Plotting +^^^^^^^^ + +- +- +- + +Groupby/Resample/Rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- +- +- + +Sparse +^^^^^^ + +- +- +- + +Reshaping +^^^^^^^^^ + +- +- +- + +Other +^^^^^ + +- +- +- +
[ci skip]
https://api.github.com/repos/pandas-dev/pandas/pulls/21072
2018-05-16T03:07:04Z
2018-05-16T03:07:34Z
2018-05-16T03:07:34Z
2018-05-26T10:45:00Z
DOC: Correct the date of whatsnew v0.23 #21067
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f89de1dc22d8..feba9d856789b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1,6 +1,6 @@ .. _whatsnew_0230: -v0.23.0 (May 15, 2017) +v0.23.0 (May 15, 2018) ---------------------- This is a major release from 0.22.0 and includes a number of API changes,
- [x] closes #21067 - [x] tests added / passed (NA, just the DOC) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21069
2018-05-16T02:24:03Z
2018-05-16T03:02:00Z
2018-05-16T03:02:00Z
2018-05-16T13:34:50Z
Prevent Unlimited Agg Recursion with Duplicate Col Names
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5c9c3e2931bd9..338364a943edf 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -43,7 +43,10 @@ Documentation Changes Bug Fixes ~~~~~~~~~ -- +Groupby/Resample/Rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) - Conversion diff --git a/pandas/core/base.py b/pandas/core/base.py index fa78c89ed4ee7..aa051c6f5eaef 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -590,9 +590,10 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): # multiples else: - for col in obj: + for index, col in enumerate(obj): try: - colg = self._gotitem(col, ndim=1, subset=obj[col]) + colg = self._gotitem(col, ndim=1, + subset=obj.iloc[:, index]) results.append(colg.aggregate(arg)) keys.append(col) except (TypeError, DataError): @@ -675,7 +676,6 @@ def _gotitem(self, key, ndim, subset=None): subset : object, default None subset to act on """ - # create a new object to prevent aliasing if subset is None: subset = self.obj diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dccc840f5affd..77a67c048a48d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5731,7 +5731,12 @@ def diff(self, periods=1, axis=0): # ---------------------------------------------------------------------- # Function application - def _gotitem(self, key, ndim, subset=None): + def _gotitem(self, + key, # type: Union[str, List[str]] + ndim, # type: int + subset=None # type: Union[Series, DataFrame, None] + ): + # type: (...) -> Union[Series, DataFrame] """ sub-classes to define return a sliced object @@ -5746,9 +5751,11 @@ def _gotitem(self, key, ndim, subset=None): """ if subset is None: subset = self + elif subset.ndim == 1: # is Series + return subset # TODO: _shallow_copy(subset)? 
- return self[key] + return subset[key] _agg_doc = dedent(""" The aggregation operations are always performed over an axis, either the diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ac46f02d00773..dfb2961befe35 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -554,6 +554,14 @@ def test_apply_non_numpy_dtype(self): result = df.apply(lambda x: x) assert_frame_equal(result, df) + def test_apply_dup_names_multi_agg(self): + # GH 21063 + df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a']) + expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min']) + result = df.agg(['min']) + + tm.assert_frame_equal(result, expected) + class TestInferOutputShape(object): # the user has supplied an opaque UDF where
- [X] closes #21063 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The `_gotitem` implementation for `DataFrame` seems a little strange so there may be a more comprehensive approach, but this should prevent the issue for the time being Didn't add whatsnew yet since none existed for 0.23.1. Happy to add if we are OK with this fix
https://api.github.com/repos/pandas-dev/pandas/pulls/21066
2018-05-15T18:29:40Z
2018-05-17T12:42:15Z
2018-05-17T12:42:14Z
2018-06-08T17:08:34Z
DOC: updated docstring for nanoseconds function per doc guidelines
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index d17d4e7139d72..f7bb6c1dbb304 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -791,9 +791,32 @@ cdef class _Timedelta(timedelta): @property def nanoseconds(self): """ - Number of nanoseconds (>= 0 and less than 1 microsecond). + Return the number of nanoseconds (n), where 0 <= n < 1 microsecond. + + Returns + ------- + int + Number of nanoseconds. + + See Also + -------- + Timedelta.components : Return all attributes with assigned values + (i.e. days, hours, minutes, seconds, milliseconds, microseconds, + nanoseconds). + + Examples + -------- + **Using string input** + + >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.nanoseconds + 42 + + **Using integer input** - .components will return the shown components + >>> td = pd.Timedelta(42, unit='ns') + >>> td.nanoseconds + 42 """ self._ensure_components() return self._ns
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ``` ################################################################################ ################### Docstring (pandas.Timedelta.nanoseconds) ################### ################################################################################ Return the number of nanoseconds (n), where 0 <= n < 1 microsecond. Returns ------- int : Number of nanoseconds See Also -------- Timedelta.components : Return all attributes with assigned values (i.e. days, seconds, microseconds, nanoseconds) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found No examples section found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21065
2018-05-15T17:20:02Z
2018-05-17T00:20:43Z
2018-05-17T00:20:43Z
2018-06-08T17:06:45Z
Pass sort for agg multiple
diff --git a/pandas/core/base.py b/pandas/core/base.py index 5022beabef76b..fa78c89ed4ee7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -608,7 +608,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): raise ValueError("no results") try: - return concat(results, keys=keys, axis=1) + return concat(results, keys=keys, axis=1, sort=False) except TypeError: # we are concatting non-NDFrame objects, diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index af39c8f01cf73..ac46f02d00773 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -908,6 +908,31 @@ def test_demo(self): index=['max', 'min', 'sum']) tm.assert_frame_equal(result.reindex_like(expected), expected) + def test_agg_multiple_mixed_no_warning(self): + # https://github.com/pandas-dev/pandas/issues/20909 + mdf = pd.DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0], + "C": ['bar', 'foobarbaz'], + "D": [pd.Timestamp('2013-01-01'), pd.NaT]}, + index=['min', 'sum']) + # sorted index + with tm.assert_produces_warning(None): + result = mdf.agg(['min', 'sum']) + + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(None): + result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min']) + + # For backwards compatibility, the result's index is + # still sorted by function name, so it's ['min', 'sum'] + # not ['sum', 'min']. + expected = expected[['D', 'C', 'B', 'A']] + tm.assert_frame_equal(result, expected) + def test_agg_dict_nested_renaming_depr(self): df = pd.DataFrame({'A': range(5), 'B': 5})
xref https://github.com/pandas-dev/pandas/issues/20909
https://api.github.com/repos/pandas-dev/pandas/pulls/21062
2018-05-15T16:07:43Z
2018-05-15T20:02:19Z
2018-05-15T20:02:19Z
2018-05-15T20:02:33Z
DOC: add highlights and toc to whatsnew file for 0.23.0
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5fe397a7cbb37..32db2ff5ebb24 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -42,7 +42,7 @@ pandas 0.23.0 **Release date**: May 15, 2017 -This is a major release from 0.23.0 and includes a number of API changes, new +This is a major release from 0.22.0 and includes a number of API changes, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. @@ -54,6 +54,7 @@ Highlights include: - :ref:`Merging / sorting on a combination of columns and index levels <whatsnew_0230.enhancements.merge_on_columns_and_levels>`. - :ref:`Extending Pandas with custom types <whatsnew_023.enhancements.extension>`. - :ref:`Excluding unobserved categories from groupby <whatsnew_0230.enhancements.categorical_grouping>`. +- :ref:`Changes to make output shape of DataFrame.apply consistent <whatsnew_0230.api_breaking.apply>`. See the :ref:`full whatsnew <whatsnew_0230>` for a list of all the changes. diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 89dab728d2bd4..3f89de1dc22d8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -8,90 +8,114 @@ deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. +Highlights include: + +- :ref:`Round-trippable JSON format with 'table' orient <whatsnew_0230.enhancements.round-trippable_json>`. +- :ref:`Instantiation from dicts respects order for Python 3.6+ <whatsnew_0230.api_breaking.dict_insertion_order>`. +- :ref:`Dependent column arguments for assign <whatsnew_0230.enhancements.assign_dependent>`. +- :ref:`Merging / sorting on a combination of columns and index levels <whatsnew_0230.enhancements.merge_on_columns_and_levels>`. 
+- :ref:`Extending Pandas with custom types <whatsnew_023.enhancements.extension>`. +- :ref:`Excluding unobserved categories from groupby <whatsnew_0230.enhancements.categorical_grouping>`. +- :ref:`Changes to make output shape of DataFrame.apply consistent <whatsnew_0230.api_breaking.apply>`. + +Check the :ref:`API Changes <whatsnew_0230.api_breaking>` and :ref:`deprecations <whatsnew_0230.deprecations>` before updating. + .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. See :ref:`install.dropping-27` for more. +.. contents:: What's new in v0.23.0 + :local: + :backlinks: none + :depth: 2 + .. _whatsnew_0230.enhancements: New features ~~~~~~~~~~~~ -.. _whatsnew_0210.enhancements.limit_area: - -``DataFrame.interpolate`` has gained the ``limit_area`` kwarg -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _whatsnew_0230.enhancements.round-trippable_json: -:meth:`DataFrame.interpolate` has gained a ``limit_area`` parameter to allow further control of which ``NaN`` s are replaced. -Use ``limit_area='inside'`` to fill only NaNs surrounded by valid values or use ``limit_area='outside'`` to fill only ``NaN`` s -outside the existing valid values while preserving those inside. (:issue:`16284`) See the :ref:`full documentation here <missing_data.interp_limits>`. +JSON read/write round-trippable with ``orient='table'`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +A ``DataFrame`` can now be written to and subsequently read back via JSON while preserving metadata through usage of the ``orient='table'`` argument (see :issue:`18912` and :issue:`9146`). Previously, none of the available ``orient`` values guaranteed the preservation of dtypes and index names, amongst other metadata. .. 
ipython:: python - ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan]) - ser + df = pd.DataFrame({'foo': [1, 2, 3, 4], + 'bar': ['a', 'b', 'c', 'd'], + 'baz': pd.date_range('2018-01-01', freq='d', periods=4), + 'qux': pd.Categorical(['a', 'b', 'c', 'c']) + }, index=pd.Index(range(4), name='idx')) + df + df.dtypes + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + new_df.dtypes -Fill one consecutive inside value in both directions +Please note that the string `index` is not supported with the round trip format, as it is used by default in ``write_json`` to indicate a missing index name. .. ipython:: python + :okwarning: - ser.interpolate(limit_direction='both', limit_area='inside', limit=1) + df.index.name = 'index' -Fill all consecutive outside values backward + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + new_df.dtypes .. ipython:: python + :suppress: - ser.interpolate(limit_direction='backward', limit_area='outside') + import os + os.remove('test.json') -Fill all consecutive outside values in both directions -.. ipython:: python - - ser.interpolate(limit_direction='both', limit_area='outside') +.. _whatsnew_0230.enhancements.assign_dependent: -.. _whatsnew_0210.enhancements.get_dummies_dtype: -``get_dummies`` now supports ``dtype`` argument -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``.assign()`` accepts dependent arguments +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtype for the new columns. The default remains uint8. (:issue:`18330`) +The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468 +<https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. 
See the +:ref:`documentation here <dsintro.chained_assignment>` (:issue:`14207`) .. ipython:: python - df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) - pd.get_dummies(df, columns=['c']).dtypes - pd.get_dummies(df, columns=['c'], dtype=bool).dtypes - - -.. _whatsnew_0230.enhancements.window_raw: + df = pd.DataFrame({'A': [1, 2, 3]}) + df + df.assign(B=df.A, C=lambda x:x['A']+ x['B']) -Rolling/Expanding.apply() accepts a ``raw`` keyword to pass a ``Series`` to the function -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. warning:: -:func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, -:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have gained a ``raw=None`` parameter. -This is similar to :func:`DataFame.apply`. This parameter, if ``True`` allows one to send a ``np.ndarray`` to the applied function. If ``False`` a ``Series`` will be passed. The -default is ``None``, which preserves backward compatibility, so this will default to ``True``, sending an ``np.ndarray``. -In a future version the default will be changed to ``False``, sending a ``Series``. (:issue:`5071`, :issue:`20584`) + This may subtly change the behavior of your code when you're + using ``.assign()`` to update an existing column. Previously, callables + referring to other variables being updated would get the "old" values -.. ipython:: python + Previous Behavior: - s = pd.Series(np.arange(5), np.arange(5) + 1) - s + .. code-block:: ipython -Pass a ``Series``: + In [2]: df = pd.DataFrame({"A": [1, 2, 3]}) -.. 
ipython:: python + In [3]: df.assign(A=lambda df: df.A + 1, C=lambda df: df.A * -1) + Out[3]: + A C + 0 2 -1 + 1 3 -2 + 2 4 -3 - s.rolling(2, min_periods=1).apply(lambda x: x.iloc[-1], raw=False) + New Behavior: -Mimic the original behavior of passing a ndarray: + .. ipython:: python -.. ipython:: python + df.assign(A=df.A+1, C= lambda df: df.A* -1) - s.rolling(2, min_periods=1).apply(lambda x: x[-1], raw=True) .. _whatsnew_0230.enhancements.merge_on_columns_and_levels: @@ -151,6 +175,194 @@ resetting indexes. See the :ref:`Sorting by Indexes and Values # Sort by 'second' (index) and 'A' (column) df_multi.sort_values(by=['second', 'A']) + +.. _whatsnew_023.enhancements.extension: + +Extending Pandas with Custom Types (Experimental) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy +arrays as columns in a DataFrame or values in a Series. This allows third-party +libraries to implement extensions to NumPy's types, similar to how pandas +implemented categoricals, datetimes with timezones, periods, and intervals. + +As a demonstration, we'll use cyberpandas_, which provides an ``IPArray`` type +for storing ip addresses. + +.. code-block:: ipython + + In [1]: from cyberpandas import IPArray + + In [2]: values = IPArray([ + ...: 0, + ...: 3232235777, + ...: 42540766452641154071740215577757643572 + ...: ]) + ...: + ...: + +``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas +:ref:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. + +.. code-block:: ipython + + In [3]: ser = pd.Series(values) + + In [4]: ser + Out[4]: + 0 0.0.0.0 + 1 192.168.1.1 + 2 2001:db8:85a3::8a2e:370:7334 + dtype: ip + +Notice that the dtype is ``ip``. The missing value semantics of the underlying +array are respected: + +.. 
code-block:: ipython + + In [5]: ser.isna() + Out[5]: + 0 True + 1 False + 2 False + dtype: bool + +For more, see the :ref:`extension types <extending.extension-types>` +documentation. If you build an extension array, publicize it on our +:ref:`ecosystem page <ecosystem.extensions>`. + +.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest/ + + +.. _whatsnew_0230.enhancements.categorical_grouping: + +New ``observed`` keyword for excluding unobserved categories in ``groupby`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Grouping by a categorical includes the unobserved categories in the output. +When grouping by multiple categorical columns, this means you get the cartesian product of all the +categories, including combinations where there are no observations, which can result in a large +number of groups. We have added a keyword ``observed`` to control this behavior, it defaults to +``observed=False`` for backward-compatiblity. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) + +.. ipython:: python + + cat1 = pd.Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = pd.Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = pd.DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df['C'] = ['foo', 'bar'] * 2 + df + +To show all values, the previous behavior: + +.. ipython:: python + + df.groupby(['A', 'B', 'C'], observed=False).count() + + +To show only observed values: + +.. ipython:: python + + df.groupby(['A', 'B', 'C'], observed=True).count() + +For pivotting operations, this behavior is *already* controlled by the ``dropna`` keyword: + +.. 
ipython:: python + + cat1 = pd.Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = pd.Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df + +.. ipython:: python + + pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=True) + pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=False) + + +.. _whatsnew_0230.enhancements.window_raw: + +Rolling/Expanding.apply() accepts ``raw=False`` to pass a ``Series`` to the function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, +:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have gained a ``raw=None`` parameter. +This is similar to :func:`DataFame.apply`. This parameter, if ``True`` allows one to send a ``np.ndarray`` to the applied function. If ``False`` a ``Series`` will be passed. The +default is ``None``, which preserves backward compatibility, so this will default to ``True``, sending an ``np.ndarray``. +In a future version the default will be changed to ``False``, sending a ``Series``. (:issue:`5071`, :issue:`20584`) + +.. ipython:: python + + s = pd.Series(np.arange(5), np.arange(5) + 1) + s + +Pass a ``Series``: + +.. ipython:: python + + s.rolling(2, min_periods=1).apply(lambda x: x.iloc[-1], raw=False) + +Mimic the original behavior of passing a ndarray: + +.. ipython:: python + + s.rolling(2, min_periods=1).apply(lambda x: x[-1], raw=True) + + +.. 
_whatsnew_0210.enhancements.limit_area: + +``DataFrame.interpolate`` has gained the ``limit_area`` kwarg +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`DataFrame.interpolate` has gained a ``limit_area`` parameter to allow further control of which ``NaN`` s are replaced. +Use ``limit_area='inside'`` to fill only NaNs surrounded by valid values or use ``limit_area='outside'`` to fill only ``NaN`` s +outside the existing valid values while preserving those inside. (:issue:`16284`) See the :ref:`full documentation here <missing_data.interp_limits>`. + + +.. ipython:: python + + ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan]) + ser + +Fill one consecutive inside value in both directions + +.. ipython:: python + + ser.interpolate(limit_direction='both', limit_area='inside', limit=1) + +Fill all consecutive outside values backward + +.. ipython:: python + + ser.interpolate(limit_direction='backward', limit_area='outside') + +Fill all consecutive outside values in both directions + +.. ipython:: python + + ser.interpolate(limit_direction='both', limit_area='outside') + +.. _whatsnew_0210.enhancements.get_dummies_dtype: + +``get_dummies`` now supports ``dtype`` argument +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtype for the new columns. The default remains uint8. (:issue:`18330`) + +.. ipython:: python + + df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) + pd.get_dummies(df, columns=['c']).dtypes + pd.get_dummies(df, columns=['c'], dtype=bool).dtypes + + .. 
_whatsnew_0230.enhancements.timedelta_mod: Timedelta mod method @@ -227,86 +439,6 @@ These bugs were squashed: - Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``ascending='False'`` failed to return correct ranks for infinity if ``NaN`` were present (:issue:`19538`) - Bug in :func:`DataFrameGroupBy.rank` where ranks were incorrect when both infinity and ``NaN`` were present (:issue:`20561`) -.. _whatsnew_0230.enhancements.round-trippable_json: - -JSON read/write round-trippable with ``orient='table'`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A ``DataFrame`` can now be written to and subsequently read back via JSON while preserving metadata through usage of the ``orient='table'`` argument (see :issue:`18912` and :issue:`9146`). Previously, none of the available ``orient`` values guaranteed the preservation of dtypes and index names, amongst other metadata. - -.. ipython:: python - - df = pd.DataFrame({'foo': [1, 2, 3, 4], - 'bar': ['a', 'b', 'c', 'd'], - 'baz': pd.date_range('2018-01-01', freq='d', periods=4), - 'qux': pd.Categorical(['a', 'b', 'c', 'c']) - }, index=pd.Index(range(4), name='idx')) - df - df.dtypes - df.to_json('test.json', orient='table') - new_df = pd.read_json('test.json', orient='table') - new_df - new_df.dtypes - -Please note that the string `index` is not supported with the round trip format, as it is used by default in ``write_json`` to indicate a missing index name. - -.. ipython:: python - :okwarning: - - df.index.name = 'index' - - df.to_json('test.json', orient='table') - new_df = pd.read_json('test.json', orient='table') - new_df - new_df.dtypes - -.. ipython:: python - :suppress: - - import os - os.remove('test.json') - - -.. 
_whatsnew_0230.enhancements.assign_dependent: - - -``.assign()`` accepts dependent arguments -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468 -<https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. See the -:ref:`documentation here <dsintro.chained_assignment>` (:issue:`14207`) - -.. ipython:: python - - df = pd.DataFrame({'A': [1, 2, 3]}) - df - df.assign(B=df.A, C=lambda x:x['A']+ x['B']) - -.. warning:: - - This may subtly change the behavior of your code when you're - using ``.assign()`` to update an existing column. Previously, callables - referring to other variables being updated would get the "old" values - - Previous Behavior: - - .. code-block:: ipython - - In [2]: df = pd.DataFrame({"A": [1, 2, 3]}) - - In [3]: df.assign(A=lambda df: df.A + 1, C=lambda df: df.A * -1) - Out[3]: - A C - 0 2 -1 - 1 3 -2 - 2 4 -3 - - New Behavior: - - .. ipython:: python - - df.assign(A=df.A+1, C= lambda df: df.A* -1) .. _whatsnew_0230.enhancements.str_cat_align: @@ -358,116 +490,6 @@ Supplying a ``CategoricalDtype`` will make the categories in each column consist df['A'].dtype df['B'].dtype -.. _whatsnew_023.enhancements.extension: - -Extending Pandas with Custom Types (Experimental) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy -arrays as columns in a DataFrame or values in a Series. This allows third-party -libraries to implement extensions to NumPy's types, similar to how pandas -implemented categoricals, datetimes with timezones, periods, and intervals. - -As a demonstration, we'll use cyberpandas_, which provides an ``IPArray`` type -for storing ip addresses. - -.. 
code-block:: ipython - - In [1]: from cyberpandas import IPArray - - In [2]: values = IPArray([ - ...: 0, - ...: 3232235777, - ...: 42540766452641154071740215577757643572 - ...: ]) - ...: - ...: - -``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas -:ref:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. - -.. code-block:: ipython - - In [3]: ser = pd.Series(values) - - In [4]: ser - Out[4]: - 0 0.0.0.0 - 1 192.168.1.1 - 2 2001:db8:85a3::8a2e:370:7334 - dtype: ip - -Notice that the dtype is ``ip``. The missing value semantics of the underlying -array are respected: - -.. code-block:: ipython - - In [5]: ser.isna() - Out[5]: - 0 True - 1 False - 2 False - dtype: bool - -For more, see the :ref:`extension types <extending.extension-types>` -documentation. If you build an extension array, publicize it on our -:ref:`ecosystem page <ecosystem.extensions>`. - -.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest/ - -.. _whatsnew_0230.enhancements.categorical_grouping: - -Categorical Groupers has gained an observed keyword -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Grouping by a categorical includes the unobserved categories in the output. -When grouping with multiple groupers, this means you get the cartesian product of all the -categories, including combinations where there are no observations, which can result in a large -number of groupers. We have added a keyword ``observed`` to control this behavior, it defaults to -``observed=False`` for backward-compatiblity. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) - - -.. 
ipython:: python - - cat1 = pd.Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - cat2 = pd.Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = pd.DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) - df['C'] = ['foo', 'bar'] * 2 - df - -To show all values, the previous behavior: - -.. ipython:: python - - df.groupby(['A', 'B', 'C'], observed=False).count() - - -To show only observed values: - -.. ipython:: python - - df.groupby(['A', 'B', 'C'], observed=True).count() - -For pivotting operations, this behavior is *already* controlled by the ``dropna`` keyword: - -.. ipython:: python - - cat1 = pd.Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - cat2 = pd.Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) - df - -.. ipython:: python - - pd.pivot_table(df, values='values', index=['A', 'B'], - dropna=True) - pd.pivot_table(df, values='values', index=['A', 'B'], - dropna=False) - .. _whatsnew_0230.enhancements.other: @@ -519,7 +541,7 @@ Other Enhancements - :func:`read_html` now reads all ``<tbody>`` elements in a ``<table>``, not just the first. (:issue:`20690`) - :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) -- :class:`pandas.tseries.api.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). +- :class:`~pandas.tseries.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). 
- :class:`DataFrame` and :class:`Series` now support matrix multiplication (``@``) operator (:issue:`10259`) for Python>=3.5 - Updated :meth:`DataFrame.to_gbq` and :meth:`pandas.read_gbq` signature and documentation to reflect changes from the Pandas-GBQ library version 0.4.0. Adds intersphinx mapping to Pandas-GBQ
@TomAugspurger in case you didn't do this yet
https://api.github.com/repos/pandas-dev/pandas/pulls/21061
2018-05-15T15:31:46Z
2018-05-15T20:05:26Z
2018-05-15T20:05:26Z
2018-05-15T20:05:26Z
Add epoch alternative to deprecation message
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 248c648c33db3..d17d4e7139d72 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1196,6 +1196,9 @@ class Timedelta(_Timedelta): msg = textwrap.dedent("""\ Floor division between integer array and Timedelta is deprecated. Use 'array // timedelta.value' instead. + If you want to obtain epochs from an array of timestamps, + you can rather use + 'array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. """) warnings.warn(msg, FutureWarning) return other // self.value
cfr https://github.com/pandas-dev/pandas/pull/21036#issuecomment-389162167
https://api.github.com/repos/pandas-dev/pandas/pulls/21060
2018-05-15T15:24:59Z
2018-05-15T18:09:49Z
2018-05-15T18:09:49Z
2018-05-15T18:11:18Z
Fixed space in small info
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0437c479c9d81..dccc840f5affd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2231,7 +2231,7 @@ def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: - return ("{num:3.1f}{size_q}" + return ("{num:3.1f}{size_q} " "{x}".format(num=num, size_q=size_qualifier, x=x)) num /= 1024.0 return "{num:3.1f}{size_q} {pb}".format(num=num, diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 8fc6fef11798a..668613c494a47 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -5,6 +5,7 @@ from datetime import datetime, timedelta import re import sys +import textwrap from numpy import nan import numpy as np @@ -204,6 +205,25 @@ def test_info(self): frame.info() frame.info(verbose=False) + def test_info_memory(self): + # https://github.com/pandas-dev/pandas/issues/21056 + df = pd.DataFrame({'a': pd.Series([1, 2], dtype='i8')}) + buf = StringIO() + df.info(buf=buf) + result = buf.getvalue() + bytes = float(df.memory_usage().sum()) + + expected = textwrap.dedent("""\ + <class 'pandas.core.frame.DataFrame'> + RangeIndex: 2 entries, 0 to 1 + Data columns (total 1 columns): + a 2 non-null int64 + dtypes: int64(1) + memory usage: {} bytes + """.format(bytes)) + + assert result == expected + def test_info_wide(self): from pandas import set_option, reset_option io = StringIO()
Closes https://github.com/pandas-dev/pandas/issues/21056 ping @jreback @jorisvandenbossche this should be the last failure that was picked up by dask's tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/21057
2018-05-15T14:54:28Z
2018-05-15T19:04:11Z
2018-05-15T19:04:10Z
2018-05-15T21:36:26Z
BUG: Keep original name in str.cat
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index d40ff02fd0285..81d775157cf62 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2320,9 +2320,9 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): res = str_cat(data, others=others, sep=sep, na_rep=na_rep) if isinstance(self._orig, Index): - res = Index(res) + res = Index(res, name=self._orig.name) else: # Series - res = Series(res, index=data.index) + res = Series(res, index=data.index, name=self._orig.name) return res @copy(str_split) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 1a978cbf6363f..9d008dfd25c90 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -144,6 +144,19 @@ def test_cat(self): with tm.assert_raises_regex(ValueError, rgx): strings.str_cat(one, 'three') + @pytest.mark.parametrize('container', [Series, Index]) + @pytest.mark.parametrize('other', [None, Series, Index]) + def test_str_cat_name(self, container, other): + # https://github.com/pandas-dev/pandas/issues/21053 + values = ['a', 'b'] + if other: + other = other(values) + else: + other = values + result = container(values, name='name').str.cat(other, sep=',', + join='left') + assert result.name == 'name' + @pytest.mark.parametrize('series_or_index', ['series', 'index']) def test_str_cat(self, series_or_index): # test_cat above tests "str_cat" from ndarray to ndarray;
Closes https://github.com/pandas-dev/pandas/issues/21053 cc @jreback @jorisvandenbossche @h-vetinari for a quick check if you have a chance.
https://api.github.com/repos/pandas-dev/pandas/pulls/21054
2018-05-15T13:51:20Z
2018-05-15T15:07:43Z
2018-05-15T15:07:42Z
2018-05-15T15:09:43Z
DOC: Updated release and whatsnew for 0.23.0
diff --git a/doc/source/release.rst b/doc/source/release.rst index 709c9b15b55f7..5fe397a7cbb37 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,361 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: https://pypi.org/project/pandas * Documentation: http://pandas.pydata.org +pandas 0.23.0 +------------- + +**Release date**: May 15, 2017 + +This is a major release from 0.23.0 and includes a number of API changes, new +features, enhancements, and performance improvements along with a large number +of bug fixes. We recommend that all users upgrade to this version. + +Highlights include: + +- :ref:`Round-trippable JSON format with 'table' orient <whatsnew_0230.enhancements.round-trippable_json>`. +- :ref:`Instantiation from dicts respects order for Python 3.6+ <whatsnew_0230.api_breaking.dict_insertion_order>`. +- :ref:`Dependent column arguments for assign <whatsnew_0230.enhancements.assign_dependent>`. +- :ref:`Merging / sorting on a combination of columns and index levels <whatsnew_0230.enhancements.merge_on_columns_and_levels>`. +- :ref:`Extending Pandas with custom types <whatsnew_023.enhancements.extension>`. +- :ref:`Excluding unobserved categories from groupby <whatsnew_0230.enhancements.categorical_grouping>`. + +See the :ref:`full whatsnew <whatsnew_0230>` for a list of all the changes. + +Thanks +~~~~~~ + +A total of 328 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Critchley +* AbdealiJK + +* Adam Hooper + +* Albert Villanova del Moral +* Alejandro Giacometti + +* Alejandro Hohmann + +* Alex Rychyk +* Alexander Buchkovsky +* Alexander Lenail + +* Alexander Michael Schade +* Aly Sivji + +* Andreas Költringer + +* Andrew +* Andrew Bui + +* András Novoszáth + +* Andy Craze + +* Andy R. 
Terrel +* Anh Le + +* Anil Kumar Pallekonda + +* Antoine Pitrou + +* Antonio Linde + +* Antonio Molina + +* Antonio Quinonez + +* Armin Varshokar + +* Artem Bogachev + +* Avi Sen + +* Azeez Oluwafemi + +* Ben Auffarth + +* Bernhard Thiel + +* Bhavesh Poddar + +* BielStela + +* Blair + +* Bob Haffner +* Brett Naul + +* Brock Mendel +* Bryce Guinta + +* Carlos Eduardo Moreira dos Santos + +* Carlos García Márquez + +* Carol Willing +* Cheuk Ting Ho + +* Chitrank Dixit + +* Chris +* Chris Burr + +* Chris Catalfo + +* Chris Mazzullo +* Christian Chwala + +* Cihan Ceyhan + +* Clemens Brunner +* Colin + +* Cornelius Riemenschneider +* Crystal Gong + +* DaanVanHauwermeiren +* Dan Dixey + +* Daniel Frank + +* Daniel Garrido + +* Daniel Sakuma + +* DataOmbudsman + +* Dave Hirschfeld +* Dave Lewis + +* David Adrián Cañones Castellano + +* David Arcos + +* David C Hall + +* David Fischer +* David Hoese + +* David Lutz + +* David Polo + +* David Stansby +* Dennis Kamau + +* Dillon Niederhut +* Dimitri + +* Dr. Irv +* Dror Atariah +* Eric Chea + +* Eric Kisslinger +* Eric O. 
LEBIGOT (EOL) + +* FAN-GOD + +* Fabian Retkowski + +* Fer Sar + +* Gabriel de Maeztu + +* Gianpaolo Macario + +* Giftlin Rajaiah +* Gilberto Olimpio + +* Gina + +* Gjelt + +* Graham Inggs + +* Grant Roch +* Grant Smith + +* Grzegorz Konefał + +* Guilherme Beltramini +* HagaiHargil + +* Hamish Pitkeathly + +* Hammad Mashkoor + +* Hannah Ferchland + +* Hans +* Haochen Wu + +* Hissashi Rocha + +* Iain Barr + +* Ibrahim Sharaf ElDen + +* Ignasi Fosch + +* Igor Conrado Alves de Lima + +* Igor Shelvinskyi + +* Imanflow + +* Ingolf Becker +* Israel Saeta Pérez +* Iva Koevska + +* Jakub Nowacki + +* Jan F-F + +* Jan Koch + +* Jan Werkmann +* Janelle Zoutkamp + +* Jason Bandlow + +* Jaume Bonet + +* Jay Alammar + +* Jeff Reback +* JennaVergeynst +* Jimmy Woo + +* Jing Qiang Goh + +* Joachim Wagner + +* Joan Martin Miralles + +* Joel Nothman +* Joeun Park + +* John Cant + +* Johnny Metz + +* Jon Mease +* Jonas Schulze + +* Jongwony + +* Jordi Contestí + +* Joris Van den Bossche +* José F. R. Fonseca + +* Jovixe + +* Julio Martinez + +* Jörg Döpfert +* KOBAYASHI Ittoku + +* Kate Surta + +* Kenneth + +* Kevin Kuhl +* Kevin Sheppard +* Krzysztof Chomski +* Ksenia + +* Ksenia Bobrova + +* Kunal Gosar + +* Kurtis Kerstein + +* Kyle Barron + +* Laksh Arora + +* Laurens Geffert + +* Leif Walsh +* Liam Marshall + +* Liam3851 + +* Licht Takeuchi +* Liudmila + +* Ludovico Russo + +* Mabel Villalba + +* Manan Pal Singh + +* Manraj Singh +* Marc + +* Marc Garcia +* Marco Hemken + +* Maria del Mar Bibiloni + +* Mario Corchero + +* Mark Woodbridge + +* Martin Journois + +* Mason Gallo + +* Matias Heikkilä + +* Matt Braymer-Hayes +* Matt Kirk + +* Matt Maybeno + +* Matthew Kirk + +* Matthew Rocklin + +* Matthew Roeschke +* Matthias Bussonnier + +* Max Mikhaylov + +* Maxim Veksler + +* Maximilian Roos +* Maximiliano Greco + +* Michael Penkov +* Michael Röttger + +* Michael Selik + +* Michael Waskom +* Mie~~~ +* Mike Kutzma + +* Ming Li + +* Mitar + +* Mitch Negus + +* Montana Low + +* 
Moritz Münst + +* Mortada Mehyar +* Myles Braithwaite + +* Nate Yoder +* Nicholas Ursa + +* Nick Chmura +* Nikos Karagiannakis + +* Nipun Sadvilkar + +* Nis Martensen + +* Noah + +* Noémi Éltető + +* Olivier Bilodeau + +* Ondrej Kokes + +* Onno Eberhard + +* Paul Ganssle + +* Paul Mannino + +* Paul Reidy +* Paulo Roberto de Oliveira Castro + +* Pepe Flores + +* Peter Hoffmann +* Phil Ngo + +* Pietro Battiston +* Pranav Suri + +* Priyanka Ojha + +* Pulkit Maloo + +* README Bot + +* Ray Bell + +* Riccardo Magliocchetti + +* Ridhwan Luthra + +* Robert Meyer +* Robin +* Robin Kiplang'at + +* Rohan Pandit + +* Rok Mihevc + +* Rouz Azari +* Ryszard T. Kaleta + +* Sam Cohan +* Sam Foo +* Samir Musali + +* Samuel Sinayoko + +* Sangwoong Yoon +* SarahJessica + +* Sharad Vijalapuram + +* Shubham Chaudhary + +* SiYoungOh + +* Sietse Brouwer +* Simone Basso + +* Stefania Delprete + +* Stefano Cianciulli + +* Stephen Childs + +* StephenVoland + +* Stijn Van Hoey + +* Sven +* Talitha Pumar + +* Tarbo Fukazawa + +* Ted Petrou + +* Thomas A Caswell +* Tim Hoffmann + +* Tim Swast +* Tom Augspurger +* Tommy + +* Tulio Casagrande + +* Tushar Gupta + +* Tushar Mittal + +* Upkar Lidder + +* Victor Villas + +* Vince W + +* Vinícius Figueiredo + +* Vipin Kumar + +* WBare +* Wenhuan + +* Wes Turner +* William Ayd +* Wilson Lin + +* Xbar +* Yaroslav Halchenko +* Yee Mey +* Yeongseon Choe + +* Yian + +* Yimeng Zhang +* ZhuBaohe + +* Zihao Zhao + +* adatasetaday + +* akielbowicz + +* akosel + +* alinde1 + +* amuta + +* bolkedebruin +* cbertinato +* cgohlke +* charlie0389 + +* chris-b1 +* csfarkas + +* dajcs + +* deflatSOCO + +* derestle-htwg +* discort +* dmanikowski-reef + +* donK23 + +* elrubio + +* fivemok + +* fjdiod +* fjetter + +* froessler + +* gabrielclow +* gfyoung +* ghasemnaddaf +* h-vetinari + +* himanshu awasthi + +* ignamv + +* jayfoad + +* jazzmuesli + +* jbrockmendel +* jen w + +* jjames34 + +* joaoavf + +* joders + +* jschendel +* juan huguet + +* l736x + +* luzpaz + +* 
mdeboc + +* miguelmorin + +* miker985 +* miquelcamprodon + +* orereta + +* ottiP + +* peterpanmj + +* rafarui + +* raph-m + +* readyready15728 + +* rmihael + +* samghelms + +* scriptomation + +* sfoo + +* stefansimik + +* stonebig +* tmnhat2001 + +* tomneep + +* topper-123 +* tv3141 + +* verakai + +* xpvpc + +* zhanghui + + pandas 0.22.0 ------------- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 005b2377ed61b..89dab728d2bd4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1,7 +1,7 @@ .. _whatsnew_0230: -v0.23.0 -------- +v0.23.0 (May 15, 2017) +---------------------- This is a major release from 0.22.0 and includes a number of API changes, deprecations, new features, enhancements, and performance improvements along @@ -319,6 +319,7 @@ The method has now gained a keyword ``join`` to control the manner of alignment, In v.0.23 `join` will default to None (meaning no alignment), but this default will change to ``'left'`` in a future version of pandas. .. ipython:: python + :okwarning: s = pd.Series(['a', 'b', 'c', 'd']) t = pd.Series(['b', 'd', 'e', 'c'], index=[1, 3, 4, 2]) @@ -613,6 +614,11 @@ Deprecate Panel with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`, :issue:`18324`). +.. ipython:: python + :suppress: + + import pandas.util.testing as tm + .. ipython:: python :okwarning:
https://api.github.com/repos/pandas-dev/pandas/pulls/21051
2018-05-15T13:20:02Z
2018-05-15T13:49:18Z
2018-05-15T13:49:18Z
2018-05-15T16:21:41Z
Fix Inconsistent MultiIndex Sorting
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dccc840f5affd..2b65af4b368a9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4454,7 +4454,10 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) labels = self._get_axis(axis) - if level: + # make sure that the axis is lexsorted to start + # if not we need to reconstruct to get the correct indexer + labels = labels._sort_levels_monotonic() + if level is not None: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) @@ -4462,9 +4465,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer - # make sure that the axis is lexsorted to start - # if not we need to reconstruct to get the correct indexer - labels = labels._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_labels_for_sorting(), orders=ascending, na_position=na_position) diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..622fa2c226134 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2616,7 +2616,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) index = self.index - if level: + if level is not None: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index d89731dc09044..d05321abefca6 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -861,6 +861,23 @@ def test_stack_preserve_categorical_dtype(self): tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("level", [0, 'baz']) + def test_unstack_swaplevel_sortlevel(self, level): + # GH 20994 + mi = pd.MultiIndex.from_product([[0], ['d', 'c']], + 
names=['bar', 'baz']) + df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=['B', 'A']) + df.columns.name = 'foo' + + expected = pd.DataFrame([ + [3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples([ + ('c', 'A'), ('c', 'B'), ('d', 'A'), ('d', 'B')], names=[ + 'baz', 'foo'])) + expected.index.name = 'bar' + + result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level) + tm.assert_frame_equal(result, expected) + def test_unstack_fill_frame_object(): # GH12815 Test unstacking with object. diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index b60eb89e87da5..599ae683f914b 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -550,18 +550,36 @@ def test_sort_index(self): expected = frame.iloc[:, ::-1] assert_frame_equal(result, expected) - def test_sort_index_multiindex(self): + @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 + def test_sort_index_multiindex(self, level): # GH13496 # sort rows by specified level of multi-index - mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) + mi = MultiIndex.from_tuples([ + [2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi) + + expected_mi = MultiIndex.from_tuples([ + [1, 1, 1], + [2, 1, 2], + [2, 1, 3]], names=list('ABC')) + expected = pd.DataFrame([ + [5, 6], + [3, 4], + [1, 2]], index=expected_mi) + result = df.sort_index(level=level) + assert_frame_equal(result, expected) - # MI sort, but no level: sort_level has no effect - mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) - result = df.sort_index(sort_remaining=False) - expected = df.sort_index() + # sort_remaining=False + expected_mi = MultiIndex.from_tuples([ + [1, 1, 1], + [2, 1, 3], + [2, 1, 2]], names=list('ABC')) + expected = pd.DataFrame([ + [5, 6], + [1, 2], + [3, 4]], index=expected_mi) + result = 
df.sort_index(level=level, sort_remaining=False) assert_frame_equal(result, expected) def test_sort_index_intervalindex(self): diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 01b4ea6eaa238..13e0d1b12c372 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -141,19 +141,20 @@ def test_sort_index_inplace(self): assert result is None tm.assert_series_equal(random_order, self.ts) - def test_sort_index_multiindex(self): + @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 + def test_sort_index_multiindex(self, level): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True - res = s.sort_index(level='A') + res = s.sort_index(level=level) assert_series_equal(backwards, res) # GH13496 - # rows share same level='A': sort has no effect without remaining lvls - res = s.sort_index(level='A', sort_remaining=False) + # sort has no effect without remaining lvls + res = s.sort_index(level=level, sort_remaining=False) assert_series_equal(s, res) def test_sort_index_kind(self):
closes #20994 closes #20945 closes #21052 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is a pretty dark corner and I'll admit that I don't fully understand all of the elements in play. That said, the first problem I noticed with the first referenced issue was an errant conditional that was causing `level=0` and `level='foo'` to go down two different branches, even if the name of the first level was in fact 'foo'. After uncovering that, the subsequent ordering of the returned index then was incorrect regardless of the argument. I moved a monotonic sort around to fix this, though I feel like: - There may be a more general purpose solution to fix this AND - There may be more expressive tests to add here Feedback appreciated
https://api.github.com/repos/pandas-dev/pandas/pulls/21043
2018-05-15T02:10:13Z
2018-05-19T20:15:04Z
2018-05-19T20:15:03Z
2018-06-05T09:04:41Z
CI: Fixed linting on download_wheels
diff --git a/scripts/download_wheels.py b/scripts/download_wheels.py index a4705d0e4e63c..f5cdbbe36d90d 100644 --- a/scripts/download_wheels.py +++ b/scripts/download_wheels.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python """Fetch wheels from wheels.scipy.org for a pandas version.""" import argparse import pathlib @@ -23,7 +24,7 @@ def fetch(version): dest.mkdir(exist_ok=True) files = [x for x in root.xpath("//a/text()") - if x.startswith(f'pandas-{version}') + if x.startswith('pandas-{}'.format(version)) and not dest.joinpath(x).exists()] N = len(files) @@ -32,7 +33,9 @@ def fetch(version): out = str(dest.joinpath(filename)) link = urllib.request.urljoin(base, filename) urllib.request.urlretrieve(link, out) - print(f"Downloaded {link} to {out} [{i}/{N}]") + print("Downloaded {link} to {out} [{i}/{N}]".format( + link=link, out=out, i=i, N=N + )) def main(args=None):
xref https://github.com/pandas-dev/pandas/pull/20928#pullrequestreview-120060546
https://api.github.com/repos/pandas-dev/pandas/pulls/21042
2018-05-15T01:34:05Z
2018-05-15T10:00:21Z
2018-05-15T10:00:21Z
2018-05-15T10:00:24Z
DOC: Enhancing pivot / reshape docs
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 7d9925d800441..feb58c1c11dfd 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -17,6 +17,8 @@ Reshaping and Pivot Tables Reshaping by pivoting DataFrame objects --------------------------------------- +.. image:: _static/reshaping_pivot.png + .. ipython:: :suppress: @@ -33,8 +35,7 @@ Reshaping by pivoting DataFrame objects In [3]: df = unpivot(tm.makeTimeDataFrame()) -Data is often stored in CSV files or databases in so-called "stacked" or -"record" format: +Data is often stored in so-called "stacked" or "record" format: .. ipython:: python @@ -60,8 +61,6 @@ To select out everything for variable ``A`` we could do: df[df['variable'] == 'A'] -.. image:: _static/reshaping_pivot.png - But suppose we wish to do time series operations with the variables. A better representation would be where the ``columns`` are the unique variables and an ``index`` of dates identifies individual observations. To reshape the data into @@ -81,7 +80,7 @@ column: .. ipython:: python df['value2'] = df['value'] * 2 - pivoted = df.pivot('date', 'variable') + pivoted = df.pivot(index='date', columns='variable') pivoted You can then select subsets from the pivoted ``DataFrame``: @@ -93,6 +92,12 @@ You can then select subsets from the pivoted ``DataFrame``: Note that this returns a view on the underlying data in the case where the data are homogeneously-typed. +.. note:: + :func:`~pandas.pivot` will error with a ``ValueError: Index contains duplicate + entries, cannot reshape`` if the index/column pair is not unique. In this + case, consider using :func:`~pandas.pivot_table` which is a generalization + of pivot that can handle duplicate values for one index/column pair. + .. 
_reshaping.stacking: Reshaping by stacking and unstacking @@ -698,10 +703,103 @@ handling of NaN: In [3]: np.unique(x, return_inverse=True)[::-1] Out[3]: (array([3, 3, 0, 4, 1, 2]), array([nan, 3.14, inf, 'A', 'B'], dtype=object)) - .. note:: If you just want to handle one column as a categorical variable (like R's factor), you can use ``df["cat_col"] = pd.Categorical(df["col"])`` or ``df["cat_col"] = df["col"].astype("category")``. For full docs on :class:`~pandas.Categorical`, see the :ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`. + +Examples +-------- + +In this section, we will review frequently asked questions and examples. The +column names and relevant column values are named to correspond with how this +DataFrame will be pivoted in the answers below. + +.. ipython:: python + + np.random.seed([3, 1415]) + n = 20 + + cols = np.array(['key', 'row', 'item', 'col']) + df = cols + pd.DataFrame((np.random.randint(5, size=(n, 4)) // [2, 1, 2, 1]).astype(str)) + df.columns = cols + df = df.join(pd.DataFrame(np.random.rand(n, 2).round(2)).add_prefix('val')) + + df + +Pivoting with Single Aggregations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Suppose we wanted to pivot ``df`` such that the ``col`` values are columns, +``row`` values are the index, and the mean of ``val0`` are the values? In +particular, the resulting DataFrame should look like: + +.. code-block:: ipython + + col col0 col1 col2 col3 col4 + row + row0 0.77 0.605 NaN 0.860 0.65 + row2 0.13 NaN 0.395 0.500 0.25 + row3 NaN 0.310 NaN 0.545 NaN + row4 NaN 0.100 0.395 0.760 0.24 + +This solution uses :func:`~pandas.pivot_table`. Also note that +``aggfunc='mean'`` is the default. It is included here to be explicit. + +.. ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc='mean') + +Note that we can also replace the missing values by using the ``fill_value`` +parameter. + +.. 
ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc='mean', fill_value=0) + +Also note that we can pass in other aggregation functions as well. For example, +we can also pass in ``sum``. + +.. ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc='sum', fill_value=0) + +Another aggregation we can do is calculate the frequency in which the columns +and rows occur together a.k.a. "cross tabulation". To do this, we can pass +``size`` to the ``aggfunc`` parameter. + +.. ipython:: python + + df.pivot_table(index='row', columns='col', fill_value=0, aggfunc='size') + +Pivoting with Multiple Aggregations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We can also perform multiple aggregations. For example, to perform both a +``sum`` and ``mean``, we can pass in a list to the ``aggfunc`` argument. + +.. ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc=['mean', 'sum']) + +Note to aggregate over multiple value columns, we can pass in a list to the +``values`` parameter. + +.. ipython:: python + + df.pivot_table( + values=['val0', 'val1'], index='row', columns='col', aggfunc=['mean']) + +Note to subdivide over multiple columns we can pass in a list to the +``columns`` parameter. + +.. ipython:: python + + df.pivot_table( + values=['val0'], index='row', columns=['item', 'col'], aggfunc=['mean']) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7aadf7e735f38..19425cf1a50a1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5503,50 +5503,72 @@ def pivot(self, index=None, columns=None, values=None): ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], - ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]}) + ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + ... 
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df - A B C D - 0 foo one small 1 - 1 foo one large 2 - 2 foo one large 2 - 3 foo two small 3 - 4 foo two small 3 - 5 bar one large 4 - 6 bar one small 5 - 7 bar two small 6 - 8 bar two large 7 + A B C D E + 0 foo one small 1 2 + 1 foo one large 2 4 + 2 foo one large 2 5 + 3 foo two small 3 5 + 4 foo two small 3 6 + 5 bar one large 4 6 + 6 bar one small 5 8 + 7 bar two small 6 9 + 8 bar two large 7 9 + + This first example aggregates values by taking the sum. >>> table = pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B - bar one 4.0 5.0 - two 7.0 6.0 - foo one 4.0 1.0 - two NaN 6.0 + bar one 4 5 + two 7 6 + foo one 4 1 + two NaN 6 + + We can also fill missing values using the `fill_value` parameter. >>> table = pivot_table(df, values='D', index=['A', 'B'], - ... columns=['C'], aggfunc=np.sum) + ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B - bar one 4.0 5.0 - two 7.0 6.0 - foo one 4.0 1.0 - two NaN 6.0 + bar one 4 5 + two 7 6 + foo one 4 1 + two 0 6 + + The next example aggregates by taking the mean across multiple columns. + + >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': np.mean, + ... 'E': np.mean}) + >>> table + D E + mean mean + A C + bar large 5.500000 7.500000 + small 5.500000 8.500000 + foo large 2.000000 4.500000 + small 2.333333 4.333333 + + We can also calculate multiple types of aggregations for any given + value column. >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E - mean max median min + mean max mean min A C - bar large 5.500000 16 14.5 13 - small 5.500000 15 14.5 14 - foo large 2.000000 10 9.5 9 - small 2.333333 12 11.0 8 + bar large 5.500000 9 7.500000 6 + small 5.500000 9 8.500000 8 + foo large 2.000000 5 4.500000 4 + small 2.333333 6 4.333333 2 Returns -------
- [x] closes #19089 - [x] tests added / passed (NA Just Docs) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Enhancing pivot / reshape docs Added more examples and added Q + A section.
https://api.github.com/repos/pandas-dev/pandas/pulls/21038
2018-05-14T20:13:41Z
2018-11-12T00:21:59Z
2018-11-12T00:21:58Z
2018-11-12T00:22:12Z
Warn on ndarray[int] // timedelta
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index adb4cdf2974a0..73e3e721aad71 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -257,7 +257,7 @@ Pass ``errors='coerce'`` to convert unparseable data to ``NaT`` (not a time): Epoch Timestamps ~~~~~~~~~~~~~~~~ -pandas supports converting integer or float epoch times to ``Timestamp`` and +pandas supports converting integer or float epoch times to ``Timestamp`` and ``DatetimeIndex``. The default unit is nanoseconds, since that is how ``Timestamp`` objects are stored internally. However, epochs are often stored in another ``unit`` which can be specified. These are computed from the starting point specified by the @@ -304,11 +304,12 @@ To invert the operation from above, namely, to convert from a ``Timestamp`` to a stamps = pd.date_range('2012-10-08 18:15:05', periods=4, freq='D') stamps -We convert the ``DatetimeIndex`` to an ``int64`` array, then divide by the conversion unit. +We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by the +"unit" (1 second). .. ipython:: python - stamps.view('int64') // pd.Timedelta(1, unit='s') + (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s') .. _timeseries.origin: diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1660c8d9fcdc5..dcd7c56b8013f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1004,6 +1004,7 @@ Deprecations of the ``Series`` and ``Index`` classes have been deprecated and will be removed in a future version (:issue:`20419`). - ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`) +- Floor division between an integer ndarray and a :class:`Timedelta` is deprecated. Divide by :attr:`Timedelta.value` instead (:issue:`19761`) - Setting ``PeriodIndex.freq`` (which was not guaranteed to work correctly) is deprecated. 
Use :meth:`PeriodIndex.asfreq` instead (:issue:`20678`) - ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`) - The previous default behavior of negative indices in ``Categorical.take`` is deprecated. In a future version it will change from meaning missing values to meaning positional indices from the right. The future behavior is consistent with :meth:`Series.take` (:issue:`20664`). diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 7aeff9bec75b5..248c648c33db3 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # cython: profile=False import collections +import textwrap +import warnings import sys cdef bint PY3 = (sys.version_info[0] >= 3) @@ -1188,6 +1190,15 @@ class Timedelta(_Timedelta): if other.dtype.kind == 'm': # also timedelta-like return _broadcast_floordiv_td64(self.value, other, _rfloordiv) + elif other.dtype.kind == 'i': + # Backwards compatibility + # GH-19761 + msg = textwrap.dedent("""\ + Floor division between integer array and Timedelta is + deprecated. Use 'array // timedelta.value' instead. + """) + warnings.warn(msg, FutureWarning) + return other // self.value raise TypeError('Invalid dtype {dtype} for ' '{op}'.format(dtype=other.dtype, op='__floordiv__')) @@ -1210,6 +1221,11 @@ class Timedelta(_Timedelta): def __rmod__(self, other): # Naive implementation, room for optimization + if hasattr(other, 'dtype') and other.dtype.kind == 'i': + # TODO: Remove this check with backwards-compat shim + # for integer / Timedelta is removed. 
+ raise TypeError("Invalid type {dtype} for " + "{op}".format(dtype=other.dtype, op='__mod__')) return self.__rdivmod__(other)[1] def __divmod__(self, other): @@ -1219,6 +1235,11 @@ class Timedelta(_Timedelta): def __rdivmod__(self, other): # Naive implementation, room for optimization + if hasattr(other, 'dtype') and other.dtype.kind == 'i': + # TODO: Remove this check with backwards-compat shim + # for integer / Timedelta is removed. + raise TypeError("Invalid type {dtype} for " + "{op}".format(dtype=other.dtype, op='__mod__')) div = other // self return div, other - div * self diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index 179768fcc6709..9636c92ec22d5 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -403,10 +403,11 @@ def test_td_rfloordiv_numeric_scalar(self): with pytest.raises(TypeError): td.__rfloordiv__(np.float64(2.0)) - with pytest.raises(TypeError): - td.__rfloordiv__(np.int32(2.0)) with pytest.raises(TypeError): td.__rfloordiv__(np.uint8(9)) + with tm.assert_produces_warning(FutureWarning): + # GH-19761: Change to TypeError. + td.__rfloordiv__(np.int32(2.0)) def test_td_rfloordiv_timedeltalike_array(self): # GH#18846 @@ -432,7 +433,8 @@ def test_td_rfloordiv_numeric_series(self): ser = pd.Series([1], dtype=np.int64) res = td.__rfloordiv__(ser) assert res is NotImplemented - with pytest.raises(TypeError): + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + # TODO: GH-19761. Change to TypeError. 
ser // td def test_mod_timedeltalike(self): diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index ab2bf92a26826..3fdc2aa71bfc0 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -21,6 +21,18 @@ def test_arithmetic_overflow(self): with pytest.raises(OverflowError): pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999) + def test_array_timedelta_floordiv(self): + # https://github.com/pandas-dev/pandas/issues/19761 + ints = pd.date_range('2012-10-08', periods=4, freq='D').view('i8') + msg = r"Use 'array // timedelta.value'" + with tm.assert_produces_warning(FutureWarning) as m: + result = ints // pd.Timedelta(1, unit='s') + + assert msg in str(m[0].message) + expected = np.array([1349654400, 1349740800, 1349827200, 1349913600], + dtype='i8') + tm.assert_numpy_array_equal(result, expected) + def test_ops_error_str(self): # GH 13624 td = Timedelta('1 day')
Closes #19761. ```python In [2]: pd.DatetimeIndex(['1931', '1970', '2017']).view('i8') // pd.Timedelta(1, unit='s') pandas-dev/bin/ipython:1: FutureWarning: Floor division between integer array and Timedelta is deprecated. Use 'array // timedelta.value' instead. Out[2]: array([-1230768000, 0, 1483228800]) ``` Long-term, we'll recommend using `to_epoch` for the case where people are doing this to do conversion to unix epoch. But https://github.com/pandas-dev/pandas/issues/14772 has a few design issues that will take some time to discuss. I think we should just recommend `.value` for now.
https://api.github.com/repos/pandas-dev/pandas/pulls/21036
2018-05-14T18:47:31Z
2018-05-15T10:01:33Z
2018-05-15T10:01:33Z
2019-12-05T15:53:26Z
DOC: Rephrased doc for Series.asof. Added examples
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6ca8f6731bbb8..a0886eb431882 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6495,40 +6495,98 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, def asof(self, where, subset=None): """ - The last row without any NaN is taken (or the last row without - NaN considering only the subset of columns in the case of a DataFrame) + Return the last row(s) without any `NaN`s before `where`. + + The last row (for each element in `where`, if list) without any + `NaN` is taken. + In case of a :class:`~pandas.DataFrame`, the last row without `NaN` + considering only the subset of columns (if not `None`) .. versionadded:: 0.19.0 For DataFrame - If there is no good value, NaN is returned for a Series + If there is no good value, `NaN` is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- - where : date or array of dates - subset : string or list of strings, default None - if not None use these columns for NaN propagation + where : date or array-like of dates + Date(s) before which the last row(s) are returned. + subset : str or array-like of str, default `None` + For DataFrame, if not `None`, only use these columns to + check for `NaN`s. Notes ----- - Dates are assumed to be sorted - Raises if this is not the case + Dates are assumed to be sorted. Raises if this is not the case. Returns ------- - where is scalar - - - value or NaN if input is Series - - Series if input is DataFrame + scalar, Series, or DataFrame - where is Index: same shape object as input + * scalar : when `self` is a Series and `where` is a scalar + * Series: when `self` is a Series and `where` is an array-like, + or when `self` is a DataFrame and `where` is a scalar + * DataFrame : when `self` is a DataFrame and `where` is an + array-like See Also -------- - merge_asof + merge_asof : Perform an asof merge. Similar to left join. 
- """ + Examples + -------- + A Series and a scalar `where`. + + >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) + >>> s + 10 1.0 + 20 2.0 + 30 NaN + 40 4.0 + dtype: float64 + + >>> s.asof(20) + 2.0 + For a sequence `where`, a Series is returned. The first value is + ``NaN``, because the first element of `where` is before the first + index value. + + >>> s.asof([5, 20]) + 5 NaN + 20 2.0 + dtype: float64 + + Missing values are not considered. The following is ``2.0``, not + ``NaN``, even though ``NaN`` is at the index location for ``30``. + + >>> s.asof(30) + 2.0 + + Take all columns into consideration + + >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], + ... 'b': [None, None, None, None, 500]}, + ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', + ... '2018-02-27 09:02:00', + ... '2018-02-27 09:03:00', + ... '2018-02-27 09:04:00', + ... '2018-02-27 09:05:00'])) + >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', + ... '2018-02-27 09:04:30'])) + a b + 2018-02-27 09:03:30 NaN NaN + 2018-02-27 09:04:30 NaN NaN + + Take a single column into consideration + + >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', + ... '2018-02-27 09:04:30']), + ... subset=['a']) + a b + 2018-02-27 09:03:30 30.0 NaN + 2018-02-27 09:04:30 40.0 NaN + """ if isinstance(where, compat.string_types): from pandas import to_datetime where = to_datetime(where)
Used code from https://github.com/pandas-dev/pandas/issues/20652 as example, to illustrate different behaviours. - [x] closes #20652 - [x] tests added / passed (N/A, docs) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21034
2018-05-14T18:26:18Z
2018-11-04T15:01:16Z
2018-11-04T15:01:16Z
2018-11-12T13:22:21Z
TST: add arithmetic operators fixture
diff --git a/pandas/conftest.py b/pandas/conftest.py index 137afaa3b3490..b09cb872a12fb 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -2,6 +2,7 @@ import numpy as np import pandas as pd +from pandas.compat import PY3 import pandas.util._test_decorators as td @@ -77,6 +78,24 @@ def observed(request): return request.param +_all_arithmetic_operators = ['__add__', '__radd__', + '__sub__', '__rsub__', + '__mul__', '__rmul__', + '__floordiv__', '__rfloordiv__', + '__truediv__', '__rtruediv__', + '__pow__', '__rpow__'] +if not PY3: + _all_arithmetic_operators.extend(['__div__', '__rdiv__']) + + +@pytest.fixture(params=_all_arithmetic_operators) +def all_arithmetic_operators(request): + """ + Fixture for dunder names for common arithmetic operations + """ + return request.param + + @pytest.fixture(params=[None, 'gzip', 'bz2', 'zip', pytest.param('xz', marks=td.skip_if_no_lzma)]) def compression(request): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index f90fcce973f00..ecb74622edf10 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -827,16 +827,52 @@ def test_sub_datetime64_not_ns(self, box, assert_func): res = dt64 - obj assert_func(res, -expected) - def test_operators_datetimelike(self): - def run_ops(ops, get_ser, test_ser): + def test_operators_datetimelike_invalid(self, all_arithmetic_operators): + # these are all TypeEror ops + op_str = all_arithmetic_operators + + def check(get_ser, test_ser): # check that we are getting a TypeError # with 'operate' (from core/ops.py) for the ops that are not # defined - for op_str in ops: - op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate|cannot'): - op(test_ser) + op = getattr(get_ser, op_str, None) + with tm.assert_raises_regex(TypeError, 'operate|cannot'): + op(test_ser) + + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # ## 
datetime64 ### + dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')]) + dt1.iloc[2] = np.nan + dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), + Timestamp('20120104')]) + if op_str not in ['__sub__', '__rsub__']: + check(dt1, dt2) + + # ## datetime64 with timetimedelta ### + # TODO(jreback) __rsub__ should raise? + if op_str not in ['__add__', '__radd__', '__sub__']: + check(dt1, td1) + + # 8260, 10763 + # datetime64 with tz + tz = 'US/Eastern' + dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, + tz=tz), name='foo') + dt2 = dt1.copy() + dt2.iloc[2] = np.nan + td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) + td2 = td1.copy() + td2.iloc[1] = np.nan + + if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']: + check(dt2, td2) + + def test_operators_datetimelike(self): # ## timedelta64 ### td1 = Series([timedelta(minutes=5, seconds=3)] * 3) @@ -848,18 +884,10 @@ def run_ops(ops, get_ser, test_ser): dt1.iloc[2] = np.nan dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), Timestamp('20120104')]) - ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__radd__', '__rmul__', '__rfloordiv__', - '__rtruediv__', '__rdiv__', '__rpow__'] - run_ops(ops, dt1, dt2) dt1 - dt2 dt2 - dt1 # ## datetime64 with timetimedelta ### - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] - run_ops(ops, dt1, td1) dt1 + td1 td1 + dt1 dt1 - td1 @@ -867,28 +895,20 @@ def run_ops(ops, get_ser, test_ser): # td1 - dt1 # ## timetimedelta with datetime64 ### - ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__', - '__rdiv__', '__rpow__'] - run_ops(ops, td1, dt1) td1 + dt1 dt1 + td1 - # 8260, 10763 - # datetime64 with tz - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', 
'__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] + def test_operators_datetimelike_with_timezones(self): tz = 'US/Eastern' dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, tz=tz), name='foo') dt2 = dt1.copy() dt2.iloc[2] = np.nan + td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) td2 = td1.copy() td2.iloc[1] = np.nan - run_ops(ops, dt1, td1) result = dt1 + td1[0] exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) @@ -1133,25 +1153,23 @@ def test_dt64_series_arith_overflow(self): res = dt - ser tm.assert_series_equal(res, -expected) + @pytest.mark.parametrize('op', ['__add__', '__radd__', + '__sub__', '__rsub__']) @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo']) - def test_dt64_series_add_intlike(self, tz): + def test_dt64_series_add_intlike(self, tz, op): # GH#19123 dti = pd.DatetimeIndex(['2016-01-02', '2016-02-03', 'NaT'], tz=tz) ser = Series(dti) other = Series([20, 30, 40], dtype='uint8') - pytest.raises(TypeError, ser.__add__, 1) - pytest.raises(TypeError, ser.__sub__, 1) + pytest.raises(TypeError, getattr(ser, op), 1) - pytest.raises(TypeError, ser.__add__, other) - pytest.raises(TypeError, ser.__sub__, other) + pytest.raises(TypeError, getattr(ser, op), other) - pytest.raises(TypeError, ser.__add__, other.values) - pytest.raises(TypeError, ser.__sub__, other.values) + pytest.raises(TypeError, getattr(ser, op), other.values) - pytest.raises(TypeError, ser.__add__, pd.Index(other)) - pytest.raises(TypeError, ser.__sub__, pd.Index(other)) + pytest.raises(TypeError, getattr(ser, op), pd.Index(other)) class TestSeriesOperators(TestData):
https://api.github.com/repos/pandas-dev/pandas/pulls/21033
2018-05-14T12:21:28Z
2018-05-14T23:49:23Z
2018-05-14T23:49:23Z
2018-05-14T23:49:23Z
BUG: Fixed 19497 - previously, renaming an index changed its type if …
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1660c8d9fcdc5..f441e6976bd89 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1364,6 +1364,7 @@ Reshaping - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - Bug in :func:`DataFrame.join` which does an ``outer`` instead of a ``left`` join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) - :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) +- Bug in :func:`~DataFrame.rename` where an Index of same-length tuples was converted to a MultiIndex (:issue:`19497`) - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) - Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) - Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index e7b2576ca1eae..fe508dc1bb0bc 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -5296,7 +5296,7 @@ def _transform_index(index, func, level=None): return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] - return Index(items, name=index.name) + return Index(items, name=index.name, tupleize_cols=False) def _putmask_smart(v, m, n): diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 95b952892c93d..164d6746edec0 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -579,6 +579,17 @@ def test_rename_bug(self): columns=['2001-01-01']) assert_frame_equal(df, expected) + def test_rename_bug2(self): + # GH 19497 + # rename was 
changing Index to MultiIndex if Index contained tuples + + df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], + columns=["a"]) + df = df.rename({(1, 1): (5, 4)}, axis="index") + expected = DataFrame(data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], + columns=["a"]) + assert_frame_equal(df, expected) + def test_reorder_levels(self): index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0],
Fixes bug GH19497 - previously, renaming an index with tuples changed its type (i.e. from Index to MultiIndex). - [x] closes #19497 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21029
2018-05-14T06:57:19Z
2018-05-14T23:52:16Z
2018-05-14T23:52:15Z
2018-05-15T03:53:24Z
BUG: Iteration over DatetimeIndex stops at chunksize (GH21012)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9761974d77d4b..83950f1d71633 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1365,7 +1365,8 @@ def __iter__(self): converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp") - return iter(converted) + for v in converted: + yield v def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 0722b9175c0c6..1a5f12103595c 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -153,6 +153,17 @@ def test_iteration_preserves_tz(self): assert result._repr_base == expected._repr_base assert result == expected + @pytest.mark.parametrize('periods', [0, 9999, 10000, 10001]) + def test_iteration_over_chunksize(self, periods): + # GH21012 + + index = date_range('2000-01-01 00:00:00', periods=periods, freq='min') + num = 0 + for stamp in index: + assert index[num] == stamp + num += 1 + assert num == len(index) + def test_misc_coverage(self): rng = date_range('1/1/2000', periods=5) result = rng.groupby(rng.day)
- [x] closes #21012 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry avoided using `yield` and changing DatetimeIndex itself into a iterator. @cbertinato 's advice was helpful.
https://api.github.com/repos/pandas-dev/pandas/pulls/21027
2018-05-14T01:02:31Z
2018-05-15T10:02:35Z
2018-05-15T10:02:34Z
2018-06-25T20:59:21Z
DOC: updated the Series.str.rsplit and Series.str.split docstrings
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 08239ae4dae20..b27cfdfe3f1bd 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1343,108 +1343,7 @@ def str_pad(arr, width, side='left', fillchar=' '): def str_split(arr, pat=None, n=None): - """ - Split strings around given separator/delimiter. - - Split each string in the caller's values by given - pattern, propagating NaN values. Equivalent to :meth:`str.split`. - - Parameters - ---------- - pat : str, optional - String or regular expression to split on. - If not specified, split on whitespace. - n : int, default -1 (all) - Limit number of splits in output. - ``None``, 0 and -1 will be interpreted as return all splits. - expand : bool, default False - Expand the split strings into separate columns. - - * If ``True``, return DataFrame/MultiIndex expanding dimensionality. - * If ``False``, return Series/Index, containing lists of strings. - Returns - ------- - Series, Index, DataFrame or MultiIndex - Type matches caller unless ``expand=True`` (see Notes). - - Notes - ----- - The handling of the `n` keyword depends on the number of found splits: - - - If found splits > `n`, make first `n` splits only - - If found splits <= `n`, make all splits - - If for a certain row the number of found splits < `n`, - append `None` for padding up to `n` if ``expand=True`` - - If using ``expand=True``, Series and Index callers return DataFrame and - MultiIndex objects, respectively. - - See Also - -------- - str.split : Standard library version of this method. - Series.str.get_dummies : Split each string into dummy variables. - Series.str.partition : Split string on a separator, returning - the before, separator, and after components. 
- - Examples - -------- - >>> s = pd.Series(["this is good text", "but this is even better"]) - - By default, split will return an object of the same size - having lists containing the split elements - - >>> s.str.split() - 0 [this, is, good, text] - 1 [but, this, is, even, better] - dtype: object - >>> s.str.split("random") - 0 [this is good text] - 1 [but this is even better] - dtype: object - - When using ``expand=True``, the split elements will expand out into - separate columns. - - For Series object, output return type is DataFrame. - - >>> s.str.split(expand=True) - 0 1 2 3 4 - 0 this is good text None - 1 but this is even better - >>> s.str.split(" is ", expand=True) - 0 1 - 0 this good text - 1 but this even better - - For Index object, output return type is MultiIndex. - - >>> i = pd.Index(["ba 100 001", "ba 101 002", "ba 102 003"]) - >>> i.str.split(expand=True) - MultiIndex(levels=[['ba'], ['100', '101', '102'], ['001', '002', '003']], - labels=[[0, 0, 0], [0, 1, 2], [0, 1, 2]]) - - Parameter `n` can be used to limit the number of splits in the output. - - >>> s.str.split("is", n=1) - 0 [th, is good text] - 1 [but th, is even better] - dtype: object - >>> s.str.split("is", n=1, expand=True) - 0 1 - 0 th is good text - 1 but th is even better - - If NaN is present, it is propagated throughout the columns - during the split. - - >>> s = pd.Series(["this is good text", "but this is even better", np.nan]) - >>> s.str.split(n=3, expand=True) - 0 1 2 3 - 0 this is good text - 1 but this is even better - 2 NaN NaN NaN NaN - """ if pat is None: if n is None or n == 0: n = -1 @@ -1464,25 +1363,7 @@ def str_split(arr, pat=None, n=None): def str_rsplit(arr, pat=None, n=None): - """ - Split each string in the Series/Index by the given delimiter - string, starting at the end of the string and working to the front. - Equivalent to :meth:`str.rsplit`. - Parameters - ---------- - pat : string, default None - Separator to split on. 
If None, splits on whitespace - n : int, default -1 (all) - None, 0 and -1 will be interpreted as return all splits - expand : bool, default False - * If True, return DataFrame/MultiIndex expanding dimensionality. - * If False, return Series/Index. - - Returns - ------- - split : Series/Index or DataFrame/MultiIndex of objects - """ if n is None or n == 0: n = -1 f = lambda x: x.rsplit(pat, n) @@ -2325,12 +2206,133 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): res = Series(res, index=data.index, name=self._orig.name) return res - @copy(str_split) + _shared_docs['str_split'] = (""" + Split strings around given separator/delimiter. + + Splits the string in the Series/Index from the %(side)s, + at the specified delimiter string. Equivalent to :meth:`str.%(method)s`. + + Parameters + ---------- + pat : str, optional + String or regular expression to split on. + If not specified, split on whitespace. + n : int, default -1 (all) + Limit number of splits in output. + ``None``, 0 and -1 will be interpreted as return all splits. + expand : bool, default False + Expand the splitted strings into separate columns. + + * If ``True``, return DataFrame/MultiIndex expanding dimensionality. + * If ``False``, return Series/Index, containing lists of strings. + + Returns + ------- + Series, Index, DataFrame or MultiIndex + Type matches caller unless ``expand=True`` (see Notes). + + See Also + -------- + Series.str.split : Split strings around given separator/delimiter. + Series.str.rsplit : Splits string around given separator/delimiter, + starting from the right. + Series.str.join : Join lists contained as elements in the Series/Index + with passed delimiter. + str.split : Standard library version for split. + str.rsplit : Standard library version for rsplit. 
+ + Notes + ----- + The handling of the `n` keyword depends on the number of found splits: + + - If found splits > `n`, make first `n` splits only + - If found splits <= `n`, make all splits + - If for a certain row the number of found splits < `n`, + append `None` for padding up to `n` if ``expand=True`` + + If using ``expand=True``, Series and Index callers return DataFrame and + MultiIndex objects, respectively. + + Examples + -------- + >>> s = pd.Series(["this is a regular sentence", + "https://docs.python.org/3/tutorial/index.html", np.nan]) + + In the default setting, the string is split by whitespace. + + >>> s.str.split() + 0 [this, is, a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + Without the `n` parameter, the outputs of `rsplit` and `split` + are identical. + + >>> s.str.rsplit() + 0 [this, is, a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + The `n` parameter can be used to limit the number of splits on the + delimiter. The outputs of `split` and `rsplit` are different. + + >>> s.str.split(n=2) + 0 [this, is, a regular sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + >>> s.str.rsplit(n=2) + 0 [this is a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + The `pat` parameter can be used to split by other characters. + + >>> s.str.split(pat = "/") + 0 [this is a regular sentence] + 1 [https:, , docs.python.org, 3, tutorial, index... + 2 NaN + dtype: object + + When using ``expand=True``, the split elements will expand out into + separate columns. If NaN is present, it is propagated throughout + the columns during the split. 
+ + >>> s.str.split(expand=True) + 0 1 2 3 + 0 this is a regular + 1 https://docs.python.org/3/tutorial/index.html None None None + 2 NaN NaN NaN NaN \ + + 4 + 0 sentence + 1 None + 2 NaN + + For slightly more complex use cases like splitting the html document name + from a url, a combination of parameter settings can be used. + + >>> s.str.rsplit("/", n=1, expand=True) + 0 1 + 0 this is a regular sentence None + 1 https://docs.python.org/3/tutorial index.html + 2 NaN NaN + """) + + @Appender(_shared_docs['str_split'] % { + 'side': 'beginning', + 'method': 'split'}) def split(self, pat=None, n=-1, expand=False): result = str_split(self._data, pat, n=n) return self._wrap_result(result, expand=expand) - @copy(str_rsplit) + @Appender(_shared_docs['str_split'] % { + 'side': 'end', + 'method': 'rsplit'}) def rsplit(self, pat=None, n=-1, expand=False): result = str_rsplit(self._data, pat, n=n) return self._wrap_result(result, expand=expand)
- [ ] closes #xxxx - [x] tests passed validate_docstrings.py - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] rendered document looks ok 'python make.py --single pandas.Series.str.rsplit' and 'python make.py --single pandas.Series.str.split'
https://api.github.com/repos/pandas-dev/pandas/pulls/21026
2018-05-14T00:32:12Z
2018-06-22T23:36:45Z
2018-06-22T23:36:45Z
2018-06-22T23:36:49Z
PERF: improved performance of CategoricalIndex.is_monotonic*
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index ae1d7029217a4..5464e7cba22c3 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -173,3 +173,23 @@ def setup(self, dtype): def time_isin_categorical(self, dtype): self.series.isin(self.sample) + + +class IsMonotonic(object): + + def setup(self): + N = 1000 + self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N)) + self.s = pd.Series(self.c) + + def time_categorical_index_is_monotonic_increasing(self): + self.c.is_monotonic_increasing + + def time_categorical_index_is_monotonic_decreasing(self): + self.c.is_monotonic_decreasing + + def time_categorical_series_is_monotonic_increasing(self): + self.s.is_monotonic_increasing + + def time_categorical_series_is_monotonic_decreasing(self): + self.s.is_monotonic_decreasing diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5c9c3e2931bd9..8c5111e712a34 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -29,6 +29,7 @@ Deprecations Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ +- Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`) - - diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 3ffef5804acf7..78b7ae7054248 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -382,11 +382,11 @@ def is_unique(self): @property def is_monotonic_increasing(self): - return Index(self.codes).is_monotonic_increasing + return self._engine.is_monotonic_increasing @property def is_monotonic_decreasing(self): - return Index(self.codes).is_monotonic_decreasing + return self._engine.is_monotonic_decreasing @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs) def unique(self, level=None): diff --git 
a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 6a1a1a5bdba4f..0e630f69b1a32 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -543,35 +543,41 @@ def test_reindex_empty_index(self): tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) - def test_is_monotonic(self): - c = CategoricalIndex([1, 2, 3]) + @pytest.mark.parametrize('data, non_lexsorted_data', [ + [[1, 2, 3], [9, 0, 1, 2, 3]], + [list('abc'), list('fabcd')], + ]) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], ordered=True) + c = CategoricalIndex(data, ordered=True) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1]) + c = CategoricalIndex(data, categories=reversed(data)) assert not c.is_monotonic_increasing assert c.is_monotonic_decreasing - c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1]) + c = CategoricalIndex(data, categories=reversed(data), ordered=True) assert not c.is_monotonic_increasing - assert not c.is_monotonic_decreasing + assert c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True) + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) assert not c.is_monotonic_increasing - assert c.is_monotonic_decreasing + assert not c.is_monotonic_decreasing # non lexsorted categories - categories = [9, 0, 1, 2, 3] + categories = non_lexsorted_data - c = CategoricalIndex([9, 0], categories=categories) + c = CategoricalIndex(categories[:2], categories=categories) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([0, 1], categories=categories) + c = CategoricalIndex(categories[1:3], 
categories=categories) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ```python >>> n = 1000000 >>> ci = pd.CategoricalIndex(list('a' * n + 'b' * n + 'c' * n)) >>> %t ci.is_monotonic_increasing 22 ms # v0.22 and master 227 ns # this commit ``` There seem to be a few more like this, where ``CategoricalIndex`` should use ``self._engine`` but doesn't. @TomAugspurger?
https://api.github.com/repos/pandas-dev/pandas/pulls/21025
2018-05-14T00:16:10Z
2018-05-17T00:21:51Z
2018-05-17T00:21:51Z
2018-06-08T17:08:16Z
DOC: some cleanup of various whatsnew files
diff --git a/doc/source/whatsnew/v0.13.0.txt b/doc/source/whatsnew/v0.13.0.txt index f440be1ddd56e..02ddc362255ec 100644 --- a/doc/source/whatsnew/v0.13.0.txt +++ b/doc/source/whatsnew/v0.13.0.txt @@ -651,7 +651,7 @@ Enhancements Additionally, the ``method`` argument to ``interpolate`` has been expanded to include ``'nearest', 'zero', 'slinear', 'quadratic', 'cubic', - 'barycentric', 'krogh', 'piecewise_polynomial', 'pchip', `polynomial`, 'spline'`` + 'barycentric', 'krogh', 'piecewise_polynomial', 'pchip', 'polynomial', 'spline'`` The new methods require scipy_. Consult the Scipy reference guide_ and documentation_ for more information about when the various methods are appropriate. See :ref:`the docs<missing_data.interpolate>`. diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt index be962ceb181ff..92c699017fc13 100644 --- a/doc/source/whatsnew/v0.14.0.txt +++ b/doc/source/whatsnew/v0.14.0.txt @@ -998,7 +998,7 @@ Bug Fixes - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines - Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) -- Bug in ``DataFrame.apply`` with functions that used \*args`` or \*\*kwargs and returned +- Bug in ``DataFrame.apply`` with functions that used ``*args`` or ``**kwargs`` and returned an empty result (:issue:`6952`) - Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) - Moved ``Panel.shift`` to ``NDFrame.slice_shift`` and fixed to respect multiple dtypes. (:issue:`6959`) diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt index 4674cbc846722..32a2391c75531 100644 --- a/doc/source/whatsnew/v0.14.1.txt +++ b/doc/source/whatsnew/v0.14.1.txt @@ -229,7 +229,7 @@ Bug Fixes :issue:`7409`). - Bug where bool objects were converted to ``nan`` in ``convert_objects`` (:issue:`7416`). 
-- Bug in ``quantile`` ignoring the axis keyword argument (:issue`7306`) +- Bug in ``quantile`` ignoring the axis keyword argument (:issue:`7306`) - Bug where ``nanops._maybe_null_out`` doesn't work with complex numbers (:issue:`7353`) - Bug in several ``nanops`` functions when ``axis==0`` for diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index c5ef6c8c9d74a..0f1a8c324de54 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -983,7 +983,7 @@ Other: df.describe(include='all') - Without those arguments, 'describe` will behave as before, including only numerical columns or, if none are, only categorical columns. See also the :ref:`docs <basics.describe>` + Without those arguments, ``describe`` will behave as before, including only numerical columns or, if none are, only categorical columns. See also the :ref:`docs <basics.describe>` - Added ``split`` as an option to the ``orient`` argument in ``pd.DataFrame.to_dict``. (:issue:`7840`) diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt index f84f25d3e906c..345fc9f1b5da7 100644 --- a/doc/source/whatsnew/v0.15.1.txt +++ b/doc/source/whatsnew/v0.15.1.txt @@ -287,7 +287,7 @@ Bug Fixes - Bug in ``Categorical`` reflected comparison operator raising if the first argument was a numpy array scalar (e.g. 
np.int64) (:issue:`8658`) - Bug in Panel indexing with a list-like (:issue:`8710`) - Compat issue is ``DataFrame.dtypes`` when ``options.mode.use_inf_as_null`` is True (:issue:`8722`) -- Bug in ``read_csv``, ``dialect`` parameter would not take a string (:issue: `8703`) +- Bug in ``read_csv``, ``dialect`` parameter would not take a string (:issue:`8703`) - Bug in slicing a multi-index level with an empty-list (:issue:`8737`) - Bug in numeric index operations of add/sub with Float/Index Index with numpy arrays (:issue:`8608`) - Bug in setitem with empty indexer and unwanted coercion of dtypes (:issue:`8669`) diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt index 91ec0c3038985..29f6832b48aaf 100644 --- a/doc/source/whatsnew/v0.16.2.txt +++ b/doc/source/whatsnew/v0.16.2.txt @@ -163,5 +163,5 @@ Bug Fixes - Bug in ``Panel.apply`` when the result has ndim=0 (:issue:`10332`) - Bug in ``read_hdf`` where ``auto_close`` could not be passed (:issue:`9327`). - Bug in ``read_hdf`` where open stores could not be used (:issue:`10330`). -- Bug in adding empty ``DataFrame``s, now results in a ``DataFrame`` that ``.equals`` an empty ``DataFrame`` (:issue:`10181`). +- Bug in adding empty ``DataFrames``, now results in a ``DataFrame`` that ``.equals`` an empty ``DataFrame`` (:issue:`10181`). - Bug in ``to_hdf`` and ``HDFStore`` which did not check that complib choices were valid (:issue:`4582`, :issue:`8874`). 
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 239b2ba96404c..ec8f318b72fef 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -1043,7 +1043,7 @@ Bug Fixes ~~~~~~~~~ - Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) -- Bug in ``.isin`` on older numpies (:issue: `11232`) +- Bug in ``.isin`` on older numpies (:issue:`11232`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`) - Bug in ``DatetimeIndex`` when localizing with ``NaT`` (:issue:`10477`) @@ -1094,7 +1094,7 @@ Bug Fixes - Bug in ``to_datetime`` and ``to_timedelta`` causing ``Index`` name to be lost (:issue:`10875`) -- Bug in ``len(DataFrame.groupby)`` causing ``IndexError`` when there's a column containing only NaNs (:issue: `11016`) +- Bug in ``len(DataFrame.groupby)`` causing ``IndexError`` when there's a column containing only NaNs (:issue:`11016`) - Bug that caused segfault when resampling an empty Series (:issue:`10228`) - Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. 
(:issue:`10150`) diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index 302105c1e653c..50d7877a9cd48 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -265,7 +265,7 @@ Individual columns can be parsed as a ``Categorical`` using a dict specification Categorical Concatenation ^^^^^^^^^^^^^^^^^^^^^^^^^ -- A function :func:`union_categoricals` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`, :issue:`:13763`, issue:`13846`, :issue:`14173`) +- A function :func:`union_categoricals` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`, :issue:`13763`, :issue:`13846`, :issue:`14173`) .. ipython:: python @@ -1525,7 +1525,7 @@ Bug Fixes - Bug in invalid datetime parsing in ``to_datetime`` and ``DatetimeIndex`` may raise ``TypeError`` rather than ``ValueError`` (:issue:`11169`, :issue:`11287`) - Bug in ``Index`` created with tz-aware ``Timestamp`` and mismatched ``tz`` option incorrectly coerces timezone (:issue:`13692`) - Bug in ``DatetimeIndex`` with nanosecond frequency does not include timestamp specified with ``end`` (:issue:`13672`) -- Bug in ```Series``` when setting a slice with a ```np.timedelta64``` (:issue:`14155`) +- Bug in ```Series`` when setting a slice with a ``np.timedelta64`` (:issue:`14155`) - Bug in ``Index`` raises ``OutOfBoundsDatetime`` if ``datetime`` exceeds ``datetime64[ns]`` bounds, rather than coercing to ``object`` dtype (:issue:`13663`) - Bug in ``Index`` may ignore specified ``datetime64`` or ``timedelta64`` passed as ``dtype`` (:issue:`13981`) - Bug in ``RangeIndex`` can be created without no arguments rather than raises ``TypeError`` (:issue:`13793`) diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt index 545b4380d9b75..b8afe18e0f871 100644 --- a/doc/source/whatsnew/v0.19.1.txt +++ b/doc/source/whatsnew/v0.19.1.txt @@ -55,7 +55,7 @@ 
Bug Fixes - Bug in ``pd.concat`` with dataframes heterogeneous in length and tuple ``keys`` (:issue:`14438`) - Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`) - Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`) -- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`) +- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue:`14327`) - Bug in ``df.groupby`` where ``TypeError`` raised when ``pd.Grouper(key=...)`` is passed in a list (:issue:`14334`) - Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns`` is not scalar and ``values`` is not specified (:issue:`14380`) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 5f22b518ab6c4..5fb725a76770e 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1711,7 +1711,7 @@ Reshaping - Bug in ``pd.concat()`` in which concatenating with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`) - Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`) - Bug in ``DataFrame.nsmallest`` and ``DataFrame.nlargest`` where identical values resulted in duplicated rows (:issue:`15297`) -- Bug in :func:`pandas.pivot_table` incorrectly raising ``UnicodeError`` when passing unicode input for ```margins`` keyword (:issue:`13292`) +- Bug in :func:`pandas.pivot_table` incorrectly raising ``UnicodeError`` when passing unicode input for ``margins`` keyword (:issue:`13292`) Numeric ^^^^^^^ diff --git a/doc/source/whatsnew/v0.20.2.txt b/doc/source/whatsnew/v0.20.2.txt index 31125db0f34d4..3de6fbc8afaf8 100644 --- a/doc/source/whatsnew/v0.20.2.txt +++ b/doc/source/whatsnew/v0.20.2.txt @@ 
-44,7 +44,7 @@ Bug Fixes - Silenced a warning on some Windows environments about "tput: terminal attributes: No such device or address" when detecting the terminal size. This fix only applies to python 3 (:issue:`16496`) - Bug in using ``pathlib.Path`` or ``py.path.local`` objects with io functions (:issue:`16291`) -- Bug in ``Index.symmetric_difference()`` on two equal MultiIndex's, results in a ``TypeError`` (:issue `13490`) +- Bug in ``Index.symmetric_difference()`` on two equal MultiIndex's, results in a ``TypeError`` (:issue:`13490`) - Bug in ``DataFrame.update()`` with ``overwrite=False`` and ``NaN values`` (:issue:`15593`) - Passing an invalid engine to :func:`read_csv` now raises an informative ``ValueError`` rather than ``UnboundLocalError``. (:issue:`16511`) @@ -83,7 +83,7 @@ Plotting ^^^^^^^^ - Bug in ``DataFrame.plot`` with a single column and a list-like ``color`` (:issue:`3486`) -- Bug in ``plot`` where ``NaT`` in ``DatetimeIndex`` results in ``Timestamp.min`` (:issue: `12405`) +- Bug in ``plot`` where ``NaT`` in ``DatetimeIndex`` results in ``Timestamp.min`` (:issue:`12405`) - Bug in ``DataFrame.boxplot`` where ``figsize`` keyword was not respected for non-grouped boxplots (:issue:`11959`) diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 0c2e494f29bc1..3a257c1ff9648 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -895,7 +895,7 @@ nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You must explicitly register these methods: Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these -converters on first-use (:issue:17710). +converters on first-use (:issue:`17710`). .. 
note:: @@ -1047,7 +1047,7 @@ Conversion - Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`) - Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`) - Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`) -- Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). +- Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (``DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). - Bug in :meth:`to_numeric` in which elements were not always being coerced to numeric when ``errors='coerce'`` (:issue:`17007`, :issue:`17125`) - Bug in ``DataFrame`` and ``Series`` constructors where ``range`` objects are converted to ``int32`` dtype on Windows instead of ``int64`` (:issue:`16804`) diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 67c52dac6128d..2e9e616daf3a7 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -81,7 +81,7 @@ Deprecations ~~~~~~~~~~~~ - ``pandas.tseries.register`` has been renamed to - :func:`pandas.plotting.register_matplotlib_converters`` (:issue:`18301`) + :func:`pandas.plotting.register_matplotlib_converters` (:issue:`18301`) .. 
_whatsnew_0211.performance: @@ -101,7 +101,7 @@ Conversion - Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) - Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) - Bug in :meth:`IntervalIndex.copy` when copying and ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`) -- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising``TypeError` (:issue:`18372`) +- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising ``TypeError`` (:issue:`18372`) - Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an err if ``end.tzinfo`` is None (:issue:`18431`) - Bug in :meth:`Series.fillna` which raised when passed a long integer on Python 2 (:issue:`18159`). diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1660c8d9fcdc5..0e9a7ca777d69 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -569,10 +569,10 @@ Previous Behavior (and current behavior if on Python < 3.6): .. 
code-block:: ipython - In [1]: pd.Series({'Income': 2000, - 'Expenses': -1500, - 'Taxes': -200, - 'Net result': 300}) + pd.Series({'Income': 2000, + 'Expenses': -1500, + 'Taxes': -200, + 'Net result': 300}) Expenses -1500 Income 2000 Net result 300 diff --git a/doc/source/whatsnew/v0.8.0.txt b/doc/source/whatsnew/v0.8.0.txt index b2d1d16e86990..b5ec5aa73ee9a 100644 --- a/doc/source/whatsnew/v0.8.0.txt +++ b/doc/source/whatsnew/v0.8.0.txt @@ -123,7 +123,7 @@ Other new features - Enhanced :ref:`read_csv/read_table <io.parse_dates>` for reading time series data and converting multiple columns to dates - Add :ref:`comments <io.comments>` option to parser functions: read_csv, etc. -- Add :ref`dayfirst <io.dayfirst>` option to parser functions for parsing +- Add :ref:`dayfirst <io.dayfirst>` option to parser functions for parsing international DD/MM/YYYY dates - Allow the user to specify the CSV reader :ref:`dialect <io.dialect>` to control quoting etc.
Over time, various whatsnew text files have had some errors that caused parsing errors for sphinx/rst. This PR cleans up various such bugs that have been causing wrong parsing output.
https://api.github.com/repos/pandas-dev/pandas/pulls/21021
2018-05-13T15:27:02Z
2018-05-15T00:02:04Z
2018-05-15T00:02:04Z
2018-05-18T18:05:14Z
BUG: .reset_index() should raise with an invalid level name (GH20925)
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..90780f118016c 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -59,7 +59,7 @@ Conversion Indexing ^^^^^^^^ -- +- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) - I/O diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..6d396e845219e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1195,12 +1195,13 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = com._default_index(len(self)) - if level is not None and isinstance(self.index, MultiIndex): + if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if len(level) < len(self.index.levels): - new_index = self.index.droplevel(level) + if isinstance(self.index, MultiIndex): + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if inplace: self.index = new_index diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index dce4e82cbdcf1..859082a7e722d 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -188,6 +188,11 @@ def test_reset_index_level(self): with tm.assert_raises_regex(IndexError, 'Too many levels'): s.reset_index(level=[0, 1, 2]) + # Check that .reset_index([],drop=True) doesn't fail + result = pd.Series(range(4)).reset_index([], drop=True) + expected = pd.Series(range(4)) + assert_series_equal(result, expected) + def test_reset_index_range(self): # GH 12071 s = pd.Series(range(2), name='A', dtype='int64') @@ -275,3 +280,18 @@ def test_set_axis_prior_to_deprecation_signature(self): with tm.assert_produces_warning(FutureWarning): result = s.set_axis(0, list('abcd'), 
inplace=False) tm.assert_series_equal(result, expected) + + def test_reset_index_drop_errors(self): + # GH 20925 + + # KeyError raised for series index when passed level name is missing + s = pd.Series(range(4)) + with tm.assert_raises_regex(KeyError, 'must be same as name'): + s.reset_index('wrong', drop=True) + with tm.assert_raises_regex(KeyError, 'must be same as name'): + s.reset_index('wrong') + + # KeyError raised for series when level to be dropped is missing + s = pd.Series(range(4), index=pd.MultiIndex.from_product([[1, 2]] * 2)) + with tm.assert_raises_regex(KeyError, 'not found'): + s.reset_index('wrong', drop=True)
#20925 Raises appropriate error for Series.reset_index(level_name, drop=True) when index is flat and an invalid level is supplied - [x] closes #20925 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21016
2018-05-12T14:21:23Z
2018-05-18T05:53:46Z
2018-05-18T05:53:45Z
2018-06-08T17:09:13Z
Don't raise warning on merging int and float with nan
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 0204e655bfa2c..4d8897fb7c811 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -955,14 +955,14 @@ def _maybe_coerce_merge_keys(self): # check whether ints and floats elif is_integer_dtype(rk) and is_float_dtype(lk): - if not (lk == lk.astype(rk.dtype)).all(): + if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all(): warnings.warn('You are merging on int and float ' 'columns where the float values ' 'are not equal to their int ' 'representation', UserWarning) elif is_float_dtype(rk) and is_integer_dtype(lk): - if not (rk == rk.astype(lk.dtype)).all(): + if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all(): warnings.warn('You are merging on int and float ' 'columns where the float values ' 'are not equal to their int ' diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 436fe8f9f5d7e..8e639edd34b18 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1519,6 +1519,13 @@ def test_merge_on_ints_floats_warning(self): result = B.merge(A, left_on='Y', right_on='X') assert_frame_equal(result, expected[['Y', 'X']]) + # test no warning if float has NaNs + B = DataFrame({'Y': [np.nan, np.nan, 3.0]}) + + with tm.assert_produces_warning(None): + result = B.merge(A, left_on='Y', right_on='X') + assert_frame_equal(result, expected[['Y', 'X']]) + @pytest.mark.parametrize('df1_vals, df2_vals', [ ([0, 1, 2], ["0", "1", "2"]), ([0.0, 1.0, 2.0], ["0", "1", "2"]),
Closes https://github.com/pandas-dev/pandas/issues/20998
https://api.github.com/repos/pandas-dev/pandas/pulls/21011
2018-05-11T08:58:09Z
2018-05-11T11:46:29Z
2018-05-11T11:46:29Z
2018-05-24T20:20:22Z
ENH: Implement linspace behavior for timedelta_range and interval_range
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f7bc0e8d8c3f..a8a201558ec9b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -526,7 +526,7 @@ Other Enhancements - Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`) - :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`) - :func:`cut` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`20947`) -- :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`) +- :func:`date_range`, :func:`timedelta_range`, and :func:`interval_range` now return a linearly spaced index if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`, :issue:`20976`) .. 
_whatsnew_0230.api_breaking: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1d5c2d9a098ed..9761974d77d4b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -358,11 +358,6 @@ def __new__(cls, data=None, msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) - if data is None and freq is None \ - and com._any_none(periods, start, end): - raise ValueError("Must provide freq argument if no data is " - "supplied") - # if dtype has an embedded tz, capture it if dtype is not None: try: @@ -377,9 +372,13 @@ def __new__(cls, data=None, pass if data is None: - return cls._generate(start, end, periods, name, freq, - tz=tz, normalize=normalize, closed=closed, - ambiguous=ambiguous) + if freq is None and com._any_none(periods, start, end): + msg = 'Must provide freq argument if no data is supplied' + raise ValueError(msg) + else: + return cls._generate(start, end, periods, name, freq, tz=tz, + normalize=normalize, closed=closed, + ambiguous=ambiguous) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): @@ -2590,11 +2589,6 @@ def date_range(start=None, end=None, periods=None, freq=None, tz=None, """ Return a fixed frequency DatetimeIndex. - Of the three parameters `start`, `end`, `periods`, and `freq` exactly - three must be specified. If `freq` is omitted, the resulting DatetimeIndex - will have `periods` linearly spaced elements between `start` and `end` - (closed on both sides). - Parameters ---------- start : str or datetime-like, optional @@ -2628,9 +2622,20 @@ def date_range(start=None, end=None, periods=None, freq=None, tz=None, See Also -------- pandas.DatetimeIndex : An immutable container for datetimes. + pandas.timedelta_range : Return a fixed frequency TimedeltaIndex. pandas.period_range : Return a fixed frequency PeriodIndex. pandas.interval_range : Return a fixed frequency IntervalIndex. 
+ Notes + ----- + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``DatetimeIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end`` (closed on both sides). + + To learn more about the frequency strings, please see `this link + <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. + Examples -------- **Specifying the values** @@ -2769,8 +2774,10 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, Notes ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. + Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. Specifying ``freq`` is a requirement + for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not + desired. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
@@ -2779,6 +2786,9 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, ------- rng : DatetimeIndex """ + if freq is None: + msg = 'freq must be specified for bdate_range; use date_range instead' + raise TypeError(msg) if is_string_like(freq) and freq.startswith('C'): try: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 766ac7b14120e..408a8cc435b63 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -6,7 +6,8 @@ from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex from pandas.core.dtypes.dtypes import IntervalDtype -from pandas.core.dtypes.cast import maybe_convert_platform, find_common_type +from pandas.core.dtypes.cast import ( + maybe_convert_platform, find_common_type, maybe_downcast_to_dtype) from pandas.core.dtypes.common import ( _ensure_platform_int, is_list_like, @@ -1465,8 +1466,13 @@ def interval_range(start=None, end=None, periods=None, freq=None, Notes ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``IntervalIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end``, inclusively. + + To learn more about datetime-like frequency strings, please see `this link + <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Returns ------- @@ -1505,6 +1511,14 @@ def interval_range(start=None, end=None, periods=None, freq=None, (2017-03-01, 2017-04-01]] closed='right', dtype='interval[datetime64[ns]]') + Specify ``start``, ``end``, and ``periods``; the frequency is generated + automatically (linearly spaced). 
+ + >>> pd.interval_range(start=0, end=6, periods=4) + IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]] + closed='right', + dtype='interval[float64]') + The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. @@ -1516,19 +1530,21 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. """ - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - start = com._maybe_box_datetimelike(start) end = com._maybe_box_datetimelike(end) - endpoint = next(com._not_none(start, end)) + endpoint = start if start is not None else end + + if freq is None and com._any_none(periods, start, end): + freq = 1 if is_number(endpoint) else 'D' + + if com._count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, and ' + 'freq, exactly three must be specified') if not _is_valid_endpoint(start): msg = 'start must be numeric or datetime-like, got {start}' raise ValueError(msg.format(start=start)) - - if not _is_valid_endpoint(end): + elif not _is_valid_endpoint(end): msg = 'end must be numeric or datetime-like, got {end}' raise ValueError(msg.format(end=end)) @@ -1538,8 +1554,7 @@ def interval_range(start=None, end=None, periods=None, freq=None, msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) - freq = freq or (1 if is_number(endpoint) else 'D') - if not is_number(freq): + if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError: @@ -1552,28 +1567,34 @@ def interval_range(start=None, end=None, periods=None, freq=None, _is_type_compatible(end, freq)]): raise TypeError("start, end, freq need to be type compatible") + # +1 to convert interval count to breaks count (n breaks = n-1 intervals) 
+ if periods is not None: + periods += 1 + if is_number(endpoint): + # compute the period/start/end if unspecified (at most one) if periods is None: - periods = int((end - start) // freq) - - if start is None: - start = end - periods * freq - - # force end to be consistent with freq (lower if freq skips over end) - end = start + periods * freq - - # end + freq for inclusive endpoint - breaks = np.arange(start, end + freq, freq) - elif isinstance(endpoint, Timestamp): - # add one to account for interval endpoints (n breaks = n-1 intervals) - if periods is not None: - periods += 1 - breaks = date_range(start=start, end=end, periods=periods, freq=freq) + periods = int((end - start) // freq) + 1 + elif start is None: + start = end - (periods - 1) * freq + elif end is None: + end = start + (periods - 1) * freq + + # force end to be consistent with freq (lower if freq skips end) + if freq is not None: + end -= end % freq + + breaks = np.linspace(start, end, periods) + if all(is_integer(x) for x in com._not_none(start, end, freq)): + # np.linspace always produces float output + breaks = maybe_downcast_to_dtype(breaks, 'int64') else: - # add one to account for interval endpoints (n breaks = n-1 intervals) - if periods is not None: - periods += 1 - breaks = timedelta_range(start=start, end=end, periods=periods, - freq=freq) + # delegate to the appropriate range function + if isinstance(endpoint, Timestamp): + range_func = date_range + else: + range_func = timedelta_range + + breaks = range_func(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6b278fc35c831..9707d19953418 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -225,13 +225,13 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, msg = 'periods must be a number, got {periods}' raise 
TypeError(msg.format(periods=periods)) - if data is None and freq is None: - raise ValueError("Must provide freq argument if no data is " - "supplied") - if data is None: - return cls._generate(start, end, periods, name, freq, - closed=closed) + if freq is None and com._any_none(periods, start, end): + msg = 'Must provide freq argument if no data is supplied' + raise ValueError(msg) + else: + return cls._generate(start, end, periods, name, freq, + closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) @@ -266,10 +266,10 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, return cls._simple_new(data, name=name, freq=freq) @classmethod - def _generate(cls, start, end, periods, name, offset, closed=None): - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and ' - 'periods, exactly two must be specified') + def _generate(cls, start, end, periods, name, freq, closed=None): + if com._count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, ' + 'and freq, exactly three must be specified') if start is not None: start = Timedelta(start) @@ -295,8 +295,11 @@ def _generate(cls, start, end, periods, name, offset, closed=None): else: raise ValueError("Closed has to be either 'left', 'right' or None") - index = _generate_regular_range(start, end, periods, offset) - index = cls._simple_new(index, name=name, freq=offset) + if freq is not None: + index = _generate_regular_range(start, end, periods, freq) + index = cls._simple_new(index, name=name, freq=freq) + else: + index = to_timedelta(np.linspace(start.value, end.value, periods)) if not left_closed: index = index[1:] @@ -1046,7 +1049,7 @@ def _generate_regular_range(start, end, periods, offset): return data -def timedelta_range(start=None, end=None, periods=None, freq='D', +def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): 
""" Return a fixed frequency TimedeltaIndex, with day as the default @@ -1074,8 +1077,10 @@ def timedelta_range(start=None, end=None, periods=None, freq='D', Notes ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. @@ -1102,6 +1107,17 @@ def timedelta_range(start=None, end=None, periods=None, freq='D', TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') + + Specify ``start``, ``end``, and ``periods``; the frequency is generated + automatically (linearly spaced). 
+ + >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) + TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', + '5 days 00:00:00'], + dtype='timedelta64[ns]', freq=None) """ + if freq is None and com._any_none(periods, start, end): + freq = 'D' + return TimedeltaIndex(start=start, end=end, periods=periods, freq=freq, name=name, closed=closed) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3fb088329f225..193804b66395b 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -361,6 +361,10 @@ def test_constructor(self): with tm.assert_raises_regex(TypeError, msg): bdate_range('2011-1-1', '2012-1-1', 'B') + msg = 'freq must be specified for bdate_range; use date_range instead' + with tm.assert_raises_regex(TypeError, msg): + bdate_range(START, END, periods=10, freq=None) + def test_naive_aware_conflicts(self): naive = bdate_range(START, END, freq=BDay(), tz=None) aware = bdate_range(START, END, freq=BDay(), tz="Asia/Hong_Kong") diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 203e8e3128edc..0fadfcf0c7f28 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -6,9 +6,9 @@ from pandas import ( Interval, IntervalIndex, Timestamp, Timedelta, DateOffset, interval_range, date_range, timedelta_range) +from pandas.core.dtypes.common import is_integer from pandas.tseries.offsets import Day import pandas.util.testing as tm -import pandas as pd @pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) @@ -23,200 +23,198 @@ def name(request): class TestIntervalRange(object): - def test_construction_from_numeric(self, closed, name): - # combinations of start/end/periods without freq - expected = IntervalIndex.from_breaks( - np.arange(0, 6), 
name=name, closed=closed) - - result = interval_range(start=0, end=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=0, periods=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=5, periods=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with freq - expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)], - name=name, closed=closed) - - result = interval_range(start=0, end=6, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=0, periods=3, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=6, periods=3, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # output truncates early if freq causes end to be skipped. - expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)], - name=name, closed=closed) - result = interval_range(start=0, end=4, freq=1.5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize('tz', [None, 'US/Eastern']) - def test_construction_from_timestamp(self, closed, name, tz): - # combinations of start/end/periods without freq - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-01-06', tz=tz) - breaks = date_range(start=start, end=end) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with fixed freq - freq = '2D' - start = Timestamp('2017-01-01', 
tz=tz) - end = Timestamp('2017-01-07', tz=tz) - breaks = date_range(start=start, end=end, freq=freq) + @pytest.mark.parametrize('freq, periods', [ + (1, 100), (2.5, 40), (5, 20), (25, 4)]) + def test_constructor_numeric(self, closed, name, freq, periods): + start, end = 0, 100 + breaks = np.arange(101, step=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(start=start, periods=3, freq=freq, name=name, - closed=closed) + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=3, freq=freq, name=name, - closed=closed) + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - # output truncates early if freq causes end to be skipped. 
- end = Timestamp('2017-01-08', tz=tz) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) + # GH 20976: linspace behavior defined from start/end/periods + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed) tm.assert_index_equal(result, expected) - # combinations of start/end/periods with non-fixed freq - freq = 'M' - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-12-31', tz=tz) + @pytest.mark.parametrize('tz', [None, 'US/Eastern']) + @pytest.mark.parametrize('freq, periods', [ + ('D', 364), ('2D', 182), ('22D18H', 16), ('M', 11)]) + def test_constructor_timestamp(self, closed, name, freq, periods, tz): + start, end = Timestamp('20180101', tz=tz), Timestamp('20181231', tz=tz) breaks = date_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(start=start, periods=11, freq=freq, name=name, - closed=closed) + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=11, freq=freq, name=name, - closed=closed) + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - # output truncates early if freq causes end to be skipped. 
- end = Timestamp('2018-01-15', tz=tz) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) + # GH 20976: linspace behavior defined from start/end/periods + if not breaks.freq.isAnchored() and tz is None: + # matches expected only for non-anchored offsets and tz naive + # (anchored/DST transitions cause unequal spacing in expected) + result = interval_range(start=start, end=end, periods=periods, + name=name, closed=closed) + tm.assert_index_equal(result, expected) - def test_construction_from_timedelta(self, closed, name): - # combinations of start/end/periods without freq - start, end = Timedelta('1 day'), Timedelta('6 days') - breaks = timedelta_range(start=start, end=end) + @pytest.mark.parametrize('freq, periods', [ + ('D', 100), ('2D12H', 40), ('5D', 20), ('25D', 4)]) + def test_constructor_timedelta(self, closed, name, freq, periods): + start, end = Timedelta('0 days'), Timedelta('100 days') + breaks = timedelta_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - result = interval_range(start=start, end=end, name=name, - closed=closed) + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(start=start, periods=5, name=name, - closed=closed) + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=5, name=name, - closed=closed) + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - # combinations of start/end/periods with fixed freq - freq = '2D' - start, end = Timedelta('1 day'), Timedelta('7 days') - breaks = 
timedelta_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=3, freq=freq, name=name, - closed=closed) + # GH 20976: linspace behavior defined from start/end/periods + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=3, freq=freq, name=name, - closed=closed) + @pytest.mark.parametrize('start, end, freq, expected_endpoint', [ + (0, 10, 3, 9), + (Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')), + (Timestamp('2018-01-01'), + Timestamp('2018-02-09'), + 'MS', + Timestamp('2018-02-01')), + (Timestamp('2018-01-01', tz='US/Eastern'), + Timestamp('2018-01-20', tz='US/Eastern'), + '5D12H', + Timestamp('2018-01-17 12:00:00', tz='US/Eastern'))]) + def test_early_truncation(self, start, end, freq, expected_endpoint): + # index truncates early if freq causes end to be skipped + result = interval_range(start=start, end=end, freq=freq) + result_endpoint = result.right[-1] + assert result_endpoint == expected_endpoint + + @pytest.mark.parametrize('start, mid, end', [ + (Timestamp('2018-03-10', tz='US/Eastern'), + Timestamp('2018-03-10 23:30:00', tz='US/Eastern'), + Timestamp('2018-03-12', tz='US/Eastern')), + (Timestamp('2018-11-03', tz='US/Eastern'), + Timestamp('2018-11-04 00:30:00', tz='US/Eastern'), + Timestamp('2018-11-05', tz='US/Eastern'))]) + def test_linspace_dst_transition(self, start, mid, end): + # GH 20976: linspace behavior defined from start/end/periods + # accounts for the hour gained/lost during DST transition + result = interval_range(start=start, end=end, periods=2) + expected = IntervalIndex.from_breaks([start, mid, end]) tm.assert_index_equal(result, expected) - # output truncates early 
if freq causes end to be skipped. - end = Timedelta('7 days 1 hour') - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('freq', [2, 2.0]) + @pytest.mark.parametrize('end', [10, 10.0]) + @pytest.mark.parametrize('start', [0, 0.0]) + def test_float_subtype(self, start, end, freq): + # Has float subtype if any of start/end/freq are float, even if all + # resulting endpoints can safely be upcast to integers + + # defined from start/end/freq + index = interval_range(start=start, end=end, freq=freq) + result = index.dtype.subtype + expected = 'int64' if is_integer(start + end + freq) else 'float64' + assert result == expected + + # defined from start/periods/freq + index = interval_range(start=start, periods=5, freq=freq) + result = index.dtype.subtype + expected = 'int64' if is_integer(start + freq) else 'float64' + assert result == expected + + # defined from end/periods/freq + index = interval_range(end=end, periods=5, freq=freq) + result = index.dtype.subtype + expected = 'int64' if is_integer(end + freq) else 'float64' + assert result == expected + + # GH 20976: linspace behavior defined from start/end/periods + index = interval_range(start=start, end=end, periods=5) + result = index.dtype.subtype + expected = 'int64' if is_integer(start + end) else 'float64' + assert result == expected def test_constructor_coverage(self): # float value for periods - expected = pd.interval_range(start=0, periods=10) - result = pd.interval_range(start=0, periods=10.5) + expected = interval_range(start=0, periods=10) + result = interval_range(start=0, periods=10.5) tm.assert_index_equal(result, expected) # equivalent timestamp-like start/end start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15') - expected = pd.interval_range(start=start, end=end) + expected = interval_range(start=start, end=end) - result = pd.interval_range(start=start.to_pydatetime(), - 
end=end.to_pydatetime()) + result = interval_range(start=start.to_pydatetime(), + end=end.to_pydatetime()) tm.assert_index_equal(result, expected) - result = pd.interval_range(start=start.asm8, end=end.asm8) + result = interval_range(start=start.asm8, end=end.asm8) tm.assert_index_equal(result, expected) # equivalent freq with timestamp equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1), DateOffset(days=1)] for freq in equiv_freq: - result = pd.interval_range(start=start, end=end, freq=freq) + result = interval_range(start=start, end=end, freq=freq) tm.assert_index_equal(result, expected) # equivalent timedelta-like start/end start, end = Timedelta(days=1), Timedelta(days=10) - expected = pd.interval_range(start=start, end=end) + expected = interval_range(start=start, end=end) - result = pd.interval_range(start=start.to_pytimedelta(), - end=end.to_pytimedelta()) + result = interval_range(start=start.to_pytimedelta(), + end=end.to_pytimedelta()) tm.assert_index_equal(result, expected) - result = pd.interval_range(start=start.asm8, end=end.asm8) + result = interval_range(start=start.asm8, end=end.asm8) tm.assert_index_equal(result, expected) # equivalent freq with timedelta equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)] for freq in equiv_freq: - result = pd.interval_range(start=start, end=end, freq=freq) + result = interval_range(start=start, end=end, freq=freq) tm.assert_index_equal(result, expected) def test_errors(self): # not enough params - msg = ('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') + msg = ('Of the four parameters: start, end, periods, and freq, ' + 'exactly three must be specified') with tm.assert_raises_regex(ValueError, msg): interval_range(start=0) @@ -232,7 +230,7 @@ def test_errors(self): # too many params with tm.assert_raises_regex(ValueError, msg): - interval_range(start=0, end=5, periods=6) + interval_range(start=0, end=5, periods=6, freq=1.5) # mixed units msg = 'start, 
end, freq need to be type compatible' diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py index 784ef845fea10..87dff74cd04d7 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py @@ -1,9 +1,9 @@ +import pytest import numpy as np import pandas as pd import pandas.util.testing as tm from pandas.tseries.offsets import Day, Second from pandas import to_timedelta, timedelta_range -from pandas.util.testing import assert_frame_equal class TestTimedeltas(object): @@ -46,12 +46,20 @@ def test_timedelta_range(self): df.index = pd.timedelta_range(start='0s', periods=10, freq='s') expected = df.loc[pd.Timedelta('0s'):, :] result = df.loc['0s':, :] - assert_frame_equal(expected, result) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize('periods, freq', [ + (3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')]) + def test_linspace_behavior(self, periods, freq): + # GH 20976 + result = timedelta_range(start='0 days', end='4 days', periods=periods) + expected = timedelta_range(start='0 days', end='4 days', freq=freq) + tm.assert_index_equal(result, expected) def test_errors(self): # not enough params - msg = ('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') + msg = ('Of the four parameters: start, end, periods, and freq, ' + 'exactly three must be specified') with tm.assert_raises_regex(ValueError, msg): timedelta_range(start='0 days') @@ -66,4 +74,4 @@ def test_errors(self): # too many params with tm.assert_raises_regex(ValueError, msg): - timedelta_range(start='0 days', end='5 days', periods=10) + timedelta_range(start='0 days', end='5 days', periods=10, freq='H')
- [X] closes #20976 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Summary: - added linspace behavior support to `timedelta_range` - added linspace behavior support to `interval_range` - refactored `test_interval_range.py` to use `parametrize`; split up some tests - added a check to `bdate_range` to ensure that `freq` is specified - doesn't really make sense to support linspace behavior since `bdate_range` implies a frequency - same underlying code as `date_range`; could get linspace behavior by overriding `freq` to `None` - updated and cleaned docstrings for all `*_range` functions except `period_range`
https://api.github.com/repos/pandas-dev/pandas/pulls/21009
2018-05-11T05:09:16Z
2018-05-11T11:53:49Z
2018-05-11T11:53:49Z
2018-05-11T14:50:46Z
MyPy cleanup and absolute imports in pandas.core.dtypes.common
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index ce87c0a8b0c5a..30949ca6d1d6b 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -195,13 +195,13 @@ def __setitem__(self, key, value): ) def __len__(self): + # type: () -> int """Length of this array Returns ------- length : int """ - # type: () -> int raise AbstractMethodError(self) def __iter__(self): diff --git a/pandas/core/base.py b/pandas/core/base.py index c331ead8d2fef..6625a3bbe97d7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -788,6 +788,7 @@ def base(self): @property def _ndarray_values(self): + # type: () -> np.ndarray """The data as an ndarray, possibly losing information. The expectation is that this is cheap to compute, and is primarily @@ -795,7 +796,6 @@ def _ndarray_values(self): - categorical -> codes """ - # type: () -> np.ndarray if is_extension_array_dtype(self): return self.values._ndarray_values return self.values diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index c45838e6040a9..05f82c67ddb8b 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -5,17 +5,19 @@ PY3, PY36) from pandas._libs import algos, lib from pandas._libs.tslibs import conversion -from .dtypes import (CategoricalDtype, CategoricalDtypeType, - DatetimeTZDtype, DatetimeTZDtypeType, - PeriodDtype, PeriodDtypeType, - IntervalDtype, IntervalDtypeType, - ExtensionDtype, PandasExtensionDtype) -from .generic import (ABCCategorical, ABCPeriodIndex, - ABCDatetimeIndex, ABCSeries, - ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, - ABCIndexClass, ABCDateOffset) -from .inference import is_string_like, is_list_like -from .inference import * # noqa +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, + DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype, + IntervalDtypeType, ExtensionDtype, PandasExtensionDtype) +from pandas.core.dtypes.generic import ( + 
ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries, + ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass, + ABCDateOffset) +from pandas.core.dtypes.inference import ( # noqa:F401 + is_bool, is_integer, is_hashable, is_iterator, is_float, + is_dict_like, is_scalar, is_string_like, is_list_like, is_number, + is_file_like, is_re, is_re_compilable, is_sequence, is_nested_list_like, + is_named_tuple, is_array_like, is_decimal, is_complex, is_interval) _POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
Starting the conversation towards #14468 There were a couple misplaced annotations below docstrings that were causing mypy to choke when running: ```bash mypy pandas --ignore-missing-imports ``` A starred import in pandas.core.dtypes.common was responsible for a lot of complaints from mypy. These were being intentionally suppressed by flake8 but I figure it makes sense to clean up and convert into absolute imports
https://api.github.com/repos/pandas-dev/pandas/pulls/21008
2018-05-11T02:37:51Z
2018-06-23T16:12:19Z
2018-06-23T16:12:19Z
2018-06-23T16:12:23Z
DOC: cleanup of v0.23.0.txt
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 32f7447e5ef77..a262a85722329 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -519,7 +519,7 @@ Other Enhancements - :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) - :class:`pandas.tseries.api.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). -- :class:`DataFrame` and :class:`Series` now support matrix multiplication (```@```) operator (:issue:`10259`) for Python>=3.5 +- :class:`DataFrame` and :class:`Series` now support matrix multiplication (``@``) operator (:issue:`10259`) for Python>=3.5 - Updated :meth:`DataFrame.to_gbq` and :meth:`pandas.read_gbq` signature and documentation to reflect changes from the Pandas-GBQ library version 0.4.0. Adds intersphinx mapping to Pandas-GBQ library. (:issue:`20564`) @@ -569,9 +569,9 @@ Previous Behavior (and current behavior if on Python < 3.6): .. code-block:: ipython In [1]: pd.Series({'Income': 2000, - ... 'Expenses': -1500, - ... 'Taxes': -200, - ... 'Net result': 300}) + 'Expenses': -1500, + 'Taxes': -200, + 'Net result': 300}) Expenses -1500 Income 2000 Net result 300 @@ -806,7 +806,7 @@ Extraction of matching patterns from strings By default, extracting matching patterns from strings with :func:`str.extract` used to return a ``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was -extracted``). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless +extracted). 
As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless ``expand`` is set to ``False``. Finallay, ``None`` was an accepted value for the ``expand`` parameter (which was equivalent to ``False``), but now raises a ``ValueError``. (:issue:`11386`) @@ -917,9 +917,9 @@ Datetimelike API Changes - ``pandas.tseries.frequencies.get_freq_group()`` and ``pandas.tseries.frequencies.DAYS`` are removed from the public API (:issue:`18034`) - :func:`Series.truncate` and :func:`DataFrame.truncate` will raise a ``ValueError`` if the index is not sorted instead of an unhelpful ``KeyError`` (:issue:`17935`) - :attr:`Series.first` and :attr:`DataFrame.first` will now raise a ``TypeError`` - rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex`` (:issue:`20725`). -- :attr:`Series.last` and :attr:`DateFrame.last` will now raise a ``TypeError`` - rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex`` (:issue:`20725`). + rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex` (:issue:`20725`). +- :attr:`Series.last` and :attr:`DataFrame.last` will now raise a ``TypeError`` + rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex` (:issue:`20725`). - Restricted ``DateOffset`` keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`, :issue:`18226`). 
- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) - For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with ``freq=None``, addition or subtraction of integer-dtyped array or ``Index`` will raise ``NullFrequencyError`` instead of ``TypeError`` (:issue:`19895`) @@ -1364,7 +1364,7 @@ Reshaping - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) - Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) - Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`) -- Bug in :class:`Series` constructor with ``Categorical`` where a ```ValueError`` is not raised when an index of different length is given (:issue:`19342`) +- Bug in :class:`Series` constructor with ``Categorical`` where a ``ValueError`` is not raised when an index of different length is given (:issue:`19342`) - Bug in :meth:`DataFrame.astype` where column metadata is lost when converting to categorical or a dictionary of dtypes (:issue:`19920`) - Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`) - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`)
Cleanup of some errors in ``v.0.23.0.txt``
https://api.github.com/repos/pandas-dev/pandas/pulls/21007
2018-05-10T21:56:35Z
2018-05-11T06:49:14Z
2018-05-11T06:49:14Z
2018-05-18T18:05:14Z
Whatsnew Typo Correction
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 750227cd59f26..3f7bc0e8d8c3f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -555,8 +555,8 @@ If installed, we now require: .. _whatsnew_0230.api_breaking.dict_insertion_order: -Instantation from dicts preserves dict insertion order for python 3.6+ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Instantiation from dicts preserves dict insertion order for python 3.6+ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Until Python 3.6, dicts in Python had no formally defined ordering. For Python version 3.6 and later, dicts are ordered by insertion order, see
https://api.github.com/repos/pandas-dev/pandas/pulls/21006
2018-05-10T19:56:55Z
2018-05-10T19:59:45Z
2018-05-10T19:59:45Z
2018-05-14T21:11:03Z
DOC: Added 0.23.1 whatsnew template
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt new file mode 100644 index 0000000000000..5c9c3e2931bd9 --- /dev/null +++ b/doc/source/whatsnew/v0.23.1.txt @@ -0,0 +1,82 @@ +.. _whatsnew_0231: + +v0.23.1 +------- + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. We recommend that all users upgrade to this version. + +.. contents:: What's new in v0.23.1 + :local: + :backlinks: none + +.. _whatsnew_0231.enhancements: + +New features +~~~~~~~~~~~~ + + +.. _whatsnew_0231.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- +- + +.. _whatsnew_0231.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- + +Documentation Changes +~~~~~~~~~~~~~~~~~~~~~ + +- +- + +.. _whatsnew_0231.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +- +- + +Conversion +^^^^^^^^^^ + +- +- + +Indexing +^^^^^^^^ + +- +- + +I/O +^^^ + +- +- + +Plotting +^^^^^^^^ + +- +- + +Reshaping +^^^^^^^^^ + +- +- + +Categorical +^^^^^^^^^^^ + +-
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21001
2018-05-10T15:33:15Z
2018-05-16T03:00:32Z
2018-05-16T03:00:32Z
2018-06-12T14:33:51Z
PERF: removed coercion to int64 for arrays of ints in Categorical.from_codes
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 0ffd5f881d626..ae1d7029217a4 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -51,6 +51,7 @@ def setup(self): self.values_some_nan = list(np.tile(self.categories + [np.nan], N)) self.values_all_nan = [np.nan] * len(self.values) + self.values_all_int8 = np.ones(N, 'int8') def time_regular(self): pd.Categorical(self.values, self.categories) @@ -70,6 +71,9 @@ def time_with_nan(self): def time_all_nan(self): pd.Categorical(self.values_all_nan) + def time_from_codes_all_int8(self): + pd.Categorical.from_codes(self.values_all_int8, self.categories) + class ValueCounts(object): diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d10d51352d0e4..99b931db99c2c 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1079,6 +1079,7 @@ Performance Improvements - Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`) - Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. This manifiested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) - Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`) +- Improved performance of :func:`pandas.core.arrays.Categorical.from_codes` (:issue:`18501`) .. _whatsnew_0230.docs: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f91782459df67..abcb9ae3494b5 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -578,7 +578,7 @@ def from_codes(cls, codes, categories, ordered=False): unordered. """ try: - codes = np.asarray(codes, np.int64) + codes = coerce_indexer_dtype(np.asarray(codes), categories) except (ValueError, TypeError): raise ValueError( "codes need to be convertible to an arrays of integers")
- [x] closes #18501 - [ ] tests added / **passed** - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ``` In [3]: arr = np.ones(10000000,dtype='int8') # master In [4]: %timeit pd.Categorical.from_codes(arr, ['foo', 'bar']) 44.2 ms ± 545 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) # after patch In [4]: %timeit pd.Categorical.from_codes(arr, ['foo', 'bar']) 9 ms ± 54.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) ``` ``` before after ratio [6d5d7015] [fb1f7b84] 9.24±1ms 9.94±0.6ms 1.08 categoricals.Concat.time_concat 5.52±0.1ms 5.41±0.05ms 0.98 categoricals.Concat.time_union 32.0±0.3ms 32.3±0.3ms 1.01 categoricals.Constructor.time_all_nan 1.32±0.02ms 1.28±0.01ms 0.97 categoricals.Constructor.time_datetimes 1.26±0.01ms 1.29±0.02ms 1.02 categoricals.Constructor.time_datetimes_with_nat 354±3μs 349±7μs 0.99 categoricals.Constructor.time_fastpath 20.0±0.08ms 20.1±0.3ms 1.01 categoricals.Constructor.time_regular 185±1ms 186±0.6ms 1.01 categoricals.Constructor.time_with_nan 10.1ms 10.1ms 0.99 categoricals.Isin.time_isin_categorical('int64') 10.7±0.08ms 10.8±0.07ms 1.00 categoricals.Isin.time_isin_categorical('object') 9.11±0.1ms 9.04±0.2ms 0.99 categoricals.Rank.time_rank_int 9.33±0.1ms 9.37±0.1ms 1.00 categoricals.Rank.time_rank_int_cat 9.13±0.1ms 8.97±0.05ms 0.98 categoricals.Rank.time_rank_int_cat_ordered 141±0.9ms 136±1ms 0.97 categoricals.Rank.time_rank_string 11.2±0.2ms 11.1±0.1ms 0.99 categoricals.Rank.time_rank_string_cat 9.04±0.1ms 9.23±0.1ms 1.02 categoricals.Rank.time_rank_string_cat_ordered 592±5μs 586±3μs 0.99 categoricals.Repr.time_rendering 32.8±2ms 28.4±0.6ms ~0.86 categoricals.SetCategories.time_set_categories 31.8±2ms 29.6±0.1ms 0.93 categoricals.ValueCounts.time_value_counts(False) 30.7±0.1ms 29.3±0.2ms 0.96 categoricals.ValueCounts.time_value_counts(True) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21000
2018-05-10T14:38:06Z
2018-05-15T02:20:05Z
2018-05-15T02:20:05Z
2018-05-15T02:20:06Z
API: Added axis to take
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e8f74cf58a262..88bc497f9f22d 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1448,7 +1448,7 @@ def func(arr, indexer, out, fill_value=np.nan): return func -def take(arr, indices, allow_fill=False, fill_value=None): +def take(arr, indices, axis=0, allow_fill=False, fill_value=None): """ Take elements from an array. @@ -1461,6 +1461,8 @@ def take(arr, indices, allow_fill=False, fill_value=None): to an ndarray. indices : sequence of integers Indices to be taken. + axis : int, default 0 + The axis over which to select values. allow_fill : bool, default False How to handle negative values in `indices`. @@ -1476,6 +1478,9 @@ def take(arr, indices, allow_fill=False, fill_value=None): This may be ``None``, in which case the default NA value for the type (``self.dtype.na_value``) is used. + For multi-dimensional `arr`, each *element* is filled with + `fill_value`. + Returns ------- ndarray or ExtensionArray @@ -1529,10 +1534,11 @@ def take(arr, indices, allow_fill=False, fill_value=None): if allow_fill: # Pandas style, -1 means NA validate_indices(indices, len(arr)) - result = take_1d(arr, indices, allow_fill=True, fill_value=fill_value) + result = take_1d(arr, indices, axis=axis, allow_fill=True, + fill_value=fill_value) else: # NumPy style - result = arr.take(indices) + result = arr.take(indices, axis=axis) return result diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 2b78c91f9dac5..9ab147edb8d1b 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -447,6 +447,29 @@ def test_2d_datetime64(self): expected[:, [2, 4]] = datetime(2007, 1, 1) tm.assert_almost_equal(result, expected) + def test_take_axis_0(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1]) + expected = np.array([[0, 1, 2], [9, 10, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], 
allow_fill=True, fill_value=0) + expected = np.array([[0, 1, 2], [0, 0, 0]]) + tm.assert_numpy_array_equal(result, expected) + + def test_take_axis_1(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1], axis=1) + expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], axis=1, allow_fill=True, + fill_value=0) + expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]]) + tm.assert_numpy_array_equal(result, expected) + class TestExtensionTake(object): # The take method found in pd.api.extensions
- [x] closes #20932 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` One thing that's worrisome here: for N-D arrays, the `fill_value` argument is set for each element. In the case of IPArray, this is fine, since my storage NA-value is (0, 0), so I would pass `fill_value=0`. But in general, when people are using an N-D array to back a 1-D column, that won't necessarily work. But at that point, maybe we recommend people implement take on their own.
https://api.github.com/repos/pandas-dev/pandas/pulls/20999
2018-05-10T14:27:29Z
2018-05-10T18:25:31Z
2018-05-10T18:25:30Z
2018-05-10T18:25:35Z
ENH: Raise useful error when iterating a Window
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 32f7447e5ef77..8d20d7b6b78bd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -966,6 +966,7 @@ Other API Changes - Constructing a Series from a list of length 1 no longer broadcasts this list when a longer index is specified (:issue:`19714`, :issue:`20391`). - :func:`DataFrame.to_dict` with ``orient='index'`` no longer casts int columns to float for a DataFrame with only int and float columns (:issue:`18580`) - A user-defined-function that is passed to :func:`Series.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than a ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) +- Rolling and Expanding types raise ``NotImplementedError`` upon iteration (:issue:`11704`). .. 
_whatsnew_0230.deprecations: diff --git a/pandas/core/window.py b/pandas/core/window.py index 5fd054b1930e6..015e7f7913ed0 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -181,6 +181,10 @@ def __unicode__(self): return "{klass} [{attrs}]".format(klass=self._window_type, attrs=','.join(attrs)) + def __iter__(self): + url = 'https://github.com/pandas-dev/pandas/issues/11704' + raise NotImplementedError('See issue #11704 {url}'.format(url=url)) + def _get_index(self, index=None): """ Return index as ndarrays diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 93f637a561718..d8e90ae0e1b35 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -512,6 +512,14 @@ def test_multi_index_names(self): tm.assert_index_equal(result.columns, df.columns) assert result.index.names == [None, '1', '2'] + @pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame]) + def test_iter_raises(self, klass): + # https://github.com/pandas-dev/pandas/issues/11704 + # Iteration over a Window + obj = klass([1, 2, 3, 4]) + with pytest.raises(NotImplementedError): + iter(obj.rolling(2)) + class TestExpanding(Base): @@ -590,6 +598,14 @@ def test_missing_minp_zero(self): expected = pd.Series([np.nan]) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame]) + def test_iter_raises(self, klass): + # https://github.com/pandas-dev/pandas/issues/11704 + # Iteration over a Window + obj = klass([1, 2, 3, 4]) + with pytest.raises(NotImplementedError): + iter(obj.expanding(2)) + class TestEWM(Base):
Until Issue #11704 is completed, raise a NotImplementedError to provide a more clear error message when attempting to iterate over a Rolling or Expanding window.
https://api.github.com/repos/pandas-dev/pandas/pulls/20996
2018-05-09T22:12:22Z
2018-05-12T18:38:15Z
2018-05-12T18:38:15Z
2018-05-12T18:38:20Z
DEPR: DataFrame dropna accepting multiple axes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 32f7447e5ef77..c47c7878843c2 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1005,6 +1005,7 @@ Deprecations - Setting ``PeriodIndex.freq`` (which was not guaranteed to work correctly) is deprecated. Use :meth:`PeriodIndex.asfreq` instead (:issue:`20678`) - ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`) - The previous default behavior of negative indices in ``Categorical.take`` is deprecated. In a future version it will change from meaning missing values to meaning positional indices from the right. The future behavior is consistent with :meth:`Series.take` (:issue:`20664`). +- Passing multiple axes to the ``axis`` parameter in :func:`DataFrame.dropna` has been deprecated and will be removed in a future version (:issue:`20987`) .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d43fb95a70555..0437c479c9d81 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4168,14 +4168,15 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, Parameters ---------- - axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof + axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. - Pass tuple or list to drop on multiple axes. + .. deprecated:: 0.23.0: Pass tuple or list to drop on multiple + axes. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. 
@@ -4259,6 +4260,11 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): + # GH20987 + msg = ("supplying multiple axes to axis is deprecated and " + "will be removed in a future version.") + warnings.warn(msg, FutureWarning, stacklevel=2) + result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 668eae21c664f..f1113fd6debf2 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -174,8 +174,12 @@ def test_dropna_multiple_axes(self): [np.nan, np.nan, np.nan, np.nan], [7, np.nan, 8, 9]]) cp = df.copy() - result = df.dropna(how='all', axis=[0, 1]) - result2 = df.dropna(how='all', axis=(0, 1)) + + # GH20987 + with tm.assert_produces_warning(FutureWarning): + result = df.dropna(how='all', axis=[0, 1]) + with tm.assert_produces_warning(FutureWarning): + result2 = df.dropna(how='all', axis=(0, 1)) expected = df.dropna(how='all').dropna(how='all', axis=1) assert_frame_equal(result, expected) @@ -183,7 +187,8 @@ def test_dropna_multiple_axes(self): assert_frame_equal(df, cp) inp = df.copy() - inp.dropna(how='all', axis=(0, 1), inplace=True) + with tm.assert_produces_warning(FutureWarning): + inp.dropna(how='all', axis=(0, 1), inplace=True) assert_frame_equal(inp, expected) def test_dropna_tz_aware_datetime(self):
Deprecates multiple axes passing to `dropna`. I added the whatsnew to 0.23.0 for now, I can move this to 0.24.0 later if preferred once the whatsnew doc is created. - [x] closes #20987 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20995
2018-05-09T21:41:01Z
2018-05-10T10:32:10Z
2018-05-10T10:32:10Z
2018-05-10T21:02:01Z
DOC: update Readme recommended tags for new contributors
diff --git a/README.md b/README.md index cd2cb99992977..3c8fe57400099 100644 --- a/README.md +++ b/README.md @@ -233,7 +233,7 @@ All contributions, bug reports, bug fixes, documentation improvements, enhanceme A detailed overview on how to contribute can be found in the **[contributing guide.](https://pandas.pydata.org/pandas-docs/stable/contributing.html)** -If you are simply looking to start working with the pandas codebase, navigate to the [GitHub “issues” tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [Difficulty Novice](https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22) where you could start out. +If you are simply looking to start working with the pandas codebase, navigate to the [GitHub “issues” tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [good first issue](https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open) where you could start out. You can also triage issues which may include reproducing bug reports, or asking for vital information such as version numbers or reproduction instructions. If you would like to start triaging issues, one easy way to get started is to [subscribe to pandas on CodeTriage](https://www.codetriage.com/pandas-dev/pandas). 
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 58f097c2fc5f3..e9939250052f1 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -17,8 +17,8 @@ If you are brand new to pandas or open-source development, we recommend going through the `GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ to find issues that interest you. There are a number of issues listed under `Docs <https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_ -and `Difficulty Novice -<https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_ +and `good first issue +<https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open>`_ where you could start out. Once you've found an interesting issue, you can return here to get your development environment setup.
This changes the recommended tag for new contributors from `Difficulty Novice` to `good first issue` in both main README.md and /doc/source/contributing.rst. closes #20982
https://api.github.com/repos/pandas-dev/pandas/pulls/20992
2018-05-09T15:16:31Z
2018-05-09T15:19:59Z
2018-05-09T15:19:59Z
2018-05-09T15:19:59Z
COMPAT: 32-bit indexing compat
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 9968d398e9040..741e5553141f7 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -8,7 +8,8 @@ from cpython.slice cimport PySlice_Check import numpy as np cimport numpy as cnp -from numpy cimport ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t +from numpy cimport (ndarray, float64_t, int32_t, + int64_t, uint8_t, uint64_t, intp_t) cnp.import_array() cdef extern from "numpy/arrayobject.h": @@ -183,8 +184,8 @@ cdef class IndexEngine: cdef _maybe_get_bool_indexer(self, object val): cdef: - ndarray[cnp.uint8_t, ndim=1, cast=True] indexer - ndarray[int64_t, ndim=1] found + ndarray[uint8_t, ndim=1, cast=True] indexer + ndarray[intp_t, ndim=1] found int count indexer = self._get_index_values() == val diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index 6f726dd49f11e..4ea35da0626f3 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -55,8 +55,8 @@ cdef class {{name}}Engine(IndexEngine): cdef _maybe_get_bool_indexer(self, object val): cdef: - ndarray[cnp.uint8_t, ndim=1, cast=True] indexer - ndarray[int64_t, ndim=1] found + ndarray[uint8_t, ndim=1, cast=True] indexer + ndarray[intp_t, ndim=1] found ndarray[{{ctype}}] values int count = 0
xref #19539
https://api.github.com/repos/pandas-dev/pandas/pulls/20989
2018-05-09T10:52:04Z
2018-05-10T09:57:01Z
2018-05-10T09:57:01Z
2018-05-10T09:57:15Z
BUG: date_range linspace behavior respects tz
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6991bc016868..6f5c180c587bd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -525,7 +525,7 @@ Other Enhancements library. (:issue:`20564`) - Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`) - :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`) -- :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`) +- :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1b5aa3b45f3b5..1d5c2d9a098ed 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -587,10 +587,13 @@ def _generate(cls, start, end, periods, name, freq, if end is not None: end = end.tz_localize(tz).asm8 else: + # Create a linearly spaced date_range in local time + start = start.tz_localize(tz) + end = end.tz_localize(tz) index = tools.to_datetime(np.linspace(start.value, - end.value, periods)) - if tz is not None: - index = index.tz_localize('UTC').tz_convert(tz) + end.value, periods), + utc=True) + index = index.tz_convert(tz) if not left_closed and len(index) and index[0] == start: index = index[1:] diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index bbe9cb65eb1a9..3fb088329f225 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -164,20 +164,39 @@ def 
test_date_range_ambiguous_arguments(self): def test_date_range_convenience_periods(self): # GH 20808 - rng = date_range('2018-04-24', '2018-04-27', periods=3) - exp = DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', - '2018-04-27 00:00:00'], freq=None) + result = date_range('2018-04-24', '2018-04-27', periods=3) + expected = DatetimeIndex(['2018-04-24 00:00:00', + '2018-04-25 12:00:00', + '2018-04-27 00:00:00'], freq=None) - tm.assert_index_equal(rng, exp) + tm.assert_index_equal(result, expected) # Test if spacing remains linear if tz changes to dst in range - rng = date_range('2018-04-01 01:00:00', '2018-04-01 04:00:00', - tz='Australia/Sydney', periods=3) - exp = DatetimeIndex(['2018-04-01 01:00:00+11:00', - '2018-04-01 02:00:00+11:00', - '2018-04-01 02:00:00+10:00', - '2018-04-01 03:00:00+10:00', - '2018-04-01 04:00:00+10:00'], freq=None) + result = date_range('2018-04-01 01:00:00', + '2018-04-01 04:00:00', + tz='Australia/Sydney', + periods=3) + expected = DatetimeIndex([Timestamp('2018-04-01 01:00:00+1100', + tz='Australia/Sydney'), + Timestamp('2018-04-01 02:00:00+1000', + tz='Australia/Sydney'), + Timestamp('2018-04-01 04:00:00+1000', + tz='Australia/Sydney')]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('start,end,result_tz', [ + ['20180101', '20180103', 'US/Eastern'], + [datetime(2018, 1, 1), datetime(2018, 1, 3), 'US/Eastern'], + [Timestamp('20180101'), Timestamp('20180103'), 'US/Eastern'], + [Timestamp('20180101', tz='US/Eastern'), + Timestamp('20180103', tz='US/Eastern'), 'US/Eastern'], + [Timestamp('20180101', tz='US/Eastern'), + Timestamp('20180103', tz='US/Eastern'), None]]) + def test_date_range_linspacing_tz(self, start, end, result_tz): + # GH 20983 + result = date_range(start, end, periods=3, tz=result_tz) + expected = date_range('20180101', periods=3, freq='D', tz='US/Eastern') + tm.assert_index_equal(result, expected) def test_date_range_businesshour(self): idx = DatetimeIndex(['2014-07-04 09:00', 
'2014-07-04 10:00',
- [x] closes #20983 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry (addendum to the v0.23 whatsnew entry for this feature)
https://api.github.com/repos/pandas-dev/pandas/pulls/20988
2018-05-09T06:47:02Z
2018-05-10T10:26:23Z
2018-05-10T10:26:23Z
2018-05-10T15:22:35Z
Consistent Return Structure for Rolling Apply
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6991bc016868..32f7447e5ef77 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1328,6 +1328,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.groupby` where transformations using ``np.all`` and ``np.any`` were raising a ``ValueError`` (:issue:`20653`) - Bug in :func:`DataFrame.resample` where ``ffill``, ``bfill``, ``pad``, ``backfill``, ``fillna``, ``interpolate``, and ``asfreq`` were ignoring ``loffset``. (:issue:`20744`) - Bug in :func:`DataFrame.groupby` when applying a function that has mixed data types and the user supplied function can fail on the grouping column (:issue:`20949`) +- Bug in :func:`DataFrameGroupBy.rolling().apply() <pandas.core.window.Rolling.apply>` where operations performed against the associated :class:`DataFrameGroupBy` object could impact the inclusion of the grouped item(s) in the result (:issue:`14013`) Sparse ^^^^^^ diff --git a/pandas/core/window.py b/pandas/core/window.py index d7f9f7c85fbbc..5fd054b1930e6 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -837,11 +837,7 @@ def _apply(self, func, name=None, window=None, center=None, index, indexi = self._get_index(index=index) results = [] for b in blocks: - try: - values = self._prep_values(b.values) - except TypeError: - results.append(b.values.copy()) - continue + values = self._prep_values(b.values) if values.size == 0: results.append(values.copy()) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 304e3d02466a5..93f637a561718 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -105,7 +105,6 @@ def test_attribute_access(self): def tests_skip_nuisance(self): df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'}) - r = df.rolling(window=3) result = r[['A', 'B']].sum() expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9], @@ -113,9 +112,12 @@ def tests_skip_nuisance(self): 
columns=list('AB')) tm.assert_frame_equal(result, expected) - expected = concat([r[['A', 'B']].sum(), df[['C']]], axis=1) - result = r.sum() - tm.assert_frame_equal(result, expected, check_like=True) + def test_skip_sum_object_raises(self): + df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'}) + r = df.rolling(window=3) + + with tm.assert_raises_regex(TypeError, 'cannot handle this type'): + r.sum() def test_agg(self): df = DataFrame({'A': range(5), 'B': range(0, 10, 2)}) @@ -3174,6 +3176,28 @@ def test_rolling_apply(self, raw): lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) + def test_rolling_apply_mutability(self): + # GH 14013 + df = pd.DataFrame({'A': ['foo'] * 3 + ['bar'] * 3, 'B': [1] * 6}) + g = df.groupby('A') + + mi = pd.MultiIndex.from_tuples([('bar', 3), ('bar', 4), ('bar', 5), + ('foo', 0), ('foo', 1), ('foo', 2)]) + + mi.names = ['A', None] + # Grouped column should not be a part of the output + expected = pd.DataFrame([np.nan, 2., 2.] * 2, columns=['B'], index=mi) + + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + + # Call an arbitrary function on the groupby + g.sum() + + # Make sure nothing has been mutated + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + def test_expanding(self): g = self.frame.groupby('A') r = g.expanding()
- [X] closes #14013 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry With the changes made in #20949 it appears possible to fix this bug by suppressing a `catch` block that was internal to Rolling's `_apply`. I'm not sure what the purpose of this catch as it essentially allows values to bypass the applied function...Only one test broke on removal which looked wrong anyway, so I updated it as such
https://api.github.com/repos/pandas-dev/pandas/pulls/20984
2018-05-09T00:07:16Z
2018-05-09T10:24:39Z
2018-05-09T10:24:38Z
2018-09-04T23:49:29Z
Parametrization of indexes/test_base #4
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 377b17d45265c..f4fa547574b9e 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -13,7 +13,7 @@ from pandas.tests.indexes.common import Base from pandas.compat import (range, lrange, lzip, u, - text_type, zip, PY3, PY35, PY36, PYPY) + text_type, zip, PY3, PY35, PY36, PYPY, StringIO) import operator import numpy as np @@ -67,9 +67,9 @@ def generate_index_types(self, skip_index_keys=[]): yield key, index def test_can_hold_identifiers(self): - idx = self.create_index() - key = idx[0] - assert idx._can_hold_identifiers_and_holds_name(key) is True + index = self.create_index() + key = index[0] + assert index._can_hold_identifiers_and_holds_name(key) is True def test_new_axis(self): new_index = self.dateIndex[None, :] @@ -1280,8 +1280,8 @@ def test_get_indexer_strings_raises(self): def test_get_indexer_numeric_index_boolean_target(self): # GH 16877 - numeric_idx = pd.Index(range(4)) - result = numeric_idx.get_indexer([True, False, True]) + numeric_index = pd.Index(range(4)) + result = numeric_index.get_indexer([True, False, True]) expected = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) @@ -1748,16 +1748,18 @@ def test_indexing_doesnt_change_class(self): assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_)) def test_outer_join_sort(self): - left_idx = Index(np.random.permutation(15)) - right_idx = tm.makeDateIndex(10) + left_index = Index(np.random.permutation(15)) + right_index = tm.makeDateIndex(10) with tm.assert_produces_warning(RuntimeWarning): - result = left_idx.join(right_idx, how='outer') + result = left_index.join(right_index, how='outer') - # right_idx in this case because DatetimeIndex has join precedence over - # Int64Index + # right_index in this case because DatetimeIndex has join precedence + # over Int64Index with tm.assert_produces_warning(RuntimeWarning): - expected = 
right_idx.astype(object).union(left_idx.astype(object)) + expected = right_index.astype(object).union( + left_index.astype(object)) + tm.assert_index_equal(result, expected) def test_nan_first_take_datetime(self): @@ -1840,228 +1842,230 @@ def test_reindex_no_type_preserve_target_empty_mi(self): assert result.levels[1].dtype.type == np.float64 def test_groupby(self): - idx = Index(range(5)) - groups = idx.groupby(np.array([1, 1, 2, 2, 2])) - exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])} - tm.assert_dict_equal(groups, exp) + index = Index(range(5)) + result = index.groupby(np.array([1, 1, 2, 2, 2])) + expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])} - def test_equals_op_multiindex(self): + tm.assert_dict_equal(result, expected) + + @pytest.mark.parametrize("mi,expected", [ + (MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])), + (MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))]) + def test_equals_op_multiindex(self, mi, expected): # GH9785 # test comparisons of multiindex - from pandas.compat import StringIO df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) - tm.assert_numpy_array_equal(df.index == df.index, - np.array([True, True])) - - mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)]) - tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True])) - mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)]) - tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False])) - mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) - with tm.assert_raises_regex(ValueError, "Lengths must match"): - df.index == mi3 - index_a = Index(['foo', 'bar', 'baz']) + result = df.index == mi + tm.assert_numpy_array_equal(result, expected) + + def test_equals_op_multiindex_identify(self): + df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) + + result = df.index == df.index + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("index", [ + 
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]), + Index(['foo', 'bar', 'baz'])]) + def test_equals_op_mismatched_multiindex_raises(self, index): + df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) + with tm.assert_raises_regex(ValueError, "Lengths must match"): - df.index == index_a - tm.assert_numpy_array_equal(index_a == mi3, - np.array([False, False, False])) + df.index == index - def test_conversion_preserves_name(self): - # GH 10875 - i = pd.Index(['01:02:03', '01:02:04'], name='label') - assert i.name == pd.to_datetime(i).name - assert i.name == pd.to_timedelta(i).name + def test_equals_op_index_vs_mi_same_length(self): + mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) + index = Index(['foo', 'bar', 'baz']) - def test_string_index_repr(self): - # py3/py2 repr can differ because of "u" prefix - # which also affects to displayed element size + result = mi == index + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) - if PY3: - coerce = lambda x: x - else: - coerce = unicode # noqa + @pytest.mark.parametrize("dt_conv", [ + pd.to_datetime, pd.to_timedelta]) + def test_dt_conversion_preserves_name(self, dt_conv): + # GH 10875 + index = pd.Index(['01:02:03', '01:02:04'], name='label') + assert index.name == dt_conv(index).name + @pytest.mark.skipif(not PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # ASCII # short - idx = pd.Index(['a', 'bb', 'ccc']) - if PY3: - expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')""" - assert repr(idx) == expected - else: - expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')""" - assert coerce(idx) == expected - + (pd.Index(['a', 'bb', 'ccc']), + u"""Index(['a', 'bb', 'ccc'], dtype='object')"""), # multiple lines - idx = pd.Index(['a', 'bb', 'ccc'] * 10) - if PY3: - expected = u"""\ + (pd.Index(['a', 'bb', 'ccc'] * 10), + u"""\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - dtype='object')""" - - assert repr(idx) == expected - else: - expected = u"""\ -Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', - u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', - u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - dtype='object')""" - - assert coerce(idx) == expected - + dtype='object')"""), # truncated - idx = pd.Index(['a', 'bb', 'ccc'] * 100) - if PY3: - expected = u"""\ + (pd.Index(['a', 'bb', 'ccc'] * 100), + u"""\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', ... 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - dtype='object', length=300)""" + dtype='object', length=300)"""), - assert repr(idx) == expected - else: - expected = u"""\ + # Non-ASCII + # short + (pd.Index([u'あ', u'いい', u'ううう']), + u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""), + # multiple lines + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう'],\n" + u" dtype='object')")), + # truncated + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + u"'あ', 'いい', 'ううう', 'あ',\n" + u" ...\n" + u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr(self, index, expected): + result = repr(index) + assert result == expected + + @pytest.mark.skipif(PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # ASCII + # short + (pd.Index(['a', 'bb', 'ccc']), + u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""), + # multiple lines + (pd.Index(['a', 'bb', 'ccc'] * 10), + u"""\ +Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', 
u'ccc', u'a', + u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', + u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], + dtype='object')"""), + # truncated + (pd.Index(['a', 'bb', 'ccc'] * 100), + u"""\ Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', ... u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - dtype='object', length=300)""" - - assert coerce(idx) == expected + dtype='object', length=300)"""), + # Non-ASCII # short - idx = pd.Index([u'あ', u'いい', u'ううう']) - if PY3: - expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')""" - assert repr(idx) == expected - else: - expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')""" - assert coerce(idx) == expected - + (pd.Index([u'あ', u'いい', u'ううう']), + u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""), # multiple lines - idx = pd.Index([u'あ', u'いい', u'ううう'] * 10) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " - u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " - u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう'],\n" - u" dtype='object')") - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " - u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" - u" dtype='object')") - assert coerce(idx) == expected - + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" + u" u'いい', u'ううう', u'あ', u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " + u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" + u" dtype='object')")), # truncated - idx = pd.Index([u'あ', u'いい', u'ううう'] * 100) - if PY3: - expected = (u"Index(['あ', 'いい', 
'ううう', 'あ', 'いい', 'ううう', " - u"'あ', 'いい', 'ううう', 'あ',\n" - u" ...\n" - u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう'],\n" - u" dtype='object', length=300)") - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" - u" ...\n" - u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " - u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" - u" dtype='object', length=300)") - - assert coerce(idx) == expected + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" + u" ...\n" + u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " + u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr_compat(self, index, expected): + result = unicode(index) # noqa + assert result == expected - # Emable Unicode option ----------------------------------------- + @pytest.mark.skipif(not PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # short + (pd.Index([u'あ', u'いい', u'ううう']), + (u"Index(['あ', 'いい', 'ううう'], " + u"dtype='object')")), + # multiple lines + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう'],\n" + u" dtype='object')""")), + # truncated + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ',\n" + u" ...\n" + u" 'ううう', 'あ', 'いい', 'ううう', 'あ', " + u"'いい', 'ううう', 'あ', 'いい',\n" + u" 'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr_with_unicode_option(self, index, expected): + # Enable Unicode option ----------------------------------------- with cf.option_context('display.unicode.east_asian_width', True): + result = repr(index) + assert result == 
expected - # short - idx = pd.Index([u'あ', u'いい', u'ううう']) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう'], " - u"dtype='object')") - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう'], " - u"dtype='object')") - assert coerce(idx) == expected - - # multiple lines - idx = pd.Index([u'あ', u'いい', u'ううう'] * 10) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう'],\n" - u" dtype='object')""") - - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう'],\n" - u" dtype='object')") - - assert coerce(idx) == expected - - # truncated - idx = pd.Index([u'あ', u'いい', u'ううう'] * 100) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ',\n" - u" ...\n" - u" 'ううう', 'あ', 'いい', 'ううう', 'あ', " - u"'いい', 'ううう', 'あ', 'いい',\n" - u" 'ううう'],\n" - u" dtype='object', length=300)") - - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ',\n" - u" ...\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう'],\n" - u" dtype='object', length=300)") - - assert coerce(idx) == expected + @pytest.mark.skipif(PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # short + (pd.Index([u'あ', u'いい', u'ううう']), + (u"Index([u'あ', u'いい', u'ううう'], " + u"dtype='object')")), + # multiple lines + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ', 
u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう', u'あ',\n" + u" u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ', u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう'],\n" + u" dtype='object')")), + # truncated + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ',\n" + u" ...\n" + u" u'ううう', u'あ', u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう', u'あ',\n" + u" u'いい', u'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr_with_unicode_option_compat(self, index, + expected): + # Enable Unicode option ----------------------------------------- + with cf.option_context('display.unicode.east_asian_width', True): + result = unicode(index) # noqa + assert result == expected @pytest.mark.parametrize('dtype', [np.int64, np.float64]) @pytest.mark.parametrize('delta', [1, 0, -1]) def test_addsub_arithmetic(self, dtype, delta): # GH 8142 delta = dtype(delta) - idx = pd.Index([10, 11, 12], dtype=dtype) - result = idx + delta - expected = pd.Index(idx.values + delta, dtype=dtype) + index = pd.Index([10, 11, 12], dtype=dtype) + result = index + delta + expected = pd.Index(index.values + delta, dtype=dtype) tm.assert_index_equal(result, expected) # this subtraction used to fail - result = idx - delta - expected = pd.Index(idx.values - delta, dtype=dtype) + result = index - delta + expected = pd.Index(index.values - delta, dtype=dtype) tm.assert_index_equal(result, expected) - tm.assert_index_equal(idx + idx, 2 * idx) - tm.assert_index_equal(idx - idx, 0 * idx) - assert not (idx - idx).empty + tm.assert_index_equal(index + index, 2 * index) + tm.assert_index_equal(index - index, 0 * index) + assert not (index - index).empty def test_iadd_preserves_name(self): # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name @@ -2075,14 +2079,14 @@ def test_iadd_preserves_name(self): assert ser.index.name == "foo" def test_cached_properties_not_settable(self): - idx = 
pd.Index([1, 2, 3]) + index = pd.Index([1, 2, 3]) with tm.assert_raises_regex(AttributeError, "Can't set attribute"): - idx.is_unique = False + index.is_unique = False def test_get_duplicates_deprecated(self): - idx = pd.Index([1, 2, 3]) + index = pd.Index([1, 2, 3]) with tm.assert_produces_warning(FutureWarning): - idx.get_duplicates() + index.get_duplicates() class TestMixedIntIndex(Base): @@ -2100,43 +2104,42 @@ def create_index(self): return self.mixedIndex def test_argsort(self): - idx = self.create_index() + index = self.create_index() if PY36: with tm.assert_raises_regex(TypeError, "'>|<' not supported"): - result = idx.argsort() + result = index.argsort() elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): - result = idx.argsort() + result = index.argsort() else: - result = idx.argsort() - expected = np.array(idx).argsort() + result = index.argsort() + expected = np.array(index).argsort() tm.assert_numpy_array_equal(result, expected, check_dtype=False) def test_numpy_argsort(self): - idx = self.create_index() + index = self.create_index() if PY36: with tm.assert_raises_regex(TypeError, "'>|<' not supported"): - result = np.argsort(idx) + result = np.argsort(index) elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): - result = np.argsort(idx) + result = np.argsort(index) else: - result = np.argsort(idx) - expected = idx.argsort() + result = np.argsort(index) + expected = index.argsort() tm.assert_numpy_array_equal(result, expected) def test_copy_name(self): # Check that "name" argument passed at initialization is honoured # GH12309 - idx = self.create_index() + index = self.create_index() - first = idx.__class__(idx, copy=True, name='mario') + first = index.__class__(index, copy=True, name='mario') second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. 
assert first is not second - # Not using tm.assert_index_equal() since names differ: - assert idx.equals(first) + tm.assert_index_equal(first, second) assert first.name == 'mario' assert second.name == 'mario' @@ -2154,77 +2157,85 @@ def test_copy_name(self): def test_copy_name2(self): # Check that adding a "name" parameter to the copy is honored # GH14302 - idx = pd.Index([1, 2], name='MyName') - idx1 = idx.copy() - - assert idx.equals(idx1) - assert idx.name == 'MyName' - assert idx1.name == 'MyName' - - idx2 = idx.copy(name='NewName') + index = pd.Index([1, 2], name='MyName') + index1 = index.copy() - assert idx.equals(idx2) - assert idx.name == 'MyName' - assert idx2.name == 'NewName' + tm.assert_index_equal(index, index1) - idx3 = idx.copy(names=['NewName']) + index2 = index.copy(name='NewName') + tm.assert_index_equal(index, index2, check_names=False) + assert index.name == 'MyName' + assert index2.name == 'NewName' - assert idx.equals(idx3) - assert idx.name == 'MyName' - assert idx.names == ['MyName'] - assert idx3.name == 'NewName' - assert idx3.names == ['NewName'] + index3 = index.copy(names=['NewName']) + tm.assert_index_equal(index, index3, check_names=False) + assert index.name == 'MyName' + assert index.names == ['MyName'] + assert index3.name == 'NewName' + assert index3.names == ['NewName'] def test_union_base(self): - idx = self.create_index() - first = idx[3:] - second = idx[:5] + index = self.create_index() + first = index[3:] + second = index[:5] if PY3: - with tm.assert_produces_warning(RuntimeWarning): - # unorderable types - result = first.union(second) - expected = Index(['b', 2, 'c', 0, 'a', 1]) - tm.assert_index_equal(result, expected) + # unorderable types + warn_type = RuntimeWarning else: + warn_type = None + + with tm.assert_produces_warning(warn_type): result = first.union(second) - expected = Index(['b', 2, 'c', 0, 'a', 1]) - tm.assert_index_equal(result, expected) + expected = Index(['b', 2, 'c', 0, 'a', 1]) + 
tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("klass", [ + np.array, Series, list]) + def test_union_different_type_base(self, klass): # GH 10149 - cases = [klass(second.values) - for klass in [np.array, Series, list]] - for case in cases: - if PY3: - with tm.assert_produces_warning(RuntimeWarning): - # unorderable types - result = first.union(case) - assert tm.equalContents(result, idx) - else: - result = first.union(case) - assert tm.equalContents(result, idx) + index = self.create_index() + first = index[3:] + second = index[:5] + + if PY3: + # unorderable types + warn_type = RuntimeWarning + else: + warn_type = None + + with tm.assert_produces_warning(warn_type): + result = first.union(klass(second.values)) + + assert tm.equalContents(result, index) def test_intersection_base(self): # (same results for py2 and py3 but sortedness not tested elsewhere) - idx = self.create_index() - first = idx[:5] - second = idx[:3] + index = self.create_index() + first = index[:5] + second = index[:3] + result = first.intersection(second) expected = Index([0, 'a', 1]) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("klass", [ + np.array, Series, list]) + def test_intersection_different_type_base(self, klass): # GH 10149 - cases = [klass(second.values) - for klass in [np.array, Series, list]] - for case in cases: - result = first.intersection(case) - assert tm.equalContents(result, second) + index = self.create_index() + first = index[:5] + second = index[:3] + + result = first.intersection(klass(second.values)) + assert tm.equalContents(result, second) def test_difference_base(self): # (same results for py2 and py3 but sortedness not tested elsewhere) - idx = self.create_index() - first = idx[:4] - second = idx[3:] + index = self.create_index() + first = index[:4] + second = index[3:] result = first.difference(second) expected = Index([0, 1, 'a']) @@ -2232,103 +2243,102 @@ def test_difference_base(self): def 
test_symmetric_difference(self): # (same results for py2 and py3 but sortedness not tested elsewhere) - idx = self.create_index() - first = idx[:4] - second = idx[3:] + index = self.create_index() + first = index[:4] + second = index[3:] result = first.symmetric_difference(second) expected = Index([0, 1, 2, 'a', 'c']) tm.assert_index_equal(result, expected) def test_logical_compat(self): - idx = self.create_index() - assert idx.all() == idx.values.all() - assert idx.any() == idx.values.any() - - def test_dropna(self): + index = self.create_index() + assert index.all() == index.values.all() + assert index.any() == index.values.any() + + @pytest.mark.parametrize("how", ['any', 'all']) + @pytest.mark.parametrize("dtype", [ + None, object, 'category']) + @pytest.mark.parametrize("vals,expected", [ + ([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]), + ([1., 2., np.nan, 3.], [1., 2., 3.]), + (['A', 'B', 'C'], ['A', 'B', 'C']), + (['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])]) + def test_dropna(self, how, dtype, vals, expected): # GH 6194 - for dtype in [None, object, 'category']: - idx = pd.Index([1, 2, 3], dtype=dtype) - tm.assert_index_equal(idx.dropna(), idx) - - idx = pd.Index([1., 2., 3.], dtype=dtype) - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype) - tm.assert_index_equal(nanidx.dropna(), idx) - - idx = pd.Index(['A', 'B', 'C'], dtype=dtype) - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype) - tm.assert_index_equal(nanidx.dropna(), idx) - - tm.assert_index_equal(nanidx.dropna(how='any'), idx) - tm.assert_index_equal(nanidx.dropna(how='all'), idx) - - idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']) - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', - '2011-01-03', pd.NaT]) - tm.assert_index_equal(nanidx.dropna(), idx) - - idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days']) - 
tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', - '3 days', pd.NaT]) - tm.assert_index_equal(nanidx.dropna(), idx) - - idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M') - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], - freq='M') - tm.assert_index_equal(nanidx.dropna(), idx) + index = pd.Index(vals, dtype=dtype) + result = index.dropna(how=how) + expected = pd.Index(expected, dtype=dtype) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("how", ['any', 'all']) + @pytest.mark.parametrize("index,expected", [ + (pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']), + pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])), + (pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]), + pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])), + (pd.TimedeltaIndex(['1 days', '2 days', '3 days']), + pd.TimedeltaIndex(['1 days', '2 days', '3 days'])), + (pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]), + pd.TimedeltaIndex(['1 days', '2 days', '3 days'])), + (pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'), + pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')), + (pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'), + pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))]) + def test_dropna_dt_like(self, how, index, expected): + result = index.dropna(how=how) + tm.assert_index_equal(result, expected) + def test_dropna_invalid_how_raises(self): msg = "invalid how option: xxx" with tm.assert_raises_regex(ValueError, msg): pd.Index([1, 2, 3]).dropna(how='xxx') def test_get_combined_index(self): result = _get_combined_index([]) - tm.assert_index_equal(result, Index([])) + expected = Index([]) + tm.assert_index_equal(result, expected) def test_repeat(self): repeats = 2 - idx = pd.Index([1, 2, 3]) + index = pd.Index([1, 2, 3]) expected = 
pd.Index([1, 1, 2, 2, 3, 3]) - result = idx.repeat(repeats) + result = index.repeat(repeats) tm.assert_index_equal(result, expected) + def test_repeat_warns_n_keyword(self): + index = pd.Index([1, 2, 3]) + expected = pd.Index([1, 1, 2, 2, 3, 3]) + with tm.assert_produces_warning(FutureWarning): - result = idx.repeat(n=repeats) - tm.assert_index_equal(result, expected) + result = index.repeat(n=2) - def test_is_monotonic_na(self): - examples = [pd.Index([np.nan]), - pd.Index([np.nan, 1]), - pd.Index([1, 2, np.nan]), - pd.Index(['a', 'b', np.nan]), - pd.to_datetime(['NaT']), - pd.to_datetime(['NaT', '2000-01-01']), - pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), - pd.to_timedelta(['1 day', 'NaT']), ] - for index in examples: - assert not index.is_monotonic_increasing - assert not index.is_monotonic_decreasing - assert not index._is_strictly_monotonic_increasing - assert not index._is_strictly_monotonic_decreasing + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", [ + pd.Index([np.nan]), pd.Index([np.nan, 1]), + pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]), + pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']), + pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), + pd.to_timedelta(['1 day', 'NaT'])]) + def test_is_monotonic_na(self, index): + assert not index.is_monotonic_increasing + assert not index.is_monotonic_decreasing + assert not index._is_strictly_monotonic_increasing + assert not index._is_strictly_monotonic_decreasing def test_repr_summary(self): with cf.option_context('display.max_seq_items', 10): - r = repr(pd.Index(np.arange(1000))) - assert len(r) < 200 - assert "..." in r + result = repr(pd.Index(np.arange(1000))) + assert len(result) < 200 + assert "..." 
in result - def test_int_name_format(self): + @pytest.mark.parametrize("klass", [Series, DataFrame]) + def test_int_name_format(self, klass): index = Index(['a', 'b', 'c'], name=0) - s = Series(lrange(3), index) - df = DataFrame(lrange(3), index=index) - repr(s) - repr(df) + result = klass(lrange(3), index=index) + assert '0' in repr(result) def test_print_unicode_columns(self): df = pd.DataFrame({u("\u05d0"): [1, 2, 3], @@ -2336,29 +2346,27 @@ def test_print_unicode_columns(self): "c": [7, 8, 9]}) repr(df.columns) # should not raise UnicodeDecodeError - def test_unicode_string_with_unicode(self): - idx = Index(lrange(1000)) - - if PY3: - str(idx) - else: - text_type(idx) + @pytest.mark.parametrize("func,compat_func", [ + (str, text_type), # unicode string + (bytes, str) # byte string + ]) + def test_with_unicode(self, func, compat_func): + index = Index(lrange(1000)) - def test_bytestring_with_unicode(self): - idx = Index(lrange(1000)) if PY3: - bytes(idx) + func(index) else: - str(idx) + compat_func(index) def test_intersect_str_dates(self): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] - i1 = Index(dt_dates, dtype=object) - i2 = Index(['aa'], dtype=object) - res = i2.intersection(i1) + index1 = Index(dt_dates, dtype=object) + index2 = Index(['aa'], dtype=object) + result = index2.intersection(index1) - assert len(res) == 0 + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize('op', [operator.eq, operator.ne, operator.gt, operator.ge, @@ -2413,8 +2421,8 @@ def test_generated_op_names(opname, indices): assert method.__name__ == opname -@pytest.mark.parametrize('idx_maker', tm.index_subclass_makers_generator()) -def test_index_subclass_constructor_wrong_kwargs(idx_maker): +@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator()) +def test_index_subclass_constructor_wrong_kwargs(index_maker): # GH #19348 with tm.assert_raises_regex(TypeError, 'unexpected keyword argument'): - 
idx_maker(foo='bar') + index_maker(foo='bar')
progress towards #20812 This should be it for parametrizing tests. Plan is to look at and clean up fixtures after this
https://api.github.com/repos/pandas-dev/pandas/pulls/20979
2018-05-08T16:27:54Z
2018-05-09T10:22:39Z
2018-05-09T10:22:39Z
2018-05-09T10:25:48Z
COMPAT: 32-bit indexing compat
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 858d08d73e603..2c40be17ce781 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1318,7 +1318,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): (indexer, missing) = labels.get_indexer_non_unique(objarr) # 'indexer' has dupes, create 'check' using 'missing' - check = np.zeros(len(objarr)) + check = np.zeros(len(objarr), dtype=np.intp) check[missing] = -1 mask = check == -1 @@ -2469,7 +2469,7 @@ def maybe_convert_indices(indices, n): if len(indices) == 0: # If list is empty, np.array will return float and cause indexing # errors. - return np.empty(0, dtype=np.int_) + return np.empty(0, dtype=np.intp) mask = indices < 0 if mask.any():
xref #20939
https://api.github.com/repos/pandas-dev/pandas/pulls/20977
2018-05-08T10:28:40Z
2018-05-08T11:39:48Z
2018-05-08T11:39:48Z
2018-05-08T11:40:13Z
CLN: simplify combine_first
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ffb2ad046158f..d43fb95a70555 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -43,7 +43,6 @@ is_extension_array_dtype, is_datetimetz, is_datetime64_any_dtype, - is_datetime64tz_dtype, is_bool_dtype, is_integer_dtype, is_float_dtype, @@ -52,7 +51,6 @@ is_dtype_equal, needs_i8_conversion, _get_dtype_from_object, - _ensure_float, _ensure_float64, _ensure_int64, _ensure_platform_int, @@ -4887,20 +4885,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): else: arr = func(series, otherSeries) - if do_fill: - arr = _ensure_float(arr) - arr[this_mask & other_mask] = np.nan - - # try to downcast back to the original dtype - if needs_i8_conversion_i: - # ToDo: This conversion should be handled in - # _maybe_cast_to_datetime but the change affects lot... - if is_datetime64tz_dtype(new_dtype): - arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz) - else: - arr = maybe_cast_to_datetime(arr, new_dtype) - else: - arr = maybe_downcast_to_dtype(arr, this_dtype) + arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr
https://api.github.com/repos/pandas-dev/pandas/pulls/20972
2018-05-07T10:26:28Z
2018-05-08T00:18:57Z
2018-05-08T00:18:57Z
2018-05-08T00:19:31Z
BUG: Fix isna cannot handle ambiguous typed list
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 05e0028047941..078a733a67a3e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1364,6 +1364,7 @@ Reshaping - Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`) - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`) - Bug in :func:`get_dummies`, and :func:`select_dtypes`, where duplicate column names caused incorrect behavior (:issue:`20848`) +- Bug in :func:`isna`, which cannot handle ambiguous typed lists (:issue:`20675`) Other ^^^^^ diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 3b2336bf19547..d9dc73434f5ac 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -120,7 +120,9 @@ def _isna_new(obj): return _isna_ndarraylike(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=isna)) - elif isinstance(obj, list) or hasattr(obj, '__array__'): + elif isinstance(obj, list): + return _isna_ndarraylike(np.asarray(obj, dtype=object)) + elif hasattr(obj, '__array__'): return _isna_ndarraylike(np.asarray(obj)) else: return obj is None @@ -146,7 +148,9 @@ def _isna_old(obj): return _isna_ndarraylike_old(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=_isna_old)) - elif isinstance(obj, list) or hasattr(obj, '__array__'): + elif isinstance(obj, list): + return _isna_ndarraylike_old(np.asarray(obj, dtype=object)) + elif hasattr(obj, '__array__'): return _isna_ndarraylike_old(np.asarray(obj)) else: return obj is None diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 365d8d762d673..ca9a2dc81fcc6 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -118,6 +118,11 @@ def test_isna_lists(self): exp = np.array([False, False]) tm.assert_numpy_array_equal(result, exp) 
+ # GH20675 + result = isna([np.NaN, 'world']) + exp = np.array([True, False]) + tm.assert_numpy_array_equal(result, exp) + def test_isna_nat(self): result = isna([NaT]) exp = np.array([True])
- [x] closes #20675 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20971
2018-05-07T00:34:35Z
2018-05-08T00:06:50Z
2018-05-08T00:06:50Z
2018-05-08T00:06:55Z
Sharey keyword for boxplot
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 97a5975dad9a6..5a1bcce9b5970 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -48,22 +48,26 @@ Bug Fixes ~~~~~~~~~ Groupby/Resample/Rolling +~~~~~~~~~~~~~~~~~~~~~~~~ - Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) - Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`) - Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True`` Data-type specific +~~~~~~~~~~~~~~~~~~ - Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`) - Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`) - Bug in :func:`pandas.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`) Sparse +~~~~~~ - Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`) Indexing +~~~~~~~~ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) - Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`) @@ -71,17 +75,26 @@ Indexing - Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`) - Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort 
correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`) +Plotting +~~~~~~~~ + +- New keywords (sharex, sharey) to turn on/off sharing of x/y-axis by subplots generated with pandas.DataFrame().groupby().boxplot() (:issue: `20968`) + I/O +~~~ - Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) - Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) - Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`) + Reshaping +~~~~~~~~~ - Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) - Bug in :func:`concat` warning message providing the wrong guidance for future behavior (:issue:`21101`) Other +~~~~~ - Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c555991ab01c0..8c713548d1ede 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2548,7 +2548,7 @@ def plot_group(group, ax): def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, rot=0, grid=True, ax=None, figsize=None, - layout=None, **kwds): + layout=None, sharex=False, sharey=True, **kwds): """ Make box plots from DataFrameGroupBy data. @@ -2567,6 +2567,14 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, figsize : A tuple (width, height) in inches layout : tuple (optional) (rows, columns) for the layout of the plot + sharex : bool, default False + Whether x-axes will be shared among subplots + + .. 
versionadded:: 0.23.1 + sharey : bool, default True + Whether y-axes will be shared among subplots + + .. versionadded:: 0.23.1 `**kwds` : Keyword Arguments All other plotting keyword arguments to be passed to matplotlib's boxplot function @@ -2598,7 +2606,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, if subplots is True: naxes = len(grouped) fig, axes = _subplots(naxes=naxes, squeeze=False, - ax=ax, sharex=False, sharey=True, + ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout) axes = _flatten(axes) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index ac02f5f4e4283..101713b06df8c 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -40,6 +40,14 @@ def setup_method(self, method): "C": np.arange(20) + np.random.uniform( size=20)}) + def _assert_ytickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + self._check_visible(ax.get_yticklabels(), visible=exp) + + def _assert_xtickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + self._check_visible(ax.get_xticklabels(), visible=exp) + @pytest.mark.slow def test_plot(self): df = self.tdf @@ -367,6 +375,57 @@ def test_subplots(self): for ax in axes: assert ax.get_legend() is None + def test_groupby_boxplot_sharey(self): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharey can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14], + 'b': [0.56, 0.84, 0.29, 0.56, 0.85], + 'c': [0, 1, 2, 3, 1]}, + index=[0, 1, 2, 3, 4]) + + # behavior without keyword + axes = df.groupby('c').boxplot() + expected = [True, False, True, False] + self._assert_ytickslabels_visibility(axes, expected) + + # set sharey=True should be identical + axes = df.groupby('c').boxplot(sharey=True) + expected = [True, False, True, False] + 
self._assert_ytickslabels_visibility(axes, expected) + + # sharey=False, all yticklabels should be visible + axes = df.groupby('c').boxplot(sharey=False) + expected = [True, True, True, True] + self._assert_ytickslabels_visibility(axes, expected) + + def test_groupby_boxplot_sharex(self): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharex can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14], + 'b': [0.56, 0.84, 0.29, 0.56, 0.85], + 'c': [0, 1, 2, 3, 1]}, + index=[0, 1, 2, 3, 4]) + + # behavior without keyword + axes = df.groupby('c').boxplot() + expected = [True, True, True, True] + self._assert_xtickslabels_visibility(axes, expected) + + # set sharex=False should be identical + axes = df.groupby('c').boxplot(sharex=False) + expected = [True, True, True, True] + self._assert_xtickslabels_visibility(axes, expected) + + # sharex=True, yticklabels should be visible + # only for bottom plots + axes = df.groupby('c').boxplot(sharex=True) + expected = [False, False, True, True] + self._assert_xtickslabels_visibility(axes, expected) + @pytest.mark.slow def test_subplots_timeseries(self): idx = date_range(start='2014-07-01', freq='M', periods=10)
…to _subplots(). .. code-block:: jupyter-notebook %pylab inline import pandas as pd N = 100 rand = random.random(N) clas = random.binomial(5,.5, N) df = pd.DataFrame({'Rand': rand-clas, 'Rand2': rand, 'Class': clas}, index= np.arange(N)) df.groupby('Class').boxplot(sharey=True, sharex=False) >>> TypeError: boxplot() got an unexpected keyword argument 'sharey' New Behavior: .. ipython:: jpyter-notebook: ... df.groupby('Class').boxplot(sharey=True, sharex=True) df.groupby('Class').boxplot(sharey=True, sharex=False) df.groupby('Class').boxplot(sharey=False, sharex=True) df.groupby('Class').boxplot(sharey=False, sharex=False) All leads to different behaviour. The shareing of axes both x and y can be turned on and off separately. To restore previous behavior, use boxplot() without keywords. Default is the previous behavior of sharey - [x ] closes #20918 - [x ] tests added / passed - [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20968
2018-05-06T17:03:31Z
2018-06-08T11:27:30Z
2018-06-08T11:27:29Z
2018-06-08T16:25:29Z
BUG: Fix wrong khash method definition
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index af1bee435eead..940505c01d6c6 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1254,6 +1254,7 @@ Indexing - Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`) - Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) - Bug in :meth:`IntervalIndex.get_loc` and :meth:`IntervalIndex.get_indexer` when used with an :class:`IntervalIndex` containing a single interval (:issue:`17284`, :issue:`20921`) +- Bug in ``.loc`` with a ``uint64`` indexer (:issue:`20722`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd index b1d965c3618cd..4c00e273b33b7 100644 --- a/pandas/_libs/khash.pxd +++ b/pandas/_libs/khash.pxd @@ -84,9 +84,9 @@ cdef extern from "khash_python.h": kh_uint64_t* kh_init_uint64() nogil void kh_destroy_uint64(kh_uint64_t*) nogil void kh_clear_uint64(kh_uint64_t*) nogil - khint_t kh_get_uint64(kh_uint64_t*, int64_t) nogil + khint_t kh_get_uint64(kh_uint64_t*, uint64_t) nogil void kh_resize_uint64(kh_uint64_t*, khint_t) nogil - khint_t kh_put_uint64(kh_uint64_t*, int64_t, int*) nogil + khint_t kh_put_uint64(kh_uint64_t*, uint64_t, int*) nogil void kh_del_uint64(kh_uint64_t*, khint_t) nogil bint kh_exist_uint64(kh_uint64_t*, khiter_t) nogil diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 6ccff7e898a6a..2e52154d7679b 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -784,3 +784,22 @@ def convert_nested_indexer(indexer_type, keys): index=pd.MultiIndex.from_product(keys)) tm.assert_series_equal(result, expected) + + def test_loc_uint64(self): + # GH20722 + # Test whether loc accept uint64 max value as index. 
+ s = pd.Series([1, 2], + index=[np.iinfo('uint64').max - 1, + np.iinfo('uint64').max]) + + result = s.loc[np.iinfo('uint64').max - 1] + expected = s.iloc[0] + assert result == expected + + result = s.loc[[np.iinfo('uint64').max - 1]] + expected = s.iloc[[0]] + tm.assert_series_equal(result, expected) + + result = s.loc[[np.iinfo('uint64').max - 1, + np.iinfo('uint64').max]] + tm.assert_series_equal(result, s)
- [x] closes #20722 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20966
2018-05-06T08:01:05Z
2018-05-08T10:35:11Z
2018-05-08T10:35:10Z
2018-05-08T10:35:17Z
BUG: Fix combine_first converts other columns type into floats unexpectedly
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 91575c311b409..83bd8ee9b3c74 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -867,3 +867,4 @@ Other - :meth:`DataFrame.nlargest` and :meth:`DataFrame.nsmallest` now returns the correct n values when keep != 'all' also when tied on the first columns (:issue:`22752`) - :meth:`~pandas.io.formats.style.Styler.bar` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` and setting clipping range with ``vmin`` and ``vmax`` (:issue:`21548` and :issue:`21526`). ``NaN`` values are also handled properly. - Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`) +- Bug in :meth:`DataFrame.combine_first` in which column types were unexpectedly converted to float (:issue:`20699`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f4b7ccb0fdf5b..6b6d0e9be931d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5072,9 +5072,17 @@ def combine(self, other, func, fill_value=None, overwrite=True): series[this_mask] = fill_value otherSeries[other_mask] = fill_value - # if we have different dtypes, possibly promote - new_dtype = this_dtype - if not is_dtype_equal(this_dtype, other_dtype): + if col not in self.columns: + # If self DataFrame does not have col in other DataFrame, + # try to promote series, which is all NaN, as other_dtype. + new_dtype = other_dtype + try: + series = series.astype(new_dtype, copy=False) + except ValueError: + # e.g. 
new_dtype is integer types + pass + else: + # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) @@ -5153,6 +5161,11 @@ def combiner(x, y, needs_i8_conversion=False): else: mask = isna(x_values) + # If the column y in other DataFrame is not in first DataFrame, + # just return y_values. + if y.name not in self.columns: + return y_values + return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index 15ca65395e4fc..d1f921bc5e894 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -4,6 +4,7 @@ from datetime import datetime +import pytest import numpy as np from numpy import nan @@ -750,6 +751,17 @@ def test_combine_first_int(self): tm.assert_frame_equal(res, df1) assert res['a'].dtype == 'int64' + @pytest.mark.parametrize("val", [1, 1.0]) + def test_combine_first_with_asymmetric_other(self, val): + # see gh-20699 + df1 = pd.DataFrame({'isNum': [val]}) + df2 = pd.DataFrame({'isBool': [True]}) + + res = df1.combine_first(df2) + exp = pd.DataFrame({'isBool': [True], 'isNum': [val]}) + + tm.assert_frame_equal(res, exp) + def test_concat_datetime_datetime64_frame(self): # #2624 rows = []
- [x] closes #20699 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20965
2018-05-06T06:54:02Z
2018-10-09T12:54:03Z
2018-10-09T12:54:03Z
2018-10-09T12:54:35Z
DOC: add reshaping visuals to the docs (Reshaping and Pivot Tables)
diff --git a/doc/source/_static/reshaping_melt.png b/doc/source/_static/reshaping_melt.png new file mode 100644 index 0000000000000..d0c4e77655e60 Binary files /dev/null and b/doc/source/_static/reshaping_melt.png differ diff --git a/doc/source/_static/reshaping_pivot.png b/doc/source/_static/reshaping_pivot.png new file mode 100644 index 0000000000000..c6c37a80744d4 Binary files /dev/null and b/doc/source/_static/reshaping_pivot.png differ diff --git a/doc/source/_static/reshaping_stack.png b/doc/source/_static/reshaping_stack.png new file mode 100644 index 0000000000000..924f916ae0d37 Binary files /dev/null and b/doc/source/_static/reshaping_stack.png differ diff --git a/doc/source/_static/reshaping_unstack.png b/doc/source/_static/reshaping_unstack.png new file mode 100644 index 0000000000000..3e14cdd1ee1f7 Binary files /dev/null and b/doc/source/_static/reshaping_unstack.png differ diff --git a/doc/source/_static/reshaping_unstack_0.png b/doc/source/_static/reshaping_unstack_0.png new file mode 100644 index 0000000000000..eceddf73eea9e Binary files /dev/null and b/doc/source/_static/reshaping_unstack_0.png differ diff --git a/doc/source/_static/reshaping_unstack_1.png b/doc/source/_static/reshaping_unstack_1.png new file mode 100644 index 0000000000000..ab0ae3796dcc1 Binary files /dev/null and b/doc/source/_static/reshaping_unstack_1.png differ diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 71ddaa13fdd8a..250a1808e496e 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -60,6 +60,8 @@ To select out everything for variable ``A`` we could do: df[df['variable'] == 'A'] +.. image:: _static/reshaping_pivot.png + But suppose we wish to do time series operations with the variables. A better representation would be where the ``columns`` are the unique variables and an ``index`` of dates identifies individual observations. To reshape the data into @@ -96,10 +98,12 @@ are homogeneously-typed. 
Reshaping by stacking and unstacking ------------------------------------ -Closely related to the :meth:`~DataFrame.pivot` method are the related -:meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on -``Series`` and ``DataFrame``. These methods are designed to work together with -``MultiIndex`` objects (see the section on :ref:`hierarchical indexing +.. image:: _static/reshaping_stack.png + +Closely related to the :meth:`~DataFrame.pivot` method are the related +:meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on +``Series`` and ``DataFrame``. These methods are designed to work together with +``MultiIndex`` objects (see the section on :ref:`hierarchical indexing <advanced.hierarchical>`). Here are essentially what these methods do: - ``stack``: "pivot" a level of the (possibly hierarchical) column labels, @@ -109,6 +113,8 @@ Closely related to the :meth:`~DataFrame.pivot` method are the related (possibly hierarchical) row index to the column axis, producing a reshaped ``DataFrame`` with a new inner-most level of column labels. +.. image:: _static/reshaping_unstack.png + The clearest way to explain is by example. Let's take a prior example data set from the hierarchical indexing section: @@ -149,6 +155,8 @@ unstacks the **last level**: .. _reshaping.unstack_by_name: +.. image:: _static/reshaping_unstack_1.png + If the indexes have names, you can use the level names instead of specifying the level numbers: @@ -156,6 +164,9 @@ the level numbers: stacked.unstack('second') + +.. image:: _static/reshaping_unstack_0.png + Notice that the ``stack`` and ``unstack`` methods implicitly sort the index levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa, will result in a **sorted** copy of the original ``DataFrame`` or ``Series``: @@ -266,11 +277,13 @@ the right thing: Reshaping by Melt ----------------- +.. 
image:: _static/reshaping_melt.png + The top-level :func:`~pandas.melt` function and the corresponding :meth:`DataFrame.melt` -are useful to massage a ``DataFrame`` into a format where one or more columns -are *identifier variables*, while all other columns, considered *measured -variables*, are "unpivoted" to the row axis, leaving just two non-identifier -columns, "variable" and "value". The names of those columns can be customized +are useful to massage a ``DataFrame`` into a format where one or more columns +are *identifier variables*, while all other columns, considered *measured +variables*, are "unpivoted" to the row axis, leaving just two non-identifier +columns, "variable" and "value". The names of those columns can be customized by supplying the ``var_name`` and ``value_name`` parameters. For instance, @@ -285,7 +298,7 @@ For instance, cheese.melt(id_vars=['first', 'last']) cheese.melt(id_vars=['first', 'last'], var_name='quantity') -Another way to transform is to use the :func:`~pandas.wide_to_long` panel data +Another way to transform is to use the :func:`~pandas.wide_to_long` panel data convenience function. It is less flexible than :func:`~pandas.melt`, but more user-friendly. @@ -332,8 +345,8 @@ While :meth:`~DataFrame.pivot` provides general purpose pivoting with various data types (strings, numerics, etc.), pandas also provides :func:`~pandas.pivot_table` for pivoting with aggregation of numeric data. -The function :func:`~pandas.pivot_table` can be used to create spreadsheet-style -pivot tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced +The function :func:`~pandas.pivot_table` can be used to create spreadsheet-style +pivot tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced strategies. 
It takes a number of arguments: @@ -485,7 +498,7 @@ using the ``normalize`` argument: pd.crosstab(df.A, df.B, normalize='columns') ``crosstab`` can also be passed a third ``Series`` and an aggregation function -(``aggfunc``) that will be applied to the values of the third ``Series`` within +(``aggfunc``) that will be applied to the values of the third ``Series`` within each group defined by the first two ``Series``: .. ipython:: python @@ -508,8 +521,8 @@ Finally, one can also add margins or normalize this output. Tiling ------ -The :func:`~pandas.cut` function computes groupings for the values of the input -array and is often used to transform continuous variables to discrete or +The :func:`~pandas.cut` function computes groupings for the values of the input +array and is often used to transform continuous variables to discrete or categorical variables: .. ipython:: python @@ -539,8 +552,8 @@ used to bin the passed data.:: Computing indicator / dummy variables ------------------------------------- -To convert a categorical variable into a "dummy" or "indicator" ``DataFrame``, -for example a column in a ``DataFrame`` (a ``Series``) which has ``k`` distinct +To convert a categorical variable into a "dummy" or "indicator" ``DataFrame``, +for example a column in a ``DataFrame`` (a ``Series``) which has ``k`` distinct values, can derive a ``DataFrame`` containing ``k`` columns of 1s and 0s using :func:`~pandas.get_dummies`: @@ -577,7 +590,7 @@ This function is often used along with discretization functions like ``cut``: See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`. :func:`get_dummies` also accepts a ``DataFrame``. By default all categorical -variables (categorical in the statistical sense, those with `object` or +variables (categorical in the statistical sense, those with `object` or `categorical` dtype) are encoded as dummy variables. 
@@ -587,7 +600,7 @@ variables (categorical in the statistical sense, those with `object` or 'C': [1, 2, 3]}) pd.get_dummies(df) -All non-object columns are included untouched in the output. You can control +All non-object columns are included untouched in the output. You can control the columns that are encoded with the ``columns`` keyword. .. ipython:: python @@ -640,7 +653,7 @@ When a column contains only one level, it will be omitted in the result. pd.get_dummies(df, drop_first=True) -By default new columns will have ``np.uint8`` dtype. +By default new columns will have ``np.uint8`` dtype. To choose another dtype, use the``dtype`` argument: .. ipython:: python
- [x] closes #20898 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20960
2018-05-05T16:44:22Z
2018-05-08T20:44:01Z
2018-05-08T20:44:01Z
2018-05-08T20:44:04Z
BUG in .groupby.apply when applying a function that has mixed data types and the user supplied function can fail on the grouping column
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index eb6c212731822..a77620fe6b36b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1320,6 +1320,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.resample` that dropped timezone information (:issue:`13238`) - Bug in :func:`DataFrame.groupby` where transformations using ``np.all`` and ``np.any`` were raising a ``ValueError`` (:issue:`20653`) - Bug in :func:`DataFrame.resample` where ``ffill``, ``bfill``, ``pad``, ``backfill``, ``fillna``, ``interpolate``, and ``asfreq`` were ignoring ``loffset``. (:issue:`20744`) +- Bug in :func:`DataFrame.groupby` when applying a function that has mixed data types and the user supplied function can fail on the grouping column (:issue:`20949`) Sparse ^^^^^^ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 164d1bebd2929..df7a5dc9dc173 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -6,6 +6,7 @@ import warnings import copy from textwrap import dedent +from contextlib import contextmanager from pandas.compat import ( zip, range, lzip, @@ -549,6 +550,16 @@ def f(self): return attr +@contextmanager +def _group_selection_context(groupby): + """ + set / reset the _group_selection_context + """ + groupby._set_group_selection() + yield groupby + groupby._reset_group_selection() + + class _GroupBy(PandasObject, SelectionMixin): _group_selection = None _apply_whitelist = frozenset([]) @@ -696,26 +707,32 @@ def _reset_group_selection(self): each group regardless of whether a group selection was previously set. """ if self._group_selection is not None: - self._group_selection = None # GH12839 clear cached selection too when changing group selection + self._group_selection = None self._reset_cache('_selected_obj') def _set_group_selection(self): """ Create group based selection. Used when selection is not passed directly but instead via a grouper. 
+ + NOTE: this should be paired with a call to _reset_group_selection """ grp = self.grouper - if self.as_index and getattr(grp, 'groupings', None) is not None and \ - self.obj.ndim > 1: - ax = self.obj._info_axis - groupers = [g.name for g in grp.groupings - if g.level is None and g.in_axis] + if not (self.as_index and + getattr(grp, 'groupings', None) is not None and + self.obj.ndim > 1 and + self._group_selection is None): + return + + ax = self.obj._info_axis + groupers = [g.name for g in grp.groupings + if g.level is None and g.in_axis] - if len(groupers): - self._group_selection = ax.difference(Index(groupers)).tolist() - # GH12839 clear selected obj cache when group selection changes - self._reset_cache('_selected_obj') + if len(groupers): + # GH12839 clear selected obj cache when group selection changes + self._group_selection = ax.difference(Index(groupers)).tolist() + self._reset_cache('_selected_obj') def _set_result_index_ordered(self, result): # set the result index on the passed values object and @@ -781,10 +798,10 @@ def _make_wrapper(self, name): type(self).__name__)) raise AttributeError(msg) - # need to setup the selection - # as are not passed directly but in the grouper self._set_group_selection() + # need to setup the selection + # as are not passed directly but in the grouper f = getattr(self._selected_obj, name) if not isinstance(f, types.MethodType): return self.apply(lambda self: getattr(self, name)) @@ -897,7 +914,22 @@ def f(g): # ignore SettingWithCopy here in case the user mutates with option_context('mode.chained_assignment', None): - return self._python_apply_general(f) + try: + result = self._python_apply_general(f) + except Exception: + + # gh-20949 + # try again, with .apply acting as a filtering + # operation, by excluding the grouping column + # This would normally not be triggered + # except if the udf is trying an operation that + # fails on *some* columns, e.g. 
a numeric operation + # on a string grouper column + + with _group_selection_context(self): + return self._python_apply_general(f) + + return result def _python_apply_general(self, f): keys, values, mutated = self.grouper.apply(f, self._selected_obj, @@ -1275,9 +1307,9 @@ def mean(self, *args, **kwargs): except GroupByError: raise except Exception: # pragma: no cover - self._set_group_selection() - f = lambda x: x.mean(axis=self.axis, **kwargs) - return self._python_agg_general(f) + with _group_selection_context(self): + f = lambda x: x.mean(axis=self.axis, **kwargs) + return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) @@ -1293,13 +1325,12 @@ def median(self, **kwargs): raise except Exception: # pragma: no cover - self._set_group_selection() - def f(x): if isinstance(x, np.ndarray): x = Series(x) return x.median(axis=self.axis, **kwargs) - return self._python_agg_general(f) + with _group_selection_context(self): + return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) @@ -1336,9 +1367,9 @@ def var(self, ddof=1, *args, **kwargs): if ddof == 1: return self._cython_agg_general('var', **kwargs) else: - self._set_group_selection() f = lambda x: x.var(ddof=ddof, **kwargs) - return self._python_agg_general(f) + with _group_selection_context(self): + return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) @@ -1384,6 +1415,7 @@ def f(self, **kwargs): kwargs['numeric_only'] = numeric_only if 'min_count' not in kwargs: kwargs['min_count'] = min_count + self._set_group_selection() try: return self._cython_agg_general( @@ -1453,11 +1485,11 @@ def ohlc(self): @Appender(DataFrame.describe.__doc__) def describe(self, **kwargs): - self._set_group_selection() - result = self.apply(lambda x: x.describe(**kwargs)) - if self.axis == 1: - return result.T - return result.unstack() + with _group_selection_context(self): + result = self.apply(lambda x: x.describe(**kwargs)) + if 
self.axis == 1: + return result.T + return result.unstack() @Substitution(name='groupby') @Appender(_doc_template) @@ -1778,13 +1810,12 @@ def ngroup(self, ascending=True): .cumcount : Number the rows in each group. """ - self._set_group_selection() - - index = self._selected_obj.index - result = Series(self.grouper.group_info[0], index) - if not ascending: - result = self.ngroups - 1 - result - return result + with _group_selection_context(self): + index = self._selected_obj.index + result = Series(self.grouper.group_info[0], index) + if not ascending: + result = self.ngroups - 1 - result + return result @Substitution(name='groupby') def cumcount(self, ascending=True): @@ -1835,11 +1866,10 @@ def cumcount(self, ascending=True): .ngroup : Number the groups themselves. """ - self._set_group_selection() - - index = self._selected_obj.index - cumcounts = self._cumcount_array(ascending=ascending) - return Series(cumcounts, index) + with _group_selection_context(self): + index = self._selected_obj.index + cumcounts = self._cumcount_array(ascending=ascending) + return Series(cumcounts, index) @Substitution(name='groupby') @Appender(_doc_template) @@ -3768,7 +3798,6 @@ def nunique(self, dropna=True): @Appender(Series.describe.__doc__) def describe(self, **kwargs): - self._set_group_selection() result = self.apply(lambda x: x.describe(**kwargs)) if self.axis == 1: return result.T @@ -4411,6 +4440,7 @@ def transform(self, func, *args, **kwargs): return self._transform_general(func, *args, **kwargs) obj = self._obj_with_exclusions + # nuiscance columns if not result.columns.equals(obj.columns): return self._transform_general(func, *args, **kwargs) diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 5ca10fe1af9d1..07eef2d87feb3 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -515,3 +515,16 @@ def test_func(x): index=index2) tm.assert_frame_equal(result1, expected1) 
tm.assert_frame_equal(result2, expected2) + + +def test_apply_with_mixed_types(): + # gh-20949 + df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1, 2, 3], 'C': [4, 6, 5]}) + g = df.groupby('A') + + result = g.transform(lambda x: x / x.sum()) + expected = pd.DataFrame({'B': [1 / 3., 2 / 3., 1], 'C': [0.4, 0.6, 1.0]}) + tm.assert_frame_equal(result, expected) + + result = g.apply(lambda x: x / x.sum()) + tm.assert_frame_equal(result, expected)
closes #20949
https://api.github.com/repos/pandas-dev/pandas/pulls/20959
2018-05-05T14:29:27Z
2018-05-08T00:19:52Z
2018-05-08T00:19:52Z
2018-05-08T00:20:14Z
ENH: Return DatetimeIndex or TimedeltaIndex bins for q/cut when input is datelike
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index dfb7a3675fdd5..abfa57dc09334 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -161,6 +161,7 @@ Datetimelike API Changes - For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with non-``None`` ``freq`` attribute, addition or subtraction of integer-dtyped array or ``Index`` will return an object of the same class (:issue:`19959`) - :class:`DateOffset` objects are now immutable. Attempting to alter one of these will now raise ``AttributeError`` (:issue:`21341`) - :class:`PeriodIndex` subtraction of another ``PeriodIndex`` will now return an object-dtype :class:`Index` of :class:`DateOffset` objects instead of raising a ``TypeError`` (:issue:`20049`) +- :func:`cut` and :func:`qcut` now returns a :class:`DatetimeIndex` or :class:`TimedeltaIndex` bins when the input is datetime or timedelta dtype respectively and ``retbins=True`` (:issue:`19891`) .. _whatsnew_0240.api.other: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 8bbf939e110e9..863ebc6354136 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -11,6 +11,7 @@ is_datetime64_dtype, is_timedelta64_dtype, is_datetime64tz_dtype, + is_datetime_or_timedelta_dtype, _ensure_int64) import pandas.core.algorithms as algos @@ -18,7 +19,7 @@ from pandas._libs.lib import infer_dtype from pandas import (to_timedelta, to_datetime, Categorical, Timestamp, Timedelta, - Series, Interval, IntervalIndex) + Series, Index, Interval, IntervalIndex) import numpy as np @@ -364,6 +365,8 @@ def _bins_to_cuts(x, bins, right=True, labels=None, result = result.astype(np.float64) np.putmask(result, na_mask, np.nan) + bins = _convert_bin_to_datelike_type(bins, dtype) + return result, bins @@ -428,6 +431,26 @@ def _convert_bin_to_numeric_type(bins, dtype): return bins +def _convert_bin_to_datelike_type(bins, dtype): + """ + Convert bins to a DatetimeIndex or 
TimedeltaIndex if the orginal dtype is + datelike + + Parameters + ---------- + bins : list-like of bins + dtype : dtype of data + + Returns + ------- + bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is + datelike + """ + if is_datetime64tz_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype): + bins = Index(bins.astype(np.int64), dtype=dtype) + return bins + + def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None): """ based on the dtype, return our labels """ diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 807fb2530603a..44de3e93d42bf 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -7,7 +7,8 @@ import pandas as pd from pandas import (DataFrame, Series, isna, to_datetime, DatetimeIndex, Index, Timestamp, Interval, IntervalIndex, Categorical, - cut, qcut, date_range, NaT, TimedeltaIndex) + cut, qcut, date_range, timedelta_range, NaT, + TimedeltaIndex) from pandas.tseries.offsets import Nano, Day import pandas.util.testing as tm from pandas.api.types import CategoricalDtype as CDT @@ -605,3 +606,38 @@ def f(): mask = result.isna() tm.assert_numpy_array_equal( mask, np.array([False, True, True, True, True])) + + @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Pacific']) + def test_datetime_cut_roundtrip(self, tz): + # GH 19891 + s = Series(date_range('20180101', periods=3, tz=tz)) + result, result_bins = cut(s, 2, retbins=True) + expected = cut(s, result_bins) + tm.assert_series_equal(result, expected) + expected_bins = DatetimeIndex(['2017-12-31 23:57:07.200000', + '2018-01-02 00:00:00', + '2018-01-03 00:00:00']) + expected_bins = expected_bins.tz_localize(tz) + tm.assert_index_equal(result_bins, expected_bins) + + def test_timedelta_cut_roundtrip(self): + # GH 19891 + s = Series(timedelta_range('1day', periods=3)) + result, result_bins = cut(s, 2, retbins=True) + expected = cut(s, result_bins) + tm.assert_series_equal(result, 
expected) + expected_bins = TimedeltaIndex(['0 days 23:57:07.200000', + '2 days 00:00:00', + '3 days 00:00:00']) + tm.assert_index_equal(result_bins, expected_bins) + + @pytest.mark.parametrize('arg, expected_bins', [ + [timedelta_range('1day', periods=3), + TimedeltaIndex(['1 days', '2 days', '3 days'])], + [date_range('20180101', periods=3), + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'])]]) + def test_datelike_qcut_bins(self, arg, expected_bins): + # GH 19891 + s = Series(arg) + result, result_bins = qcut(s, 2, retbins=True) + tm.assert_index_equal(result_bins, expected_bins)
- [x] closes #19891 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20956
2018-05-05T05:29:13Z
2018-07-03T15:34:15Z
2018-07-03T15:34:15Z
2018-07-03T16:41:34Z
CLN: add missing space in package's description
diff --git a/setup.py b/setup.py index a436f451a2a55..6febe674fb2a1 100755 --- a/setup.py +++ b/setup.py @@ -134,7 +134,7 @@ def build_extensions(self): _build_ext.build_extensions(self) -DESCRIPTION = ("Powerful data structures for data analysis, time series," +DESCRIPTION = ("Powerful data structures for data analysis, time series, " "and statistics") LONG_DESCRIPTION = """ **pandas** is a Python package providing fast, flexible, and expressive data
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry The typo fixed here is in the package description, so it shows up in `pip show`. I'd say this fix needs no issue or whatsnew entry; if you feel otherwise, please say so and I'll open an issue and resubmit with a whatsnew entry. ``` $ pip show pandas Name: pandas Version: 0.22.0 Summary: Powerful data structures for data analysis, time series,and statistics Home-page: http://pandas.pydata.org Author: The PyData Development Team Author-email: pydata@googlegroups.com [...] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20950
2018-05-04T11:53:49Z
2018-05-05T12:43:45Z
2018-05-05T12:43:45Z
2018-05-05T12:43:48Z
Allow drop bins when using the cut function
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6036ef7e221fb..750227cd59f26 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -525,6 +525,7 @@ Other Enhancements library. (:issue:`20564`) - Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`) - :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`) +- :func:`cut` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`20947`) - :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 118198ea0320d..8bbf939e110e9 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -24,7 +24,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, - include_lowest=False): + include_lowest=False, duplicates='raise'): """ Bin values into discrete intervals. @@ -65,6 +65,10 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. + + .. versionadded:: 0.23.0 Returns ------- @@ -85,7 +89,8 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed - bins. 
For an IntervalIndex `bins`, this is equal to `bins`. + bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For + an IntervalIndex `bins`, this is equal to `bins`. See Also -------- @@ -144,6 +149,32 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... + Passing a Series as an input returns a Series with mapping value. + It is used to map numerically to intervals based on bins. + + >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), + ... index=['a', 'b', 'c', 'd', 'e']) + >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) + ... # doctest: +ELLIPSIS + (a 0.0 + b 1.0 + c 2.0 + d 3.0 + e 4.0 + dtype: float64, array([0, 2, 4, 6, 8])) + + Use `drop` optional when bins is not unique + + >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, + ... right=False, duplicates='drop') + ... # doctest: +ELLIPSIS + (a 0.0 + b 1.0 + c 2.0 + d 3.0 + e 3.0 + dtype: float64, array([0, 2, 4, 6, 8])) + Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 
0 is to the left of the first bin (which is closed on the right), and 1.5 @@ -199,7 +230,8 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, - dtype=dtype) + dtype=dtype, + duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name) diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 8d093f2784ba1..5ea27f9e34e1c 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -4,6 +4,7 @@ import numpy as np from pandas.compat import zip +import pandas as pd from pandas import (DataFrame, Series, isna, to_datetime, DatetimeIndex, Index, Timestamp, Interval, IntervalIndex, Categorical, cut, qcut, date_range, NaT, TimedeltaIndex) @@ -337,6 +338,21 @@ def test_series_retbins(self): CDT(ordered=True)) tm.assert_series_equal(result, expected) + def test_cut_duplicates_bin(self): + # issue 20947 + values = Series(np.array([1, 3, 5, 7, 9]), + index=["a", "b", "c", "d", "e"]) + bins = [0, 2, 4, 6, 10, 10] + result = cut(values, bins, duplicates='drop') + expected = cut(values, pd.unique(bins)) + tm.assert_series_equal(result, expected) + + pytest.raises(ValueError, cut, values, bins) + pytest.raises(ValueError, cut, values, bins, duplicates='raise') + + # invalid + pytest.raises(ValueError, cut, values, bins, duplicates='foo') + def test_qcut_duplicates_bin(self): # GH 7751 values = [0, 0, 0, 0, 1, 2, 3]
- [ ] if cut(x, bins=[0, 1, 2, 3, 4, 5, 6, 6, 8, 8, 10], labels=False, retbins=True, right=False) will raise ValueError: You can drop duplicate edges by setting the 'duplicates' kwarg, so add 'duplicates' parameters to the cut function.
https://api.github.com/repos/pandas-dev/pandas/pulls/20947
2018-05-04T02:48:21Z
2018-05-10T18:27:48Z
2018-05-10T18:27:48Z
2018-05-11T02:11:25Z
BUG: Fix IntervalIndex.get_loc/get_indexer for IntervalIndex of length one
diff --git a/doc/source/api.rst b/doc/source/api.rst index 93edd090d846b..d00e5511f1100 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1632,6 +1632,8 @@ IntervalIndex Components IntervalIndex.length IntervalIndex.values IntervalIndex.is_non_overlapping_monotonic + IntervalIndex.get_loc + IntervalIndex.get_indexer .. _api.multiindex: diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4ad40fe0f7f2b..09ca4f403399b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1245,6 +1245,7 @@ Indexing - Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`) - Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`) - Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) +- Bug in :meth:`IntervalIndex.get_loc` and :meth:`IntervalIndex.get_indexer` when used with an :class:`IntervalIndex` containing a single interval (:issue:`17284`, :issue:`20921`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 54800d0d76d2e..766ac7b14120e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -159,20 +159,22 @@ class IntervalIndex(IntervalMixin, Index): Attributes ---------- - left - right closed - mid + is_non_overlapping_monotonic + left length + mid + right values - is_non_overlapping_monotonic Methods ------- + contains from_arrays - from_tuples from_breaks - contains + from_tuples + get_indexer + get_loc Examples --------- @@ -938,8 +940,11 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False): if isinstance(label, IntervalMixin): raise NotImplementedError + # GH 20921: "not is_monotonic_increasing" for the second condition + # instead of "is_monotonic_decreasing" to account for single element + # indexes 
being both increasing and decreasing if ((side == 'left' and self.left.is_monotonic_increasing) or - (side == 'right' and self.left.is_monotonic_decreasing)): + (side == 'right' and not self.left.is_monotonic_increasing)): sub_idx = self.right if self.open_right or exclude_label: label = _get_next_label(label) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 71a6f78125004..9920809a18a24 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -497,6 +497,14 @@ def test_get_loc_interval(self): pytest.raises(KeyError, self.index.get_loc, Interval(-1, 0, 'left')) + # Make consistent with test_interval_new.py (see #16316, #16386) + @pytest.mark.parametrize('item', [3, Interval(1, 4)]) + def test_get_loc_length_one(self, item, closed): + # GH 20921 + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + result = index.get_loc(item) + assert result == 0 + # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_get_indexer(self): actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) @@ -544,6 +552,16 @@ def test_get_indexer_subintervals(self): expected = np.array([0, 0, 0], dtype='intp') tm.assert_numpy_array_equal(actual, expected) + # Make consistent with test_interval_new.py (see #16316, #16386) + @pytest.mark.parametrize('item', [ + [3], np.arange(1, 5), [Interval(1, 4)], interval_range(1, 4)]) + def test_get_indexer_length_one(self, item, closed): + # GH 17284 + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + result = index.get_indexer(item) + expected = np.array([0] * len(item), dtype='intp') + tm.assert_numpy_array_equal(result, expected) + # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_contains(self): # Only endpoints are valid.
- [X] closes #17284 - [X] closes #20921 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20946
2018-05-04T00:25:09Z
2018-05-05T12:53:37Z
2018-05-05T12:53:36Z
2018-05-05T19:17:56Z
Update link to NumPy Docstring Standard explanation
diff --git a/doc/README.rst b/doc/README.rst index efa21fdd3a2d9..12950d323f5d3 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -42,7 +42,7 @@ Some other important things to know about the docs: - The docstrings follow the **Numpy Docstring Standard** which is used widely in the Scientific Python community. This standard specifies the format of the different sections of the docstring. See `this document - <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_ + <https://numpydoc.readthedocs.io/en/latest/>`_ for a detailed explanation, or look at some of the existing functions to extend it in a similar manner.
- [ ] whatsnew Updated link to NumPy DocString from `https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt` to `https://numpydoc.readthedocs.io/en/latest/`
https://api.github.com/repos/pandas-dev/pandas/pulls/20942
2018-05-03T10:19:32Z
2018-05-03T10:22:30Z
2018-05-03T10:22:30Z
2018-05-03T10:22:33Z
DOC: followup to #20583, observed kwarg for .groupby
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 3616a7e1b41d2..da13a34cccfea 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -994,7 +994,7 @@ is only interesting over one column (here ``colname``), it may be filtered Handling of (un)observed Categorical values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When using a ``Categorical`` grouper (as a single or as part of multipler groupers), the ``observed`` keyword +When using a ``Categorical`` grouper (as a single grouper, or as part of multipler groupers), the ``observed`` keyword controls whether to return a cartesian product of all possible groupers values (``observed=False``) or only those that are observed groupers (``observed=True``). diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4ad40fe0f7f2b..8f1ec0c108ee1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -419,9 +419,11 @@ documentation. If you build an extension array, publicize it on our Categorical Groupers has gained an observed keyword ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In previous versions, grouping by 1 or more categorical columns would result in an index that was the cartesian product of all of the categories for -each grouper, not just the observed values.``.groupby()`` has gained the ``observed`` keyword to toggle this behavior. The default remains backward -compatible (generate a cartesian product). (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`) +Grouping by a categorical includes the unobserved categories in the output. +When grouping with multiple groupers, this means you get the cartesian product of all the +categories, including combinations where there are no observations, which can result in a large +number of groupers. We have added a keyword ``observed`` to control this behavior, it defaults to +``observed=False`` for backward-compatiblity. 
(:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) .. ipython:: python diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e96a2a9f08520..343f36eabc0d7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6584,7 +6584,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): axis=axis, inplace=inplace) def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, - group_keys=True, squeeze=False, observed=None, **kwargs): + group_keys=True, squeeze=False, observed=False, **kwargs): """ Group series using mapper (dict or key function, apply given function to group, return result as series) or by a series of columns. @@ -6617,11 +6617,10 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, squeeze : boolean, default False reduce the dimensionality of the return type if possible, otherwise return a consistent type - observed : boolean, default None - if True: only show observed values for categorical groupers. - if False: show all values for categorical groupers. - if None: if any categorical groupers, show a FutureWarning, - default to False. + observed : boolean, default False + This only applies if any of the groupers are Categoricals + If True: only show observed values for categorical groupers. + If False: show all values for categorical groupers. .. 
versionadded:: 0.23.0 diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f78f7cb625218..164d1bebd2929 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -556,7 +556,7 @@ class _GroupBy(PandasObject, SelectionMixin): def __init__(self, obj, keys=None, axis=0, level=None, grouper=None, exclusions=None, selection=None, as_index=True, sort=True, group_keys=True, squeeze=False, - observed=None, **kwargs): + observed=False, **kwargs): self._selection = selection @@ -2907,7 +2907,7 @@ class Grouping(object): """ def __init__(self, index, grouper=None, obj=None, name=None, level=None, - sort=True, observed=None, in_axis=False): + sort=True, observed=False, in_axis=False): self.name = name self.level = level @@ -2964,12 +2964,6 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # a passed Categorical elif is_categorical_dtype(self.grouper): - # observed can be True/False/None - # we treat None as False. If in the future - # we need to warn if observed is not passed - # then we have this option - # gh-20583 - self.all_grouper = self.grouper self.grouper = self.grouper._codes_for_groupby( self.sort, observed) @@ -3088,7 +3082,7 @@ def groups(self): def _get_grouper(obj, key=None, axis=0, level=None, sort=True, - observed=None, mutated=False, validate=True): + observed=False, mutated=False, validate=True): """ create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. @@ -4734,26 +4728,28 @@ def _wrap_agged_blocks(self, items, blocks): def _reindex_output(self, result): """ - if we have categorical groupers, then we want to make sure that + If we have categorical groupers, then we want to make sure that we have a fully reindex-output to the levels. These may have not participated in the groupings (e.g. 
may have all been - nan groups) + nan groups); This can re-expand the output space """ - # TODO(jreback): remove completely - # when observed parameter is defaulted to True - # gh-20583 - - if self.observed: - return result - + # we need to re-expand the output space to accomodate all values + # whether observed or not in the cartesian product of our groupes groupings = self.grouper.groupings if groupings is None: return result elif len(groupings) == 1: return result + + # if we only care about the observed values + # we are done + elif self.observed: + return result + + # reindexing only applies to a Categorical grouper elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex)) for ping in groupings): return result
closes #20902
https://api.github.com/repos/pandas-dev/pandas/pulls/20941
2018-05-03T10:16:31Z
2018-05-05T12:44:14Z
2018-05-05T12:44:14Z
2018-05-05T12:44:44Z
BUG: cant modify df with duplicate index (#17105)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e3b4eb5e22dec..c1f588b0072fd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1253,6 +1253,7 @@ Indexing - Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`) - Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`) - Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) +- Bug in performing in-place operations on a ``DataFrame`` with a duplicate ``Index`` (:issue:`17105`) - Bug in :meth:`IntervalIndex.get_loc` and :meth:`IntervalIndex.get_indexer` when used with an :class:`IntervalIndex` containing a single interval (:issue:`17284`, :issue:`20921`) MultiIndex diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7a7e47803c240..858d08d73e603 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1318,7 +1318,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): (indexer, missing) = labels.get_indexer_non_unique(objarr) # 'indexer' has dupes, create 'check' using 'missing' - check = np.zeros_like(objarr) + check = np.zeros(len(objarr)) check[missing] = -1 mask = check == -1 diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index b887b1c9f1218..6d74ce54faa94 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -1929,6 +1929,32 @@ def test_iloc_duplicates(self): expected = df.take([0], axis=1) assert_frame_equal(result, expected) + def test_loc_duplicates(self): + # gh-17105 + + # insert a duplicate element to the index + trange = pd.date_range(start=pd.Timestamp(year=2017, month=1, day=1), + end=pd.Timestamp(year=2017, month=1, day=5)) + + trange = trange.insert(loc=5, + item=pd.Timestamp(year=2017, month=1, day=5)) + + df = 
pd.DataFrame(0, index=trange, columns=["A", "B"]) + bool_idx = np.array([False, False, False, False, False, True]) + + # assignment + df.loc[trange[bool_idx], "A"] = 6 + + expected = pd.DataFrame({'A': [0, 0, 0, 0, 6, 6], + 'B': [0, 0, 0, 0, 0, 0]}, + index=trange) + tm.assert_frame_equal(df, expected) + + # in-place + df = pd.DataFrame(0, index=trange, columns=["A", "B"]) + df.loc[trange[bool_idx], "A"] += 6 + tm.assert_frame_equal(df, expected) + def test_iloc_sparse_propegate_fill_value(self): from pandas.core.sparse.api import SparseDataFrame df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
- [x] closes #17105 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Fixing to allow the modification of DataFrames that have duplicate elements in the index. Previously it would fail with ``` AttributeError: 'bool' object has no attribute 'any' ``` See #17105 for a code snippet. Replacing `zeros_like(objarray)` with `zeros()` because the first unnecessarily returns an array of zeros with the same types as `objarray`. We only want the zeros, not the type, to be able to later compare against -1 and get an array as a result: The result of `zeros_like()` with dates gives a boolean after comparison ``` >>> myarr_fromindex = np.zeros_like(pd.DatetimeIndex([2,3])) >>> myarr_fromindex array(['1970-01-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> >>> type(myarr_fromindex) <type 'numpy.ndarray'> >>> >>> myarr_fromindex == -1 False ``` The result of `zeros_like()` with numbers gives an array after comparison ``` >>> >>> >>> myarr_fromarr = np.zeros_like([2,3]) >>> myarr_fromarr array([0, 0]) >>> type(myarr_fromarr) <type 'numpy.ndarray'> >>> myarr_fromarr == -1 array([False, False]) >>> ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20939
2018-05-03T08:28:15Z
2018-05-08T00:23:34Z
2018-05-08T00:23:34Z
2018-05-08T00:23:41Z
ASV: add tests for indexing engines and Uint64Engine
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 8290731fd7eea..49d6311a7bb66 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -2,8 +2,9 @@ import numpy as np import pandas.util.testing as tm -from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index, - Float64Index, IntervalIndex, CategoricalIndex, +from pandas import (Series, DataFrame, Panel, MultiIndex, + Int64Index, UInt64Index, Float64Index, + IntervalIndex, CategoricalIndex, IndexSlice, concat, date_range) @@ -11,7 +12,7 @@ class NumericSeriesIndexing(object): goal_time = 0.2 params = [ - (Int64Index, Float64Index), + (Int64Index, UInt64Index, Float64Index), ('unique_monotonic_inc', 'nonunique_monotonic_inc'), ] param_names = ['index_dtype', 'index_structure'] diff --git a/asv_bench/benchmarks/indexing_engines.py b/asv_bench/benchmarks/indexing_engines.py new file mode 100644 index 0000000000000..243f2ada7be32 --- /dev/null +++ b/asv_bench/benchmarks/indexing_engines.py @@ -0,0 +1,54 @@ +import numpy as np + +from pandas._libs.index import (Int64Engine, UInt64Engine, Float64Engine, + ObjectEngine) + + +class NumericEngineIndexing(object): + + goal_time = 0.2 + params = [[Int64Engine, UInt64Engine, Float64Engine], + [np.int64, np.uint64, np.float64], + ['monotonic_incr', 'monotonic_decr', 'non_monotonic'], + ] + param_names = ['engine', 'dtype', 'index_type'] + + def setup(self, engine, dtype, index_type): + N = 10**5 + values = list([1] * N + [2] * N + [3] * N) + arr = { + 'monotonic_incr': np.array(values, dtype=dtype), + 'monotonic_decr': np.array(list(reversed(values)), + dtype=dtype), + 'non_monotonic': np.array([1, 2, 3] * N, dtype=dtype), + }[index_type] + + self.data = engine(lambda: arr, len(arr)) + # code belows avoids populating the mapping etc. while timing. 
+ self.data.get_loc(2) + + def time_get_loc(self, engine, dtype, index_type): + self.data.get_loc(2) + + +class ObjectEngineIndexing(object): + + goal_time = 0.2 + params = [('monotonic_incr', 'monotonic_decr', 'non_monotonic')] + param_names = ['index_type'] + + def setup(self, index_type): + N = 10**5 + values = list('a' * N + 'b' * N + 'c' * N) + arr = { + 'monotonic_incr': np.array(values, dtype=object), + 'monotonic_decr': np.array(list(reversed(values)), dtype=object), + 'non_monotonic': np.array(list('abc') * N, dtype=object), + }[index_type] + + self.data = ObjectEngine(lambda: arr, len(arr)) + # code belows avoids populating the mapping etc. while timing. + self.data.get_loc('b') + + def time_get_loc(self, index_type): + self.data.get_loc('b')
This is an offspring from #21699 to do the the ASV tests in a contained PR. For reference the output from running ``asv run -b indexing_engines`` is: ``` · Creating environments · Discovering benchmarks ·· Uninstalling from conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt ·· Installing into conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt · Running 2 total benchmarks (1 commits * 1 environments * 2 benchmarks) [ 0.00%] · For pandas commit hash b28cf5aa: [ 0.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 50.00%] ··· Running indexing_engines.NumericEngineIndexing.time_get_loc 5.13±0.2μs;... [100.00%] ··· Running indexing_engines.ObjectEngineIndexing.time_get_loc 4.24±0.08μs;... ```
https://api.github.com/repos/pandas-dev/pandas/pulls/23090
2018-10-11T12:34:13Z
2018-10-18T16:12:19Z
2018-10-18T16:12:19Z
2018-10-18T17:46:22Z
Fix ASV imports
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index e83efdd0fa2a0..8290731fd7eea 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -2,10 +2,9 @@ import numpy as np import pandas.util.testing as tm -from pandas import (Series, DataFrame, MultiIndex, Int64Index, Float64Index, - IntervalIndex, CategoricalIndex, +from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index, + Float64Index, IntervalIndex, CategoricalIndex, IndexSlice, concat, date_range) -from .pandas_vb_common import Panel class NumericSeriesIndexing(object): diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 57811dec8cd29..3524a5adb5450 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -11,8 +11,6 @@ except ImportError: from pandas import ordered_merge as merge_ordered -from .pandas_vb_common import Panel - class Append(object): diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py index b87583ef925f3..c2e5bfa175feb 100644 --- a/asv_bench/benchmarks/panel_ctor.py +++ b/asv_bench/benchmarks/panel_ctor.py @@ -3,8 +3,6 @@ from pandas import DataFrame, Panel, DatetimeIndex, date_range -from .pandas_vb_common import Panel - class DifferentIndexes(object): goal_time = 0.2 diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py index e35455f36ed98..542af44a78ffe 100644 --- a/asv_bench/benchmarks/panel_methods.py +++ b/asv_bench/benchmarks/panel_methods.py @@ -3,8 +3,6 @@ import numpy as np from pandas import Panel -from .pandas_vb_common import Panel - class PanelMethods(object):
#22947 broke the ASV - second time in a few days that linting the benchmarks has broken something (see #22978 / #22886) @datapythonista, could you please require that PRs try ``` cd asv_bench asv dev ``` to see if *collection* of benchmark works? This takes about 30 seconds and then the asv run can be aborted with CTRL+C.
https://api.github.com/repos/pandas-dev/pandas/pulls/23085
2018-10-11T05:41:24Z
2018-10-11T06:19:36Z
2018-10-11T06:19:36Z
2018-12-03T06:47:42Z
PERF: Override PeriodIndex.unique
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index c34f9a737473e..29b8c7efda40c 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -119,3 +119,6 @@ def time_align(self): def time_intersection(self): self.index[:750].intersection(self.index[250:]) + + def time_unique(self): + self.index.unique() diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 40dd48880e0eb..b8607136197e4 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -684,6 +684,7 @@ Performance Improvements (:issue:`21372`) - Improved the performance of :func:`pandas.get_dummies` with ``sparse=True`` (:issue:`21997`) - Improved performance of :func:`IndexEngine.get_indexer_non_unique` for sorted, non-unique indexes (:issue:`9466`) +- Improved performance of :func:`PeriodIndex.unique` (:issue:`23083`) .. _whatsnew_0240.docs: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 7833dd851db34..f151389b02463 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -29,6 +29,7 @@ DIFFERENT_FREQ_INDEX) from pandas._libs.tslibs import resolution, period +from pandas.core.algorithms import unique1d from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays.period import PeriodArrayMixin, dt64arr_to_periodarr from pandas.core.base import _shared_docs @@ -539,6 +540,18 @@ def _get_unique_index(self, dropna=False): res = res.dropna() return res + @Appender(Index.unique.__doc__) + def unique(self, level=None): + # override the Index.unique method for performance GH#23083 + if level is not None: + # this should never occur, but is retained to make the signature + # match Index.unique + self._validate_index_level(level) + + values = self._ndarray_values + result = unique1d(values) + return self._shallow_copy(result) + def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label
In trying to simplify the mess that is the PeriodIndex constructors, I found that PeriodIndex.unique is doing an unfortunate conversion to object-dtype. This PR avoids that and gets a nice speedup. ``` In [2]: pi = pd.period_range('1000Q1', periods=10000, freq='Q') In [3]: %timeit pi.unique() The slowest run took 6.25 times longer than the fastest. This could mean that an intermediate result is being cached. 1000 loops, best of 3: 226 µs per loop <-- PR 10 loops, best of 3: 24.7 ms per loop <-- master ```
https://api.github.com/repos/pandas-dev/pandas/pulls/23083
2018-10-11T02:15:12Z
2018-10-11T11:42:37Z
2018-10-11T11:42:37Z
2018-10-11T15:32:04Z
Support ExtensionArray in hash_pandas_object
diff --git a/doc/source/api.rst b/doc/source/api.rst index ffa240febf731..f57531fffaaaa 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -245,6 +245,15 @@ Top-level evaluation eval +Hashing +~~~~~~~ + +.. autosummary:: + :toctree: generated/ + + util.hash_array + util.hash_pandas_object + Testing ~~~~~~~ diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 40dd48880e0eb..5e9ce875dddb8 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -548,6 +548,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your - :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`). - Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) - Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) +- Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`) - Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) .. _whatsnew_0240.api.incompatibilities: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index efe587c6aaaad..627afd1b6f860 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -466,6 +466,11 @@ def _values_for_factorize(self): as NA in the factorization routines, so it will be coded as `na_sentinal` and not included in `uniques`. By default, ``np.nan`` is used. + + Notes + ----- + The values returned by this method are also used in + :func:`pandas.util.hash_pandas_object`. 
""" return self.astype(object), np.nan diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index e62d70847437c..e41885d525653 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -11,7 +11,7 @@ ABCSeries, ABCDataFrame) from pandas.core.dtypes.common import ( - is_categorical_dtype, is_list_like) + is_categorical_dtype, is_list_like, is_extension_array_dtype) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import infer_dtype_from_scalar @@ -265,10 +265,13 @@ def hash_array(vals, encoding='utf8', hash_key=None, categorize=True): # numpy if categorical is a subdtype of complex, as it will choke). if is_categorical_dtype(dtype): return _hash_categorical(vals, encoding, hash_key) + elif is_extension_array_dtype(dtype): + vals, _ = vals._values_for_factorize() + dtype = vals.dtype # we'll be working with everything as 64-bit values, so handle this # 128-bit value early - elif np.issubdtype(dtype, np.complex128): + if np.issubdtype(dtype, np.complex128): return hash_array(vals.real) + 23 * hash_array(vals.imag) # First, turn whatever array this is into unsigned 64-bit ints, if we can diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py index beb7948f2c14b..2a4a1b9c4668b 100644 --- a/pandas/tests/extension/base/base.py +++ b/pandas/tests/extension/base/base.py @@ -2,6 +2,7 @@ class BaseExtensionTests(object): + assert_equal = staticmethod(tm.assert_equal) assert_series_equal = staticmethod(tm.assert_series_equal) assert_frame_equal = staticmethod(tm.assert_frame_equal) assert_extension_array_equal = staticmethod( diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 4e7886dd2e943..dce91d5a9ca9c 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -164,3 +164,13 @@ def test_container_shift(self, data, frame, periods, indices): compare = self.assert_series_equal compare(result, 
expected) + + @pytest.mark.parametrize("as_frame", [True, False]) + def test_hash_pandas_object_works(self, data, as_frame): + # https://github.com/pandas-dev/pandas/issues/23066 + data = pd.Series(data) + if as_frame: + data = data.to_frame() + a = pd.util.hash_pandas_object(data) + b = pd.util.hash_pandas_object(data) + self.assert_equal(a, b) diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 115afdcc99f2b..6c8b12ed865fc 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -199,6 +199,10 @@ def test_combine_le(self, data_repeated): def test_combine_add(self, data_repeated): pass + @unhashable + def test_hash_pandas_object_works(self, data, kind): + super().test_hash_pandas_object_works(data, kind) + class TestCasting(BaseJSON, base.BaseCastingTests): @pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
Closes #23066
https://api.github.com/repos/pandas-dev/pandas/pulls/23082
2018-10-11T02:03:43Z
2018-10-11T11:41:57Z
2018-10-11T11:41:57Z
2018-10-11T11:42:05Z
STY: avoid backslash
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index a6f586c7f2638..d4e3def84664b 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -60,8 +60,8 @@ def np_array_datetime64_compat(arr, *args, **kwargs): if not _np_version_under1p11: # is_list_like - if hasattr(arr, '__iter__') and not \ - isinstance(arr, string_and_binary_types): + if (hasattr(arr, '__iter__') and + not isinstance(arr, string_and_binary_types)): arr = [tz_replacer(s) for s in arr] else: arr = tz_replacer(arr) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e91cc8ec1e996..4607aba070cfc 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -275,8 +275,8 @@ def match(to_match, values, na_sentinel=-1): # replace but return a numpy array # use a Series because it handles dtype conversions properly from pandas import Series - result = Series(result.ravel()).replace(-1, na_sentinel).values.\ - reshape(result.shape) + result = Series(result.ravel()).replace(-1, na_sentinel) + result = result.values.reshape(result.shape) return result diff --git a/pandas/core/base.py b/pandas/core/base.py index 00c049497c0d8..91ae8375c233a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -395,8 +395,8 @@ def nested_renaming_depr(level=4): elif isinstance(obj, ABCSeries): nested_renaming_depr() - elif isinstance(obj, ABCDataFrame) and \ - k not in obj.columns: + elif (isinstance(obj, ABCDataFrame) and + k not in obj.columns): raise KeyError( "Column '{col}' does not exist!".format(col=k)) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bb82c531b698e..1158a025b1319 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5651,8 +5651,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, # fill in 2d chunks result = {col: s.fillna(method=method, value=value) for col, s in self.iteritems()} - new_obj = self._constructor.\ - 
from_dict(result).__finalize__(self) + prelim_obj = self._constructor.from_dict(result) + new_obj = prelim_obj.__finalize__(self) new_data = new_obj._data else: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f15b1203a334e..957f3be8cf6ae 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1027,8 +1027,9 @@ def nunique(self, dropna=True): try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes - assert val.dtype == object, \ - 'val.dtype must be object, got %s' % val.dtype + msg = ('val.dtype must be object, got {dtype}' + .format(dtype=val.dtype)) + assert val.dtype == object, msg val, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((val, ids)) _isna = lambda a: a == -1 diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 61dadd833be35..025be781d9ee8 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -578,8 +578,8 @@ def wrapper(*args, **kwargs): # a little trickery for aggregation functions that need an axis # argument kwargs_with_axis = kwargs.copy() - if 'axis' not in kwargs_with_axis or \ - kwargs_with_axis['axis'] is None: + if ('axis' not in kwargs_with_axis or + kwargs_with_axis['axis'] is None): kwargs_with_axis['axis'] = self.axis def curried_with_axis(x): @@ -1490,8 +1490,10 @@ def nth(self, n, dropna=None): self._set_group_selection() if not dropna: - mask = np.in1d(self._cumcount_array(), nth_values) | \ - np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values) + mask_left = np.in1d(self._cumcount_array(), nth_values) + mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, + -nth_values) + mask = mask_left | mask_right out = self._selected_obj[mask] if not self.as_index: @@ -1552,8 +1554,8 @@ def nth(self, n, dropna=None): result.loc[mask] = np.nan # reset/reindex to the original groups - if len(self.obj) == len(dropped) or \ - len(result) == 
len(self.grouper.result_index): + if (len(self.obj) == len(dropped) or + len(result) == len(self.grouper.result_index)): result.index = self.grouper.result_index else: result = result.reindex(self.grouper.result_index) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index e7144fb1d2932..1c8fe0e6cadad 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -157,8 +157,8 @@ def _set_grouper(self, obj, sort=False): if self.key is not None: key = self.key # The 'on' is already defined - if getattr(self.grouper, 'name', None) == key and \ - isinstance(obj, ABCSeries): + if (getattr(self.grouper, 'name', None) == key and + isinstance(obj, ABCSeries)): ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: @@ -530,9 +530,9 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, except Exception: all_in_columns_index = False - if not any_callable and not all_in_columns_index and \ - not any_arraylike and not any_groupers and \ - match_axis_length and level is None: + if (not any_callable and not all_in_columns_index and + not any_arraylike and not any_groupers and + match_axis_length and level is None): keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): @@ -593,15 +593,15 @@ def is_in_obj(gpr): # create the Grouping # allow us to passing the actual Grouping as the gpr - ping = Grouping(group_axis, - gpr, - obj=obj, - name=name, - level=level, - sort=sort, - observed=observed, - in_axis=in_axis) \ - if not isinstance(gpr, Grouping) else gpr + ping = (Grouping(group_axis, + gpr, + obj=obj, + name=name, + level=level, + sort=sort, + observed=observed, + in_axis=in_axis) + if not isinstance(gpr, Grouping) else gpr) groupings.append(ping) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index d9f7b4d9c31c3..b199127ac867b 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -521,8 +521,8 @@ def _cython_operation(self, kind, values, 
how, axis, min_count=-1, result = result.astype('float64') result[mask] = np.nan - if kind == 'aggregate' and \ - self._filter_empty_groups and not counts.all(): + if (kind == 'aggregate' and + self._filter_empty_groups and not counts.all()): if result.ndim == 2: try: result = lib.row_bool_subset( @@ -743,8 +743,9 @@ def group_info(self): else: comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) - return comp_ids.astype('int64', copy=False), \ - obs_group_ids.astype('int64', copy=False), ngroups + return (comp_ids.astype('int64', copy=False), + obs_group_ids.astype('int64', copy=False), + ngroups) @cache_readonly def ngroups(self): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b3c913f21dd86..0c6aaf4b46d6a 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1836,8 +1836,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels): """Translate any partial string timestamp matches in key, returning the new key (GH 10331)""" if isinstance(labels, MultiIndex): - if isinstance(key, compat.string_types) and \ - labels.levels[0].is_all_dates: + if (isinstance(key, compat.string_types) and + labels.levels[0].is_all_dates): # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) key = tuple([key] + [slice(None)] * (len(labels.levels) - 1)) @@ -1847,8 +1847,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels): # (..., slice('2016-01-01', '2016-01-01', None), ...) 
new_key = [] for i, component in enumerate(key): - if isinstance(component, compat.string_types) and \ - labels.levels[i].is_all_dates: + if (isinstance(component, compat.string_types) and + labels.levels[i].is_all_dates): new_key.append(slice(component, component, None)) else: new_key.append(component) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 6d67070000dcd..1fc9d961285be 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -184,8 +184,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): if len(values) and values[0] is None: fill_value = None - if getattr(self.block, 'is_datetimetz', False) or \ - is_datetimetz(empty_dtype): + if (getattr(self.block, 'is_datetimetz', False) or + is_datetimetz(empty_dtype)): if self.block is None: array = empty_dtype.construct_array_type() missing_arr = array([fill_value], dtype=empty_dtype) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index b525dddeb1ba5..22e591e776a22 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -140,8 +140,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', margins_name=margins_name, fill_value=fill_value) # discard the top level - if values_passed and not values_multi and not table.empty and \ - (table.columns.nlevels > 1): + if (values_passed and not values_multi and not table.empty and + (table.columns.nlevels > 1)): table = table[values[0]] if len(index) == 0 and len(columns) > 0: diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 50f6e310705d7..495e59d0882de 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -745,9 +745,8 @@ def check_len(item, name): if is_list_like(item): if not len(item) == data_to_encode.shape[1]: - len_msg = \ - len_msg.format(name=name, len_item=len(item), - len_enc=data_to_encode.shape[1]) + len_msg = len_msg.format(name=name, 
len_item=len(item), + len_enc=data_to_encode.shape[1]) raise ValueError(len_msg) check_len(prefix, 'prefix') diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index eb8d2b0b6c809..dcba51d26980f 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -724,8 +724,9 @@ def calc_with_mask(carg, mask): result = np.empty(carg.shape, dtype='M8[ns]') iresult = result.view('i8') iresult[~mask] = tslibs.iNaT - result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \ - astype('M8[ns]') + + masked_result = calc(carg[mask].astype(np.float64).astype(np.int64)) + result[mask] = masked_result.astype('M8[ns]') return result # try intlike / strings that are ints diff --git a/pandas/core/window.py b/pandas/core/window.py index ea0ec79d655fb..7d48967602bc1 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -98,11 +98,11 @@ def is_freq_type(self): def validate(self): if self.center is not None and not is_bool(self.center): raise ValueError("center must be a boolean") - if self.min_periods is not None and not \ - is_integer(self.min_periods): + if (self.min_periods is not None and + not is_integer(self.min_periods)): raise ValueError("min_periods must be an integer") - if self.closed is not None and self.closed not in \ - ['right', 'both', 'left', 'neither']: + if (self.closed is not None and + self.closed not in ['right', 'both', 'left', 'neither']): raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'") diff --git a/pandas/io/common.py b/pandas/io/common.py index 405911eda7e9e..9bf7c5af2cd3a 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -430,8 +430,8 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, handles.append(f) # in Python 3, convert BytesIO or fileobjects passed with an encoding - if compat.PY3 and is_text and\ - (compression or isinstance(f, need_text_wrapping)): + if (compat.PY3 and is_text and + (compression or isinstance(f, 
need_text_wrapping))): from io import TextIOWrapper f = TextIOWrapper(f, encoding=encoding) handles.append(f) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 00b4c704c681b..c1cbccb7cbf1c 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -1755,14 +1755,14 @@ def convert(cls, style_dict, num_format_str=None): props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted', 'thick', 'double', 'hair', 'mediumDashed', 'dashDot', 'mediumDashDot', 'dashDotDot', - 'mediumDashDotDot', 'slantDashDot'].\ - index(props[k]) + 'mediumDashDotDot', + 'slantDashDot'].index(props[k]) except ValueError: props[k] = 2 if isinstance(props.get('font_script'), string_types): - props['font_script'] = ['baseline', 'superscript', 'subscript'].\ - index(props['font_script']) + props['font_script'] = ['baseline', 'superscript', + 'subscript'].index(props['font_script']) if isinstance(props.get('underline'), string_types): props['underline'] = {'none': 0, 'single': 1, 'double': 2, diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f4bb53ba4f218..ad6ad5bcaf309 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -14,10 +14,9 @@ PackageLoader, Environment, ChoiceLoader, FileSystemLoader ) except ImportError: - msg = "pandas.Styler requires jinja2. "\ - "Please install with `conda install Jinja2`\n"\ - "or `pip install Jinja2`" - raise ImportError(msg) + raise ImportError("pandas.Styler requires jinja2. " + "Please install with `conda install Jinja2`\n" + "or `pip install Jinja2`") from pandas.core.dtypes.common import is_float, is_string_like diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index 2846525adbe6b..ac73363b92b1e 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -40,9 +40,8 @@ def get_terminal_size(): if tuple_xy is None: tuple_xy = _get_terminal_size_tput() # needed for window's python in cygwin's xterm! 
- if current_os == 'Linux' or \ - current_os == 'Darwin' or \ - current_os.startswith('CYGWIN'): + if (current_os == 'Linux' or current_os == 'Darwin' or + current_os.startswith('CYGWIN')): tuple_xy = _get_terminal_size_linux() if tuple_xy is None: tuple_xy = (80, 25) # default value diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 03f0905d2023a..5c7b964cf69d1 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -250,11 +250,10 @@ def _recursive_extract(data, path, seen_meta, level=0): if errors == 'ignore': meta_val = np.nan else: - raise \ - KeyError("Try running with " - "errors='ignore' as key " - "{err} is not always present" - .format(err=e)) + raise KeyError("Try running with " + "errors='ignore' as key " + "{err} is not always present" + .format(err=e)) meta_vals[key].append(meta_val) records.extend(recs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2def3b81c9518..1edc6f6e14442 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -883,15 +883,15 @@ def _clean_options(self, options, engine): # C engine not supported yet if engine == 'c': if options['skipfooter'] > 0: - fallback_reason = "the 'c' engine does not support"\ - " skipfooter" + fallback_reason = ("the 'c' engine does not support" + " skipfooter") engine = 'python' encoding = sys.getfilesystemencoding() or 'utf-8' if sep is None and not delim_whitespace: if engine == 'c': - fallback_reason = "the 'c' engine does not support"\ - " sep=None with delim_whitespace=False" + fallback_reason = ("the 'c' engine does not support" + " sep=None with delim_whitespace=False") engine = 'python' elif sep is not None and len(sep) > 1: if engine == 'c' and sep == r'\s+': @@ -899,10 +899,10 @@ def _clean_options(self, options, engine): del result['delimiter'] elif engine not in ('python', 'python-fwf'): # wait until regex engine integrated - fallback_reason = "the 'c' engine does not support"\ - " regex separators (separators > 1 char 
and"\ - r" different from '\s+' are"\ - " interpreted as regex)" + fallback_reason = ("the 'c' engine does not support" + " regex separators (separators > 1 char and" + r" different from '\s+' are" + " interpreted as regex)") engine = 'python' elif delim_whitespace: if 'python' in engine: @@ -915,10 +915,10 @@ def _clean_options(self, options, engine): except UnicodeDecodeError: encodeable = False if not encodeable and engine not in ('python', 'python-fwf'): - fallback_reason = "the separator encoded in {encoding}" \ - " is > 1 char long, and the 'c' engine" \ - " does not support such separators".format( - encoding=encoding) + fallback_reason = ("the separator encoded in {encoding}" + " is > 1 char long, and the 'c' engine" + " does not support such separators" + .format(encoding=encoding)) engine = 'python' quotechar = options['quotechar'] @@ -3203,8 +3203,8 @@ def _clean_index_names(columns, index_col): index_names.append(name) # hack - if isinstance(index_names[0], compat.string_types)\ - and 'Unnamed' in index_names[0]: + if (isinstance(index_names[0], compat.string_types) and + 'Unnamed' in index_names[0]): index_names[0] = None return index_names, columns, index_col diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ff37036533b4f..d2b523461104c 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1804,8 +1804,8 @@ def validate_metadata(self, handler): if self.meta == 'category': new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) - if new_metadata is not None and cur_metadata is not None \ - and not array_equivalent(new_metadata, cur_metadata): + if (new_metadata is not None and cur_metadata is not None and + not array_equivalent(new_metadata, cur_metadata)): raise ValueError("cannot append a categorical with " "different categories to the existing") diff --git a/pandas/io/stata.py b/pandas/io/stata.py index a321e315f5225..68b2182c2ff07 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -442,8 
+442,8 @@ def parse_dates_safe(dates, delta=False, year=False, days=False): conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ["%th", "th"]: d = parse_dates_safe(dates, year=True) - conv_dates = 2 * (d.year - stata_epoch.year) + \ - (d.month > 6).astype(np.int) + conv_dates = (2 * (d.year - stata_epoch.year) + + (d.month > 6).astype(np.int)) elif fmt in ["%ty", "ty"]: d = parse_dates_safe(dates, year=True) conv_dates = d.year @@ -568,16 +568,18 @@ def _cast_to_stata_types(data): elif dtype in (np.float32, np.float64): value = data[col].max() if np.isinf(value): - msg = 'Column {0} has a maximum value of infinity which is ' \ - 'outside the range supported by Stata.' - raise ValueError(msg.format(col)) + raise ValueError('Column {col} has a maximum value of ' + 'infinity which is outside the range ' + 'supported by Stata.'.format(col=col)) if dtype == np.float32 and value > float32_max: data[col] = data[col].astype(np.float64) elif dtype == np.float64: if value > float64_max: - msg = 'Column {0} has a maximum value ({1}) outside the ' \ - 'range supported by Stata ({1})' - raise ValueError(msg.format(col, value, float64_max)) + raise ValueError('Column {col} has a maximum value ' + '({val}) outside the range supported by ' + 'Stata ({float64_max})' + .format(col=col, val=value, + float64_max=float64_max)) if ws: warnings.warn(ws, PossiblePrecisionLoss) @@ -1704,9 +1706,10 @@ def _do_convert_categoricals(self, data, value_label_dict, lbllist, vc = Series(categories).value_counts() repeats = list(vc.index[vc > 1]) repeats = '\n' + '-' * 80 + '\n'.join(repeats) - msg = 'Value labels for column {0} are not unique. The ' \ - 'repeated labels are:\n{1}'.format(col, repeats) - raise ValueError(msg) + raise ValueError('Value labels for column {col} are not ' + 'unique. The repeated labels are:\n' + '{repeats}' + .format(col=col, repeats=repeats)) # TODO: is the next line needed above in the data(...) method? 
cat_data = Series(cat_data, index=data.index) cat_converted_data.append((col, cat_data)) @@ -2066,8 +2069,8 @@ def _check_column_names(self, data): name = text_type(name) for c in name: - if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \ - (c < '0' or c > '9') and c != '_': + if ((c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and + (c < '0' or c > '9') and c != '_'): name = name.replace(c, '_') # Variable name must not be a reserved word diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py index 385d4d7f047c7..ff5351bb6c6ea 100644 --- a/pandas/plotting/__init__.py +++ b/pandas/plotting/__init__.py @@ -12,9 +12,9 @@ from pandas.plotting._style import plot_params from pandas.plotting._tools import table try: - from pandas.plotting._converter import \ - register as register_matplotlib_converters - from pandas.plotting._converter import \ - deregister as deregister_matplotlib_converters + from pandas.plotting._converter import ( + register as register_matplotlib_converters) + from pandas.plotting._converter import ( + deregister as deregister_matplotlib_converters) except ImportError: pass
Related: #11954. This gets most of the non-`textwrap.dedent` cases.
https://api.github.com/repos/pandas-dev/pandas/pulls/23073
2018-10-10T14:19:39Z
2018-10-12T13:12:16Z
2018-10-12T13:12:16Z
2018-10-12T15:44:17Z
Fixing memory leaks in read_csv
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index bb02bbb36424a..b9abf9293079f 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1382,6 +1382,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form - Bug in :func:`DataFrame.to_string()` that caused representations of :class:`DataFrame` to not take up the whole window (:issue:`22984`) - Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`). - Bug in :meth:`HDFStore.append` when appending a :class:`DataFrame` with an empty string column and ``min_itemsize`` < 8 (:issue:`12242`) +- Bug in :func:`read_csv()` in which memory leaks occurred in the C engine when parsing ``NaN`` values due to insufficient cleanup on completion or error (:issue:`21353`) - Bug in :func:`read_csv()` in which incorrect error messages were being raised when ``skipfooter`` was passed in along with ``nrows``, ``iterator``, or ``chunksize`` (:issue:`23711`) - Bug in :meth:`read_csv()` in which :class:`MultiIndex` index names were being improperly handled in the cases when they were not provided (:issue:`23484`) - Bug in :meth:`read_html()` in which the error message was not displaying the valid flavors when an invalid one was provided (:issue:`23549`) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 40aa03caa56eb..f74de79542628 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -1070,18 +1070,6 @@ cdef class TextReader: conv = self._get_converter(i, name) - # XXX - na_flist = set() - if self.na_filter: - na_list, na_flist = self._get_na_list(i, name) - if na_list is None: - na_filter = 0 - else: - na_filter = 1 - na_hashset = kset_from_list(na_list) - else: - na_filter = 0 - col_dtype = None if self.dtype is not None: if isinstance(self.dtype, dict): @@ -1106,13 +1094,34 @@ cdef class TextReader: 
self.c_encoding) continue - # Should return as the desired dtype (inferred or specified) - col_res, na_count = self._convert_tokens( - i, start, end, name, na_filter, na_hashset, - na_flist, col_dtype) + # Collect the list of NaN values associated with the column. + # If we aren't supposed to do that, or none are collected, + # we set `na_filter` to `0` (`1` otherwise). + na_flist = set() + + if self.na_filter: + na_list, na_flist = self._get_na_list(i, name) + if na_list is None: + na_filter = 0 + else: + na_filter = 1 + na_hashset = kset_from_list(na_list) + else: + na_filter = 0 - if na_filter: - self._free_na_set(na_hashset) + # Attempt to parse tokens and infer dtype of the column. + # Should return as the desired dtype (inferred or specified). + try: + col_res, na_count = self._convert_tokens( + i, start, end, name, na_filter, na_hashset, + na_flist, col_dtype) + finally: + # gh-21353 + # + # Cleanup the NaN hash that we generated + # to avoid memory leaks. + if na_filter: + self._free_na_set(na_hashset) if upcast_na and na_count > 0: col_res = _maybe_upcast(col_res) @@ -2059,6 +2068,7 @@ cdef kh_str_t* kset_from_list(list values) except NULL: # None creeps in sometimes, which isn't possible here if not isinstance(val, bytes): + kh_destroy_str(table) raise ValueError('Must be all encoded bytes') k = kh_put_str(table, PyBytes_AsString(val), &ret)
This PR fixes a memory leak in parsers.pyx detected by valgrind, and also adds some further cleanup that should avoid memory leaks on exceptions, closes #21353 - Moved the allocation of na_hashset further down, closer to where it is used. Otherwise it will not be freed if `continue` is executed, - Delete `na_hashset` if there is an exception, - Also clean up the allocation inside `kset_from_list` before raising an exception.
https://api.github.com/repos/pandas-dev/pandas/pulls/23072
2018-10-10T08:33:17Z
2018-11-19T12:09:41Z
2018-11-19T12:09:40Z
2020-07-23T13:45:25Z
Corrected 'columns' argument of 'to_csv' method
diff --git a/doc/source/io.rst b/doc/source/io.rst index 039cba2993381..56da4dbea8706 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1603,7 +1603,7 @@ function takes a number of arguments. Only the first is required. * ``sep`` : Field delimiter for the output file (default ",") * ``na_rep``: A string representation of a missing value (default '') * ``float_format``: Format string for floating point numbers -* ``cols``: Columns to write (default None) +* ``columns``: Columns to write (default None) * ``header``: Whether to write out the column names (default True) * ``index``: whether to write row (index) names (default True) * ``index_label``: Column label(s) for index column(s) if desired. If None
Compared the 'to_csv' method described in https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html with the same method described in https://pandas.pydata.org/pandas-docs/stable/io.html#writing-to-csv-format and noticed difference between the two in the 'columns' and 'cols' argument. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23068
2018-10-10T02:46:23Z
2018-10-10T16:46:32Z
2018-10-10T16:46:32Z
2018-10-10T16:46:44Z
Add allow_sets-kwarg to is_list_like
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 16f0b9ee99909..d786711ffa6ea 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -198,6 +198,8 @@ Other Enhancements - :meth:`round`, :meth:`ceil`, and meth:`floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`) - :class:`Resampler` now is iterable like :class:`GroupBy` (:issue:`15314`). - :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`Resampler.quantile` (:issue:`15023`). +- :meth:`pandas.core.dtypes.is_list_like` has gained a keyword ``allow_sets`` which is ``True`` by default; if ``False``, + all instances of ``set`` will not be considered "list-like" anymore (:issue:`23061`) - :meth:`Index.to_frame` now supports overriding column name(s) (:issue:`22580`). - New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`). - Compatibility with Matplotlib 3.0 (:issue:`22790`). 
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 1453725225e7d..5108e23c53b5a 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -141,6 +141,7 @@ def lfilter(*args, **kwargs): Mapping = collections.abc.Mapping Sequence = collections.abc.Sequence Sized = collections.abc.Sized + Set = collections.abc.Set else: # Python 2 @@ -201,6 +202,7 @@ def get_range_parameters(data): Mapping = collections.Mapping Sequence = collections.Sequence Sized = collections.Sized + Set = collections.Set if PY2: def iteritems(obj, **kw): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 22da546355df6..af5e1523c7cec 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -16,10 +16,10 @@ ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass, ABCDateOffset) from pandas.core.dtypes.inference import ( # noqa:F401 - is_bool, is_integer, is_hashable, is_iterator, is_float, - is_dict_like, is_scalar, is_string_like, is_list_like, is_number, - is_file_like, is_re, is_re_compilable, is_sequence, is_nested_list_like, - is_named_tuple, is_array_like, is_decimal, is_complex, is_interval) + is_bool, is_integer, is_float, is_number, is_decimal, is_complex, + is_re, is_re_compilable, is_dict_like, is_string_like, is_file_like, + is_list_like, is_nested_list_like, is_sequence, is_named_tuple, + is_hashable, is_iterator, is_array_like, is_scalar, is_interval) _POSSIBLY_CAST_DTYPES = {np.dtype(t).name for t in ['O', 'int8', 'uint8', 'int16', 'uint16', diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 67f391615eedb..7470497383064 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -5,7 +5,7 @@ from numbers import Number from pandas import compat from pandas.compat import (PY2, string_types, text_type, - string_and_binary_types, re_type) + string_and_binary_types, re_type, Set) from pandas._libs import lib is_bool = lib.is_bool 
@@ -247,7 +247,7 @@ def is_re_compilable(obj): return True -def is_list_like(obj): +def is_list_like(obj, allow_sets=True): """ Check if the object is list-like. @@ -259,6 +259,10 @@ def is_list_like(obj): Parameters ---------- obj : The object to check. + allow_sets : boolean, default True + If this parameter is False, sets will not be considered list-like + + .. versionadded:: 0.24.0 Returns ------- @@ -283,11 +287,15 @@ def is_list_like(obj): False """ - return (isinstance(obj, compat.Iterable) and + return (isinstance(obj, compat.Iterable) # we do not count strings/unicode/bytes as list-like - not isinstance(obj, string_and_binary_types) and + and not isinstance(obj, string_and_binary_types) + # exclude zero-dimensional numpy arrays, effectively scalars - not (isinstance(obj, np.ndarray) and obj.ndim == 0)) + and not (isinstance(obj, np.ndarray) and obj.ndim == 0) + + # exclude sets if allow_sets is False + and not (allow_sets is False and isinstance(obj, Set))) def is_array_like(obj): diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 76cd6aabb93ae..d0dd03d6eb8df 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -47,6 +47,70 @@ def coerce(request): return request.param +# collect all objects to be tested for list-like-ness; use tuples of objects, +# whether they are list-like or not (special casing for sets), and their ID +ll_params = [ + ([1], True, 'list'), # noqa: E241 + ([], True, 'list-empty'), # noqa: E241 + ((1, ), True, 'tuple'), # noqa: E241 + (tuple(), True, 'tuple-empty'), # noqa: E241 + ({'a': 1}, True, 'dict'), # noqa: E241 + (dict(), True, 'dict-empty'), # noqa: E241 + ({'a', 1}, 'set', 'set'), # noqa: E241 + (set(), 'set', 'set-empty'), # noqa: E241 + (frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241 + (frozenset([]), 'set', 'frozenset-empty'), # noqa: E241 + (iter([1, 2]), True, 'iterator'), # noqa: E241 + (iter([]), True, 'iterator-empty'), # 
noqa: E241 + ((x for x in [1, 2]), True, 'generator'), # noqa: E241 + ((x for x in []), True, 'generator-empty'), # noqa: E241 + (Series([1]), True, 'Series'), # noqa: E241 + (Series([]), True, 'Series-empty'), # noqa: E241 + (Series(['a']).str, True, 'StringMethods'), # noqa: E241 + (Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241 + (Index([1]), True, 'Index'), # noqa: E241 + (Index([]), True, 'Index-empty'), # noqa: E241 + (DataFrame([[1]]), True, 'DataFrame'), # noqa: E241 + (DataFrame(), True, 'DataFrame-empty'), # noqa: E241 + (np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241 + (np.array([]), True, 'ndarray-1d-empty'), # noqa: E241 + (np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241 + (np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241 + (np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241 + (np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241 + (np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241 + (np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241 + (np.array(2), False, 'ndarray-0d'), # noqa: E241 + (1, False, 'int'), # noqa: E241 + (b'123', False, 'bytes'), # noqa: E241 + (b'', False, 'bytes-empty'), # noqa: E241 + ('123', False, 'string'), # noqa: E241 + ('', False, 'string-empty'), # noqa: E241 + (str, False, 'string-type'), # noqa: E241 + (object(), False, 'object'), # noqa: E241 + (np.nan, False, 'NaN'), # noqa: E241 + (None, False, 'None') # noqa: E241 +] +objs, expected, ids = zip(*ll_params) + + +@pytest.fixture(params=zip(objs, expected), ids=ids) +def maybe_list_like(request): + return request.param + + +def test_is_list_like(maybe_list_like): + obj, expected = maybe_list_like + expected = True if expected == 'set' else expected + assert inference.is_list_like(obj) == expected + + +def test_is_list_like_disallow_sets(maybe_list_like): + obj, expected = maybe_list_like + expected = False if expected == 'set' else expected + assert inference.is_list_like(obj, allow_sets=False) == 
expected + + def test_is_sequence(): is_seq = inference.is_sequence assert (is_seq((1, 2))) @@ -63,23 +127,6 @@ def __getitem__(self): assert (not is_seq(A())) -@pytest.mark.parametrize( - "ll", - [ - [], [1], (1, ), (1, 2), {'a': 1}, - {1, 'a'}, Series([1]), - Series([]), Series(['a']).str, - np.array([2])]) -def test_is_list_like_passes(ll): - assert inference.is_list_like(ll) - - -@pytest.mark.parametrize( - "ll", [1, '2', object(), str, np.array(2)]) -def test_is_list_like_fails(ll): - assert not inference.is_list_like(ll) - - def test_is_array_like(): assert inference.is_array_like(Series([])) assert inference.is_array_like(Series([1, 2]))
- [x] closes ~~#23009~~ #23061 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This is an attempt responding to https://github.com/pandas-dev/pandas/pull/22486#issuecomment-428159769: > @h-vetinari why don't you try (separate PR) excluding set from is_list_like and see what the implications of that are. Following some initial discussion in #23061, I decided to go with a variant that does not break anything - i.e. adding a keyword which defaults to the current behaviour. I've added a warning that's only raised if necessary, to note that this behaviour will be changed in the future -- regardless of whether it is deprecated or not, I think that users *as well as developers* should have to actively choose to include unordered sets (reason i.a. for #23009, and probably some more). The tedious part of this PR was hunting down all the internal uses of `is_list_like` and adding the kwarg there to avoid raising the warning. Hope I didn't miss any.
https://api.github.com/repos/pandas-dev/pandas/pulls/23065
2018-10-09T21:37:23Z
2018-10-18T15:53:33Z
2018-10-18T15:53:33Z
2018-10-22T14:16:59Z
DEPS: drop numpy < 1.12
diff --git a/ci/azure-macos-35.yaml b/ci/azure-macos-35.yaml index a36f748ded812..6ccdc79d11b27 100644 --- a/ci/azure-macos-35.yaml +++ b/ci/azure-macos-35.yaml @@ -8,10 +8,10 @@ dependencies: - html5lib - jinja2 - lxml - - matplotlib + - matplotlib=2.2.0 - nomkl - numexpr - - numpy=1.10.4 + - numpy=1.12.0 - openpyxl=2.5.5 - pytables - python=3.5* diff --git a/ci/azure-windows-27.yaml b/ci/azure-windows-27.yaml index bcd9ddee1715e..d48a9ba986a93 100644 --- a/ci/azure-windows-27.yaml +++ b/ci/azure-windows-27.yaml @@ -10,7 +10,7 @@ dependencies: - html5lib - jinja2=2.8 - lxml - - matplotlib + - matplotlib=2.0.1 - numexpr - numpy=1.12* - openpyxl=2.5.5 diff --git a/ci/azure/macos.yml b/ci/azure/macos.yml index 9bfaef04ea2fa..fb10d89731f26 100644 --- a/ci/azure/macos.yml +++ b/ci/azure/macos.yml @@ -9,7 +9,7 @@ jobs: strategy: maxParallel: 11 matrix: - py35_np_110: + py35_np_120: ENV_FILE: ci/azure-macos-35.yaml CONDA_PY: "35" CONDA_ENV: pandas diff --git a/ci/azure/windows-py27.yml b/ci/azure/windows-py27.yml index 10251bc03b8dc..8718cc849b7a8 100644 --- a/ci/azure/windows-py27.yml +++ b/ci/azure/windows-py27.yml @@ -9,7 +9,7 @@ jobs: strategy: maxParallel: 11 matrix: - py36_np14: + py36_np121: ENV_FILE: ci/azure-windows-27.yaml CONDA_PY: "27" CONDA_ENV: pandas diff --git a/ci/circle-27-compat.yaml b/ci/circle-27-compat.yaml index 84ec7e20fc8f1..5b726304cf414 100644 --- a/ci/circle-27-compat.yaml +++ b/ci/circle-27-compat.yaml @@ -3,18 +3,18 @@ channels: - defaults - conda-forge dependencies: - - bottleneck=1.0.0 + - bottleneck=1.2.0 - cython=0.28.2 - jinja2=2.8 - - numexpr=2.4.4 # we test that we correctly don't use an unsupported numexpr - - numpy=1.9.3 + - numexpr=2.6.1 + - numpy=1.12.0 - openpyxl=2.5.5 - psycopg2 - - pytables=3.2.2 + - pytables=3.4.2 - python-dateutil=2.5.0 - python=2.7* - pytz=2013b - - scipy=0.14.0 + - scipy=0.18.1 - sqlalchemy=0.7.8 - xlrd=0.9.2 - xlsxwriter=0.5.2 diff --git a/ci/requirements-optional-conda.txt 
b/ci/requirements-optional-conda.txt index 376fdb1e14e3a..e9afd7a551b6e 100644 --- a/ci/requirements-optional-conda.txt +++ b/ci/requirements-optional-conda.txt @@ -1,6 +1,6 @@ beautifulsoup4>=4.2.1 blosc -bottleneck +bottleneck>=1.2.0 fastparquet feather-format gcsfs @@ -9,17 +9,17 @@ ipython>=5.6.0 ipykernel jinja2 lxml -matplotlib +matplotlib>=2.0.0 nbsphinx -numexpr +numexpr>=2.6.1 openpyxl=2.5.5 pyarrow pymysql -pytables +pytables>=3.4.2 pytest-cov pytest-xdist s3fs -scipy +scipy>=0.18.1 seaborn sqlalchemy statsmodels diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt index 09ce8e59a3b46..ebe0c4ca88ee6 100644 --- a/ci/requirements-optional-pip.txt +++ b/ci/requirements-optional-pip.txt @@ -2,7 +2,7 @@ # Do not modify directly beautifulsoup4>=4.2.1 blosc -bottleneck +bottleneck>=1.2.0 fastparquet feather-format gcsfs @@ -11,9 +11,9 @@ ipython>=5.6.0 ipykernel jinja2 lxml -matplotlib +matplotlib>=2.0.0 nbsphinx -numexpr +numexpr>=2.6.1 openpyxl==2.5.5 pyarrow pymysql @@ -21,7 +21,7 @@ tables pytest-cov pytest-xdist s3fs -scipy +scipy>=0.18.1 seaborn sqlalchemy statsmodels diff --git a/ci/travis-27-locale.yaml b/ci/travis-27-locale.yaml index aca65f27d4187..dc5580ae6d287 100644 --- a/ci/travis-27-locale.yaml +++ b/ci/travis-27-locale.yaml @@ -3,11 +3,11 @@ channels: - defaults - conda-forge dependencies: - - bottleneck=1.0.0 + - bottleneck=1.2.0 - cython=0.28.2 - lxml - - matplotlib=1.4.3 - - numpy=1.9.3 + - matplotlib=2.0.0 + - numpy=1.12.0 - openpyxl=2.4.0 - python-dateutil - python-blosc diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml index cc0c5a3192188..f079ac309b97c 100644 --- a/ci/travis-27.yaml +++ b/ci/travis-27.yaml @@ -14,7 +14,7 @@ dependencies: - jemalloc=4.5.0.post - jinja2=2.8 - lxml - - matplotlib + - matplotlib=2.2.2 - mock - nomkl - numexpr diff --git a/doc/source/install.rst b/doc/source/install.rst index 7a846c817aee2..843384b680cf8 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -225,7 
+225,7 @@ Dependencies ------------ * `setuptools <https://setuptools.readthedocs.io/en/latest/>`__: 24.2.0 or higher -* `NumPy <http://www.numpy.org>`__: 1.9.0 or higher +* `NumPy <http://www.numpy.org>`__: 1.12.0 or higher * `python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__: 2.5.0 or higher * `pytz <http://pytz.sourceforge.net/>`__ @@ -236,11 +236,11 @@ Recommended Dependencies * `numexpr <https://github.com/pydata/numexpr>`__: for accelerating certain numerical operations. ``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups. - If installed, must be Version 2.4.6 or higher. + If installed, must be Version 2.6.1 or higher. * `bottleneck <https://github.com/kwgoodman/bottleneck>`__: for accelerating certain types of ``nan`` evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed, - must be Version 1.0.0 or higher. + must be Version 1.2.0 or higher. .. note:: @@ -255,9 +255,9 @@ Optional Dependencies * `Cython <http://www.cython.org>`__: Only necessary to build development version. Version 0.28.2 or higher. -* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.14.0 or higher +* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.18.1 or higher * `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended. -* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended. +* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.4.2 or higher * `Feather Format <https://github.com/wesm/feather>`__: necessary for feather-based storage, version 0.3.1 or higher. 
* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.4.1) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.0.6) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support. * `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are: @@ -266,7 +266,7 @@ Optional Dependencies * `pymysql <https://github.com/PyMySQL/PyMySQL>`__: for MySQL. * `SQLite <https://docs.python.org/3/library/sqlite3.html>`__: for SQLite, this is included in Python's standard library by default. -* `matplotlib <http://matplotlib.org/>`__: for plotting, Version 1.4.3 or higher. +* `matplotlib <http://matplotlib.org/>`__: for plotting, Version 2.0.0 or higher. * For Excel I/O: * `xlrd/xlwt <http://www.python-excel.org/>`__: Excel reading (xlrd) and writing (xlwt) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 8a7ff4be78a8a..3053625721560 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -206,8 +206,32 @@ Other Enhancements Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + - A newly constructed empty :class:`DataFrame` with integer as the ``dtype`` will now only be cast to ``float64`` if ``index`` is specified (:issue:`22858`) +.. _whatsnew_0240.api_breaking.deps: + +Dependencies have increased minimum versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We have updated our minimum supported versions of dependencies (:issue:`21242`). 
+If installed, we now require: + ++-----------------+-----------------+----------+ +| Package | Minimum Version | Required | ++=================+=================+==========+ +| numpy | 1.12.0 | X | ++-----------------+-----------------+----------+ +| bottleneck | 1.2.0 | | ++-----------------+-----------------+----------+ +| matplotlib | 2.0.0 | | ++-----------------+-----------------+----------+ +| numexpr | 2.6.1 | | ++-----------------+-----------------+----------+ +| pytables | 3.4.2 | | ++-----------------+-----------------+----------+ +| scipy | 0.18.1 | | ++-----------------+-----------------+----------+ .. _whatsnew_0240.api_breaking.interval_values: diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index d4e3def84664b..5e67cf2ee2837 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -9,19 +9,16 @@ # numpy versioning _np_version = np.__version__ _nlv = LooseVersion(_np_version) -_np_version_under1p10 = _nlv < LooseVersion('1.10') -_np_version_under1p11 = _nlv < LooseVersion('1.11') -_np_version_under1p12 = _nlv < LooseVersion('1.12') _np_version_under1p13 = _nlv < LooseVersion('1.13') _np_version_under1p14 = _nlv < LooseVersion('1.14') _np_version_under1p15 = _nlv < LooseVersion('1.15') -if _nlv < '1.9': +if _nlv < '1.12': raise ImportError('this version of pandas is incompatible with ' - 'numpy < 1.9.0\n' + 'numpy < 1.12.0\n' 'your numpy version is {0}.\n' - 'Please upgrade numpy to >= 1.9.0 to use ' + 'Please upgrade numpy to >= 1.12.0 to use ' 'this pandas version'.format(_np_version)) @@ -43,9 +40,7 @@ def np_datetime64_compat(s, *args, **kwargs): tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ - - if not _np_version_under1p11: - s = tz_replacer(s) + s = tz_replacer(s) return np.datetime64(s, *args, **kwargs) @@ -56,23 +51,17 @@ def np_array_datetime64_compat(arr, *args, **kwargs): tz-changes in 1.11 that make 
'2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ - - if not _np_version_under1p11: - - # is_list_like - if (hasattr(arr, '__iter__') and - not isinstance(arr, string_and_binary_types)): - arr = [tz_replacer(s) for s in arr] - else: - arr = tz_replacer(arr) + # is_list_like + if (hasattr(arr, '__iter__') + and not isinstance(arr, string_and_binary_types)): + arr = [tz_replacer(s) for s in arr] + else: + arr = tz_replacer(arr) return np.array(arr, *args, **kwargs) __all__ = ['np', - '_np_version_under1p10', - '_np_version_under1p11', - '_np_version_under1p12', '_np_version_under1p13', '_np_version_under1p14', '_np_version_under1p15' diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 4607aba070cfc..cb9ffc4bd0fd5 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -30,7 +30,6 @@ ensure_platform_int, ensure_object, ensure_float64, ensure_uint64, ensure_int64) -from pandas.compat.numpy import _np_version_under1p10 from pandas.core.dtypes.missing import isna, na_value_for_dtype from pandas.core import common as com @@ -910,26 +909,12 @@ def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None): ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. """ - def _broadcast(arr_or_scalar, shape): - """ - Helper function to broadcast arrays / scalars to the desired shape. - """ - if _np_version_under1p10: - if is_scalar(arr_or_scalar): - out = np.empty(shape) - out.fill(arr_or_scalar) - else: - out = arr_or_scalar - else: - out = np.broadcast_to(arr_or_scalar, shape) - return out - # For performance reasons, we broadcast 'b' to the new array 'b2' # so that it has the same size as 'arr'. - b2 = _broadcast(b, arr.shape) + b2 = np.broadcast_to(b, arr.shape) if b_mask is not None: # We do the same broadcasting for b_mask as well. 
- b2_mask = _broadcast(b_mask, arr.shape) + b2_mask = np.broadcast_to(b_mask, arr.shape) else: b2_mask = None diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py index 2a9ed0fb9764d..06f72bb36de5c 100644 --- a/pandas/core/computation/check.py +++ b/pandas/core/computation/check.py @@ -2,7 +2,7 @@ from distutils.version import LooseVersion _NUMEXPR_INSTALLED = False -_MIN_NUMEXPR_VERSION = "2.4.6" +_MIN_NUMEXPR_VERSION = "2.6.1" try: import numexpr as ne diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 640b2812d3e85..aff6f17fba2e2 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1805,12 +1805,7 @@ def to_series(right): elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns - try: - right = np.broadcast_to(right, left.shape) - except AttributeError: - # numpy < 1.10.0 - right = np.tile(right, (1, left.shape[1])) - + right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py index 5032b259e9831..385e88d58cc26 100644 --- a/pandas/plotting/_compat.py +++ b/pandas/plotting/_compat.py @@ -18,15 +18,8 @@ def inner(): return inner -_mpl_ge_1_2_1 = _mpl_version('1.2.1', operator.ge) -_mpl_le_1_2_1 = _mpl_version('1.2.1', operator.le) -_mpl_ge_1_3_1 = _mpl_version('1.3.1', operator.ge) -_mpl_ge_1_4_0 = _mpl_version('1.4.0', operator.ge) -_mpl_ge_1_4_1 = _mpl_version('1.4.1', operator.ge) -_mpl_ge_1_5_0 = _mpl_version('1.5.0', operator.ge) -_mpl_ge_2_0_0 = _mpl_version('2.0.0', operator.ge) -_mpl_le_2_0_0 = _mpl_version('2.0.0', operator.le) _mpl_ge_2_0_1 = _mpl_version('2.0.1', operator.ge) _mpl_ge_2_1_0 = _mpl_version('2.1.0', operator.ge) _mpl_ge_2_2_0 = _mpl_version('2.2.0', operator.ge) +_mpl_ge_2_2_2 = _mpl_version('2.2.2', operator.ge) _mpl_ge_3_0_0 = _mpl_version('3.0.0', operator.ge) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py 
index 96ea8a542a451..fe773a6054db5 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -35,8 +35,6 @@ from pandas.tseries.frequencies import FreqGroup from pandas.core.indexes.period import Period, PeriodIndex -from pandas.plotting._compat import _mpl_le_2_0_0 - # constants HOURS_PER_DAY = 24. MIN_PER_HOUR = 60. @@ -371,13 +369,6 @@ def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'): if self._tz is dates.UTC: self._tz._utcoffset = self._tz.utcoffset(None) - # For mpl > 2.0 the format strings are controlled via rcparams - # so do not mess with them. For mpl < 2.0 change the second - # break point and add a musec break point - if _mpl_le_2_0_0(): - self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S' - self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f' - class PandasAutoDateLocator(dates.AutoDateLocator): diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 77c97412bd3d7..405c534e8528b 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -30,10 +30,7 @@ from pandas.io.formats.printing import pprint_thing -from pandas.plotting._compat import (_mpl_ge_1_3_1, - _mpl_ge_1_5_0, - _mpl_ge_2_0_0, - _mpl_ge_3_0_0) +from pandas.plotting._compat import _mpl_ge_3_0_0 from pandas.plotting._style import (plot_params, _get_standard_colors) from pandas.plotting._tools import (_subplots, _flatten, table, @@ -551,14 +548,6 @@ def plt(self): import matplotlib.pyplot as plt return plt - @staticmethod - def mpl_ge_1_3_1(): - return _mpl_ge_1_3_1() - - @staticmethod - def mpl_ge_1_5_0(): - return _mpl_ge_1_5_0() - _need_to_set_index = False def _get_xticks(self, convert_period=False): @@ -908,8 +897,7 @@ def _make_plot(self): scatter = ax.scatter(data[x].values, data[y].values, c=c_values, label=label, cmap=cmap, **self.kwds) if cb: - if self.mpl_ge_1_3_1(): - cbar_label = c if c_is_column else '' + cbar_label = c if c_is_column else '' self._plot_colorbar(ax, label=cbar_label) if label is not None: @@ -1012,10 +1000,9 @@ def 
_make_plot(self): **kwds) self._add_legend_handle(newlines[0], label, index=i) - if not _mpl_ge_2_0_0(): - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) - ax.set_xlim(left, right) + lines = _get_all_lines(ax) + left, right = _get_xlim(lines) + ax.set_xlim(left, right) @classmethod def _plot(cls, ax, x, y, style=None, column_num=None, @@ -1141,8 +1128,7 @@ def _plot(cls, ax, x, y, style=None, column_num=None, # need to remove label, because subplots uses mpl legend as it is line_kwds = kwds.copy() - if cls.mpl_ge_1_5_0(): - line_kwds.pop('label') + line_kwds.pop('label') lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds) # get data from the line to get coordinates for fill_between @@ -1165,19 +1151,9 @@ def _plot(cls, ax, x, y, style=None, column_num=None, cls._update_stacker(ax, stacking_id, y) # LinePlot expects list of artists - res = [rect] if cls.mpl_ge_1_5_0() else lines + res = [rect] return res - def _add_legend_handle(self, handle, label, index=None): - if not self.mpl_ge_1_5_0(): - from matplotlib.patches import Rectangle - # Because fill_between isn't supported in legend, - # specifically add Rectangle handle here - alpha = self.kwds.get('alpha', None) - handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), - alpha=alpha) - LinePlot._add_legend_handle(self, handle, label, index=index) - def _post_plot_logic(self, ax, data): LinePlot._post_plot_logic(self, ax, data) diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index c72e092c73aa2..9bc12d22e1685 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -4,14 +4,12 @@ import warnings from contextlib import contextmanager -import re import numpy as np from pandas.core.dtypes.common import is_list_like from pandas.compat import lrange, lmap import pandas.compat as compat -from pandas.plotting._compat import _mpl_ge_2_0_0 def _get_standard_colors(num_colors=None, colormap=None, color_type='default', @@ -72,18 +70,9 @@ def 
_maybe_valid_colors(colors): # check whether each character can be convertible to colors maybe_color_cycle = _maybe_valid_colors(list(colors)) if maybe_single_color and maybe_color_cycle and len(colors) > 1: - # Special case for single str 'CN' match and convert to hex - # for supporting matplotlib < 2.0.0 - if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0(): - hex_color = [c['color'] - for c in list(plt.rcParams['axes.prop_cycle'])] - colors = [hex_color[int(colors[1])]] - else: - # this may no longer be required - msg = ("'{0}' can be parsed as both single color and " - "color cycle. Specify each color using a list " - "like ['{0}'] or {1}") - raise ValueError(msg.format(colors, list(colors))) + hex_color = [c['color'] + for c in list(plt.rcParams['axes.prop_cycle'])] + colors = [hex_color[int(colors[1])]] elif maybe_single_color: colors = [colors] else: diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index d81ab2b3a2ec3..fe98b74499983 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -15,9 +15,7 @@ import pandas.core.indexes.period as period from pandas.core import ops -from pandas import ( - Period, PeriodIndex, period_range, Series, - _np_version_under1p10) +from pandas import Period, PeriodIndex, period_range, Series # ------------------------------------------------------------------ @@ -897,20 +895,14 @@ def test_pi_ops_errors(self, ng): with pytest.raises(TypeError): np.add(obj, ng) - if _np_version_under1p10: - assert np.add(ng, obj) is NotImplemented - else: - with pytest.raises(TypeError): - np.add(ng, obj) + with pytest.raises(TypeError): + np.add(ng, obj) with pytest.raises(TypeError): np.subtract(obj, ng) - if _np_version_under1p10: - assert np.subtract(ng, obj) is NotImplemented - else: - with pytest.raises(TypeError): - np.subtract(ng, obj) + with pytest.raises(TypeError): + np.subtract(ng, obj) def test_pi_ops_nat(self): idx = PeriodIndex(['2011-01', 
'2011-02', 'NaT', '2011-04'], @@ -1014,10 +1006,7 @@ def test_pi_sub_period(self): tm.assert_index_equal(result, exp) result = np.subtract(pd.Period('2012-01', freq='M'), idx) - if _np_version_under1p10: - assert result is NotImplemented - else: - tm.assert_index_equal(result, exp) + tm.assert_index_equal(result, exp) exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx') tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 8864e5fffeb12..b83fba7e7b277 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -15,7 +15,6 @@ from pandas.compat import lrange, PY35 from pandas import (compat, isna, notna, DataFrame, Series, MultiIndex, date_range, Timestamp, Categorical, - _np_version_under1p12, to_datetime, to_timedelta) import pandas as pd import pandas.core.nanops as nanops @@ -2021,9 +2020,6 @@ def test_dot(self): @pytest.mark.skipif(not PY35, reason='matmul supported for Python>=3.5') - @pytest.mark.xfail( - _np_version_under1p12, - reason="unpredictable return types under numpy < 1.12") def test_matmul(self): # matmul test is for GH 10259 a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py index 2f264874378bc..3dbac79fed02b 100644 --- a/pandas/tests/frame/test_quantile.py +++ b/pandas/tests/frame/test_quantile.py @@ -6,7 +6,7 @@ import pytest import numpy as np -from pandas import (DataFrame, Series, Timestamp, _np_version_under1p11) +from pandas import DataFrame, Series, Timestamp import pandas as pd from pandas.util.testing import assert_series_equal, assert_frame_equal @@ -154,12 +154,8 @@ def test_quantile_interpolation(self): result = df.quantile([.25, .5], interpolation='midpoint') # https://github.com/numpy/numpy/issues/7163 - if _np_version_under1p11: - expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 
2.5]], - index=[.25, .5], columns=['a', 'b', 'c']) - else: - expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], - index=[.25, .5], columns=['a', 'b', 'c']) + expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], + index=[.25, .5], columns=['a', 'b', 'c']) assert_frame_equal(result, expected) def test_quantile_multi(self): diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index b60b222d095b9..9ce77326d37b7 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -7,8 +7,7 @@ import pandas._libs.tslib as tslib import pandas.util.testing as tm from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, - date_range, _np_version_under1p10, Index, - bdate_range) + date_range, bdate_range, Index) from pandas.tseries.offsets import BMonthEnd, CDay, BDay, Day, Hour from pandas.tests.test_base import Ops from pandas.core.dtypes.generic import ABCDateOffset @@ -89,12 +88,11 @@ def test_numpy_minmax(self): assert np.argmin(dr) == 0 assert np.argmax(dr) == 5 - if not _np_version_under1p10: - errmsg = "the 'out' parameter is not supported" - tm.assert_raises_regex( - ValueError, errmsg, np.argmin, dr, out=0) - tm.assert_raises_regex( - ValueError, errmsg, np.argmax, dr, out=0) + errmsg = "the 'out' parameter is not supported" + tm.assert_raises_regex( + ValueError, errmsg, np.argmin, dr, out=0) + tm.assert_raises_regex( + ValueError, errmsg, np.argmax, dr, out=0) def test_repeat_range(self, tz_naive_fixture): tz = tz_naive_fixture diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 85aa3f6a38fb3..a59efe57f83c4 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -5,8 +5,7 @@ import pandas as pd import pandas._libs.tslib as tslib import pandas.util.testing as tm -from pandas import (DatetimeIndex, PeriodIndex, Series, Period, - _np_version_under1p10, Index) +from 
pandas import DatetimeIndex, PeriodIndex, Series, Period, Index from pandas.tests.test_base import Ops @@ -73,12 +72,11 @@ def test_numpy_minmax(self): assert np.argmin(pr) == 0 assert np.argmax(pr) == 5 - if not _np_version_under1p10: - errmsg = "the 'out' parameter is not supported" - tm.assert_raises_regex( - ValueError, errmsg, np.argmin, pr, out=0) - tm.assert_raises_regex( - ValueError, errmsg, np.argmax, pr, out=0) + errmsg = "the 'out' parameter is not supported" + tm.assert_raises_regex( + ValueError, errmsg, np.argmin, pr, out=0) + tm.assert_raises_regex( + ValueError, errmsg, np.argmax, pr, out=0) def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py index 6d142722c315a..82527464ea6e7 100644 --- a/pandas/tests/indexes/period/test_partial_slicing.py +++ b/pandas/tests/indexes/period/test_partial_slicing.py @@ -5,7 +5,7 @@ import pandas as pd from pandas.util import testing as tm from pandas import (Series, period_range, DatetimeIndex, PeriodIndex, - DataFrame, _np_version_under1p12, Period) + DataFrame, Period) class TestPeriodIndex(object): @@ -68,16 +68,12 @@ def test_range_slice_day(self): didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400) pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400) - # changed to TypeError in 1.12 - # https://github.com/numpy/numpy/pull/6271 - exc = IndexError if _np_version_under1p12 else TypeError - for idx in [didx, pidx]: # slices against index should raise IndexError values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H', '2013/02/01 09:00'] for v in values: - with pytest.raises(exc): + with pytest.raises(TypeError): idx[v:] s = Series(np.random.rand(len(idx)), index=idx) @@ -89,7 +85,7 @@ def test_range_slice_day(self): invalid = ['2013/02/01 9H', '2013/02/01 09:00'] for v in invalid: - with pytest.raises(exc): + with pytest.raises(TypeError): idx[v:] 
def test_range_slice_seconds(self): @@ -98,16 +94,12 @@ def test_range_slice_seconds(self): periods=4000) pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) - # changed to TypeError in 1.12 - # https://github.com/numpy/numpy/pull/6271 - exc = IndexError if _np_version_under1p12 else TypeError - for idx in [didx, pidx]: # slices against index should raise IndexError values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H', '2013/02/01 09:00'] for v in values: - with pytest.raises(exc): + with pytest.raises(TypeError): idx[v:] s = Series(np.random.rand(len(idx)), index=idx) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index d7bdd18f48523..9f8a3e893c3de 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -5,10 +5,8 @@ import pandas as pd import pandas.util.testing as tm -from pandas import to_timedelta from pandas import (Series, Timedelta, Timestamp, TimedeltaIndex, - timedelta_range, - _np_version_under1p10) + timedelta_range, to_timedelta) from pandas._libs.tslib import iNaT from pandas.tests.test_base import Ops from pandas.tseries.offsets import Day, Hour @@ -68,12 +66,11 @@ def test_numpy_minmax(self): assert np.argmin(td) == 0 assert np.argmax(td) == 5 - if not _np_version_under1p10: - errmsg = "the 'out' parameter is not supported" - tm.assert_raises_regex( - ValueError, errmsg, np.argmin, td, out=0) - tm.assert_raises_regex( - ValueError, errmsg, np.argmax, td, out=0) + errmsg = "the 'out' parameter is not supported" + tm.assert_raises_regex( + ValueError, errmsg, np.argmin, td, out=0) + tm.assert_raises_regex( + ValueError, errmsg, np.argmax, td, out=0) def test_value_counts_unique(self): # GH 7735 diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index b142ce339879c..f41a3a10604af 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -39,7 +39,7 @@ def 
_ok_for_gaussian_kde(kind): except ImportError: return False - return plotting._compat._mpl_ge_1_5_0() + return True @td.skip_if_no_mpl @@ -50,31 +50,16 @@ def setup_method(self, method): import matplotlib as mpl mpl.rcdefaults() - self.mpl_le_1_2_1 = plotting._compat._mpl_le_1_2_1() - self.mpl_ge_1_3_1 = plotting._compat._mpl_ge_1_3_1() - self.mpl_ge_1_4_0 = plotting._compat._mpl_ge_1_4_0() - self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0() - self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0() self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1() + self.mpl_ge_2_1_0 = plotting._compat._mpl_ge_2_1_0() self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0() + self.mpl_ge_2_2_2 = plotting._compat._mpl_ge_2_2_2() self.mpl_ge_3_0_0 = plotting._compat._mpl_ge_3_0_0() - if self.mpl_ge_1_4_0: - self.bp_n_objects = 7 - else: - self.bp_n_objects = 8 - if self.mpl_ge_1_5_0: - # 1.5 added PolyCollections to legend handler - # so we have twice as many items. - self.polycollection_factor = 2 - else: - self.polycollection_factor = 1 - - if self.mpl_ge_2_0_0: - self.default_figsize = (6.4, 4.8) - else: - self.default_figsize = (8.0, 6.0) - self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default' + self.bp_n_objects = 7 + self.polycollection_factor = 2 + self.default_figsize = (6.4, 4.8) + self.default_tick_position = 'left' n = 100 with tm.RNGContext(42): @@ -462,7 +447,7 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None, assert isinstance(value.lines, dict) elif return_type == 'dict': line = value['medians'][0] - axes = line.axes if self.mpl_ge_1_5_0 else line.get_axes() + axes = line.axes if check_ax_title: assert axes.get_title() == key else: @@ -510,19 +495,11 @@ def is_grid_on(): obj.plot(kind=kind, grid=True, **kws) assert is_grid_on() - def _maybe_unpack_cycler(self, rcParams, field='color'): + def _unpack_cycler(self, rcParams, field='color'): """ - Compat layer for MPL 1.5 change to color cycle - - Before: 
plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...] - After : plt.rcParams['axes.prop_cycle'] -> cycler(...) + Auxiliary function for correctly unpacking cycler after MPL >= 1.5 """ - if self.mpl_ge_1_5_0: - cyl = rcParams['axes.prop_cycle'] - colors = [v[field] for v in cyl] - else: - colors = rcParams['axes.color_cycle'] - return colors + return [v[field] for v in rcParams['axes.prop_cycle']] def _check_plot_works(f, filterwarnings='always', **kwargs): diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 7661b46a79061..e89584ca35d94 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -3,7 +3,6 @@ import pytest import itertools import string -from distutils.version import LooseVersion from pandas import Series, DataFrame, MultiIndex from pandas.compat import range, lzip @@ -21,15 +20,6 @@ """ Test cases for .boxplot method """ -def _skip_if_mpl_14_or_dev_boxplot(): - # GH 8382 - # Boxplot failures on 1.4 and 1.4.1 - # Don't need try / except since that's done at class level - import matplotlib - if LooseVersion(matplotlib.__version__) >= LooseVersion('1.4'): - pytest.skip("Matplotlib Regression in 1.4 and current dev.") - - @td.skip_if_no_mpl class TestDataFramePlots(TestPlotBase): @@ -71,12 +61,12 @@ def test_boxplot_legacy2(self): # passed ax should be used: fig, ax = self.plt.subplots() axes = df.boxplot('Col1', by='X', ax=ax) - ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes() + ax_axes = ax.axes assert ax_axes is axes fig, ax = self.plt.subplots() axes = df.groupby('Y').boxplot(ax=ax, return_type='axes') - ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes() + ax_axes = ax.axes assert ax_axes is axes['A'] # Multiple columns with an ax argument should use same figure @@ -155,7 +145,6 @@ def _check_ax_limits(col, ax): @pytest.mark.slow def test_boxplot_empty_column(self): - _skip_if_mpl_14_or_dev_boxplot() df = 
DataFrame(np.random.randn(20, 4)) df.loc[:, 0] = np.nan _check_plot_works(df.boxplot, return_type='axes') diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index c66e03fe7b2a2..4865638671ea9 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -7,7 +7,7 @@ from pandas.compat import lrange, zip import numpy as np -from pandas import Index, Series, DataFrame, NaT +from pandas import Index, Series, DataFrame, NaT, isna from pandas.compat import PY3 from pandas.core.indexes.datetimes import date_range, bdate_range from pandas.core.indexes.timedeltas import timedelta_range @@ -135,7 +135,7 @@ def f(*args, **kwds): _, ax = self.plt.subplots() ts.plot(style='k', ax=ax) - color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.) + color = (0., 0., 0., 1) assert color == ax.get_lines()[0].get_color() def test_both_style_and_color(self): @@ -403,80 +403,92 @@ def test_get_finder(self): def test_finder_daily(self): day_lst = [10, 40, 252, 400, 950, 2750, 10000] - if self.mpl_ge_2_0_0: + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst) + else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3 xpl1 = [7565, 7564, 7553, 7546, 7518, 7428, 7066] xpl2 = [7566, 7564, 7554, 7546, 7519, 7429, 7066] - else: - xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst) + rs1 = [] + rs2 = [] for i, n in enumerate(day_lst): - xp = xpl1[i] rng = bdate_range('1999-1-1', periods=n) ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[0] - assert xp == rs - xp = xpl2[i] + rs1.append(xaxis.get_majorticklocs()[0]) + vmin, vmax = ax.get_xlim() ax.set_xlim(vmin + 0.9, vmax) - rs = xaxis.get_majorticklocs()[0] - assert xp == rs + 
rs2.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) + assert rs1 == xpl1 + assert rs2 == xpl2 + @pytest.mark.slow def test_finder_quarterly(self): yrs = [3.5, 11] - if self.mpl_ge_2_0_0: + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs) + else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3 xpl1 = [68, 68] xpl2 = [72, 68] - else: - xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs) + rs1 = [] + rs2 = [] for i, n in enumerate(yrs): - xp = xpl1[i] rng = period_range('1987Q2', periods=int(n * 4), freq='Q') ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[0] - assert rs == xp - xp = xpl2[i] + rs1.append(xaxis.get_majorticklocs()[0]) + (vmin, vmax) = ax.get_xlim() ax.set_xlim(vmin + 0.9, vmax) - rs = xaxis.get_majorticklocs()[0] - assert xp == rs + rs2.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) + assert rs1 == xpl1 + assert rs2 == xpl2 + @pytest.mark.slow def test_finder_monthly(self): yrs = [1.15, 2.5, 4, 11] - if self.mpl_ge_2_0_0: + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs) + else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3 xpl1 = [216, 216, 204, 204] xpl2 = [216, 216, 216, 204] - else: - xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs) + rs1 = [] + rs2 = [] for i, n in enumerate(yrs): - xp = xpl1[i] rng = period_range('1987Q2', periods=int(n * 12), freq='M') ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[0] - assert rs == xp - xp = xpl2[i] + rs1.append(xaxis.get_majorticklocs()[0]) + vmin, vmax = ax.get_xlim() ax.set_xlim(vmin + 0.9, vmax) - rs = 
xaxis.get_majorticklocs()[0] - assert xp == rs + rs2.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) + assert rs1 == xpl1 + assert rs2 == xpl2 + def test_finder_monthly_long(self): rng = period_range('1988Q1', periods=24 * 12, freq='M') ser = Series(np.random.randn(len(rng)), rng) @@ -489,21 +501,26 @@ def test_finder_monthly_long(self): @pytest.mark.slow def test_finder_annual(self): - if self.mpl_ge_2_0_0: - xp = [1986, 1986, 1990, 1990, 1995, 2020, 1970, 1970] - else: + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] + else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3 + xp = [1986, 1986, 1990, 1990, 1995, 2020, 1970, 1970] + xp = [Period(x, freq='A').ordinal for x in xp] + rs = [] for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]): rng = period_range('1987', periods=nyears, freq='A') ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[0] - assert rs == Period(xp[i], freq='A').ordinal + rs.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) + assert rs == xp + @pytest.mark.slow def test_finder_minutely(self): nminutes = 50 * 24 * 60 @@ -513,10 +530,8 @@ def test_finder_minutely(self): ser.plot(ax=ax) xaxis = ax.get_xaxis() rs = xaxis.get_majorticklocs()[0] - if self.mpl_ge_2_0_0: - xp = Period('1998-12-29 12:00', freq='Min').ordinal - else: - xp = Period('1/1/1999', freq='Min').ordinal + xp = Period('1/1/1999', freq='Min').ordinal + assert rs == xp def test_finder_hourly(self): @@ -527,13 +542,13 @@ def test_finder_hourly(self): ser.plot(ax=ax) xaxis = ax.get_xaxis() rs = xaxis.get_majorticklocs()[0] - if self.mpl_ge_2_0_0: - xp = Period('1998-12-31 22:00', freq='H').ordinal - else: + if self.mpl_ge_2_0_1: xp = Period('1/1/1999', freq='H').ordinal + else: # 2.0.0 + xp = Period('1998-12-31 
22:00', freq='H').ordinal + assert rs == xp - @td.skip_if_mpl_1_5 @pytest.mark.slow def test_gaps(self): ts = tm.makeTimeSeries() @@ -544,6 +559,12 @@ def test_gaps(self): assert len(lines) == 1 line = lines[0] data = line.get_xydata() + + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[5:25, 1].all() @@ -559,6 +580,12 @@ def test_gaps(self): assert len(lines) == 1 line = lines[0] data = line.get_xydata() + + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[2:5, 1].all() @@ -574,11 +601,15 @@ def test_gaps(self): assert len(lines) == 1 line = lines[0] data = line.get_xydata() + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[2:5, 1].all() - @td.skip_if_mpl_1_5 @pytest.mark.slow def test_gap_upsample(self): low = tm.makeTimeSeries() @@ -592,8 +623,13 @@ def test_gap_upsample(self): lines = ax.get_lines() assert len(lines) == 1 assert len(ax.right_ax.get_lines()) == 1 + line = lines[0] data = line.get_xydata() + if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1 + or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)): + # 2.0.0, 2.2.0 (exactly) or >= 3.0.0 + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask @@ -659,8 +695,6 @@ def test_secondary_y_ts(self): @pytest.mark.slow 
@td.skip_if_no_scipy def test_secondary_kde(self): - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") _skip_if_no_scipy_gaussian_kde() ser = Series(np.random.randn(10)) @@ -1359,18 +1393,13 @@ def test_plot_outofbounds_datetime(self): def test_format_timedelta_ticks_narrow(self): - if self.mpl_ge_2_2_0: - expected_labels = (['-1 days 23:59:59.999999998'] + - ['00:00:00.0000000{:0>2d}'.format(2 * i) - for i in range(6)]) - elif self.mpl_ge_2_0_0: + if self.mpl_ge_2_0_1: + expected_labels = (['00:00:00.0000000{:0>2d}'.format(i) + for i in range(10)]) + else: # 2.0.0 expected_labels = [''] + [ '00:00:00.00000000{:d}'.format(2 * i) for i in range(5)] + [''] - else: - expected_labels = [ - '00:00:00.00000000{:d}'.format(i) - for i in range(10)] rng = timedelta_range('0', periods=10, freq='ns') df = DataFrame(np.random.randn(len(rng), 3), rng) @@ -1378,41 +1407,30 @@ def test_format_timedelta_ticks_narrow(self): df.plot(fontsize=2, ax=ax) fig.canvas.draw() labels = ax.get_xticklabels() - assert len(labels) == len(expected_labels) - for l, l_expected in zip(labels, expected_labels): - assert l.get_text() == l_expected - def test_format_timedelta_ticks_wide(self): + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels - if self.mpl_ge_2_0_0: - expected_labels = [ - '', - '00:00:00', - '1 days 03:46:40', - '2 days 07:33:20', - '3 days 11:20:00', - '4 days 15:06:40', - '5 days 18:53:20', - '6 days 22:40:00', - '8 days 02:26:40', - '9 days 06:13:20', - '' - ] - if self.mpl_ge_2_2_0: - expected_labels[0] = '-2 days 20:13:20' - expected_labels[-1] = '10 days 10:00:00' - else: - expected_labels = [ - '00:00:00', - '1 days 03:46:40', - '2 days 07:33:20', - '3 days 11:20:00', - '4 days 15:06:40', - '5 days 18:53:20', - '6 days 22:40:00', - '8 days 02:26:40', - '' - ] + def test_format_timedelta_ticks_wide(self): + expected_labels = [ + '', + '00:00:00', + '1 days 03:46:40', + '2 
days 07:33:20', + '3 days 11:20:00', + '4 days 15:06:40', + '5 days 18:53:20', + '6 days 22:40:00', + '8 days 02:26:40', + '9 days 06:13:20', + '' + ] + if self.mpl_ge_2_2_0: + expected_labels = expected_labels[1:-1] + elif self.mpl_ge_2_0_1: + expected_labels = expected_labels[1:-1] + expected_labels[-1] = '' rng = timedelta_range('0', periods=10, freq='1 d') df = DataFrame(np.random.randn(len(rng), 3), rng) @@ -1420,9 +1438,10 @@ def test_format_timedelta_ticks_wide(self): ax = df.plot(fontsize=2, ax=ax) fig.canvas.draw() labels = ax.get_xticklabels() - assert len(labels) == len(expected_labels) - for l, l_expected in zip(labels, expected_labels): - assert l.get_text() == l_expected + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels def test_timedelta_plot(self): # test issue #8711 diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index a4f5d8e2f4ff2..25dfbaba762c9 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -141,22 +141,15 @@ def test_plot(self): fig, ax = self.plt.subplots() axes = df.plot.bar(subplots=True, ax=ax) assert len(axes) == 1 - if self.mpl_ge_1_5_0: - result = ax.axes - else: - result = ax.get_axes() # deprecated + result = ax.axes assert result is axes[0] # GH 15516 def test_mpl2_color_cycle_str(self): - # test CN mpl 2.0 color cycle - if self.mpl_ge_2_0_0: - colors = ['C' + str(x) for x in range(10)] - df = DataFrame(randn(10, 3), columns=['a', 'b', 'c']) - for c in colors: - _check_plot_works(df.plot, color=c) - else: - pytest.skip("not supported in matplotlib < 2.0.0") + colors = ['C' + str(x) for x in range(10)] + df = DataFrame(randn(10, 3), columns=['a', 'b', 'c']) + for c in colors: + _check_plot_works(df.plot, color=c) def test_color_single_series_list(self): # GH 3486 @@ -854,7 +847,7 @@ def test_area_lim(self): @pytest.mark.slow def test_bar_colors(self): import 
matplotlib.pyplot as plt - default_colors = self._maybe_unpack_cycler(plt.rcParams) + default_colors = self._unpack_cycler(plt.rcParams) df = DataFrame(randn(5, 5)) ax = df.plot.bar() @@ -1180,11 +1173,9 @@ def test_plot_scatter_with_c(self): # default to Greys assert ax.collections[0].cmap.name == 'Greys' - if self.mpl_ge_1_3_1: - - # n.b. there appears to be no public method to get the colorbar - # label - assert ax.collections[0].colorbar._label == 'z' + # n.b. there appears to be no public method + # to get the colorbar label + assert ax.collections[0].colorbar._label == 'z' cm = 'cubehelix' ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm) @@ -1227,7 +1218,7 @@ def test_scatter_colors(self): with pytest.raises(TypeError): df.plot.scatter(x='a', y='b', c='c', color='green') - default_colors = self._maybe_unpack_cycler(self.plt.rcParams) + default_colors = self._unpack_cycler(self.plt.rcParams) ax = df.plot.scatter(x='a', y='b', c='c') tm.assert_numpy_array_equal( @@ -1392,10 +1383,7 @@ def test_bar_edge(self): def test_bar_log_no_subplots(self): # GH3254, GH3298 matplotlib/matplotlib#1882, #1892 # regressions in 1.2.1 - expected = np.array([1., 10.]) - - if not self.mpl_le_1_2_1: - expected = np.hstack((.1, expected, 100)) + expected = np.array([.1, 1., 10., 100]) # no subplots df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5)) @@ -1404,9 +1392,7 @@ def test_bar_log_no_subplots(self): @pytest.mark.slow def test_bar_log_subplots(self): - expected = np.array([1., 10., 100., 1000.]) - if not self.mpl_le_1_2_1: - expected = np.hstack((.1, expected, 1e4)) + expected = np.array([.1, 1., 10., 100., 1000., 1e4]) ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar( log=True, subplots=True) @@ -1521,8 +1507,6 @@ def test_boxplot_subplots_return_type(self): @td.skip_if_no_scipy def test_kde_df(self): _skip_if_no_scipy_gaussian_kde() - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") df = DataFrame(randn(100, 4)) ax = 
_check_plot_works(df.plot, kind='kde') @@ -1545,8 +1529,6 @@ def test_kde_df(self): @td.skip_if_no_scipy def test_kde_missing_vals(self): _skip_if_no_scipy_gaussian_kde() - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") df = DataFrame(np.random.uniform(size=(100, 4))) df.loc[0, 0] = np.nan @@ -1555,8 +1537,6 @@ def test_kde_missing_vals(self): @pytest.mark.slow def test_hist_df(self): from matplotlib.patches import Rectangle - if self.mpl_le_1_2_1: - pytest.skip("not supported in matplotlib <= 1.2.x") df = DataFrame(randn(100, 4)) series = df[0] @@ -1668,44 +1648,42 @@ def test_hist_df_coord(self): expected_y=np.array([0, 0, 0, 0, 0]), expected_h=np.array([6, 7, 8, 9, 10])) - if self.mpl_ge_1_3_1: - - # horizontal - ax = df.plot.hist(bins=5, orientation='horizontal') - self._check_box_coord(ax.patches[:5], - expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([10, 9, 8, 7, 6])) - self._check_box_coord(ax.patches[5:10], - expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([8, 8, 8, 8, 8])) - self._check_box_coord(ax.patches[10:], - expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([6, 7, 8, 9, 10])) - - ax = df.plot.hist(bins=5, stacked=True, - orientation='horizontal') - self._check_box_coord(ax.patches[:5], - expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([10, 9, 8, 7, 6])) - self._check_box_coord(ax.patches[5:10], - expected_x=np.array([10, 9, 8, 7, 6]), - expected_w=np.array([8, 8, 8, 8, 8])) - self._check_box_coord( - ax.patches[10:], - expected_x=np.array([18, 17, 16, 15, 14]), - expected_w=np.array([6, 7, 8, 9, 10])) - - axes = df.plot.hist(bins=5, stacked=True, subplots=True, - orientation='horizontal') - self._check_box_coord(axes[0].patches, - expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([10, 9, 8, 7, 6])) - self._check_box_coord(axes[1].patches, - expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([8, 8, 8, 8, 8])) - self._check_box_coord(axes[2].patches, - 
expected_x=np.array([0, 0, 0, 0, 0]), - expected_w=np.array([6, 7, 8, 9, 10])) + # horizontal + ax = df.plot.hist(bins=5, orientation='horizontal') + self._check_box_coord(ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(ax.patches[5:10], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(ax.patches[10:], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10])) + + ax = df.plot.hist(bins=5, stacked=True, + orientation='horizontal') + self._check_box_coord(ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(ax.patches[5:10], + expected_x=np.array([10, 9, 8, 7, 6]), + expected_w=np.array([8, 8, 8, 8, 8])) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([18, 17, 16, 15, 14]), + expected_w=np.array([6, 7, 8, 9, 10])) + + axes = df.plot.hist(bins=5, stacked=True, subplots=True, + orientation='horizontal') + self._check_box_coord(axes[0].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(axes[1].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(axes[2].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10])) @pytest.mark.slow def test_plot_int_columns(self): @@ -1904,14 +1882,13 @@ def test_dont_modify_colors(self): def test_line_colors_and_styles_subplots(self): # GH 9894 from matplotlib import cm - default_colors = self._maybe_unpack_cycler(self.plt.rcParams) + default_colors = self._unpack_cycler(self.plt.rcParams) df = DataFrame(randn(5, 5)) axes = df.plot(subplots=True) for ax, c in zip(axes, list(default_colors)): - if self.mpl_ge_2_0_0: - c = [c] + c = [c] self._check_colors(ax.get_lines(), linecolors=c) tm.close() @@ -1992,13 +1969,7 @@ def test_area_colors(self): 
self._check_colors(poly, facecolors=custom_colors) handles, labels = ax.get_legend_handles_labels() - if self.mpl_ge_1_5_0: - self._check_colors(handles, facecolors=custom_colors) - else: - # legend is stored as Line2D, thus check linecolors - linehandles = [x for x in handles - if not isinstance(x, PolyCollection)] - self._check_colors(linehandles, linecolors=custom_colors) + self._check_colors(handles, facecolors=custom_colors) for h in handles: assert h.get_alpha() is None @@ -2011,12 +1982,7 @@ def test_area_colors(self): self._check_colors(poly, facecolors=jet_colors) handles, labels = ax.get_legend_handles_labels() - if self.mpl_ge_1_5_0: - self._check_colors(handles, facecolors=jet_colors) - else: - linehandles = [x for x in handles - if not isinstance(x, PolyCollection)] - self._check_colors(linehandles, linecolors=jet_colors) + self._check_colors(handles, facecolors=jet_colors) for h in handles: assert h.get_alpha() is None tm.close() @@ -2029,18 +1995,14 @@ def test_area_colors(self): self._check_colors(poly, facecolors=jet_with_alpha) handles, labels = ax.get_legend_handles_labels() - if self.mpl_ge_1_5_0: - linecolors = jet_with_alpha - else: - # Line2D can't have alpha in its linecolor - linecolors = jet_colors + linecolors = jet_with_alpha self._check_colors(handles[:len(jet_colors)], linecolors=linecolors) for h in handles: assert h.get_alpha() == 0.5 @pytest.mark.slow def test_hist_colors(self): - default_colors = self._maybe_unpack_cycler(self.plt.rcParams) + default_colors = self._unpack_cycler(self.plt.rcParams) df = DataFrame(randn(5, 5)) ax = df.plot.hist() @@ -2076,8 +2038,6 @@ def test_hist_colors(self): @td.skip_if_no_scipy def test_kde_colors(self): _skip_if_no_scipy_gaussian_kde() - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") from matplotlib import cm @@ -2101,11 +2061,9 @@ def test_kde_colors(self): @td.skip_if_no_scipy def test_kde_colors_and_styles_subplots(self): _skip_if_no_scipy_gaussian_kde() - if not 
self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") from matplotlib import cm - default_colors = self._maybe_unpack_cycler(self.plt.rcParams) + default_colors = self._unpack_cycler(self.plt.rcParams) df = DataFrame(randn(5, 5)) @@ -2164,7 +2122,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c=None): # TODO: outside this func? if fliers_c is None: - fliers_c = 'k' if self.mpl_ge_2_0_0 else 'b' + fliers_c = 'k' self._check_colors(bp['boxes'], linecolors=[box_c] * len(bp['boxes'])) self._check_colors(bp['whiskers'], @@ -2176,7 +2134,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', self._check_colors(bp['caps'], linecolors=[caps_c] * len(bp['caps'])) - default_colors = self._maybe_unpack_cycler(self.plt.rcParams) + default_colors = self._unpack_cycler(self.plt.rcParams) df = DataFrame(randn(5, 5)) bp = df.plot.box(return_type='dict') @@ -2225,17 +2183,14 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', def test_default_color_cycle(self): import matplotlib.pyplot as plt + import cycler colors = list('rgbk') - if self.mpl_ge_1_5_0: - import cycler - plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors) - else: - plt.rcParams['axes.color_cycle'] = colors + plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors) df = DataFrame(randn(5, 3)) ax = df.plot() - expected = self._maybe_unpack_cycler(plt.rcParams)[:3] + expected = self._unpack_cycler(plt.rcParams)[:3] self._check_colors(ax.get_lines(), linecolors=expected) def test_unordered_ts(self): @@ -2591,19 +2546,12 @@ def test_errorbar_asymmetrical(self): # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]... 
df = DataFrame(np.arange(15).reshape(3, 5)).T - data = df.values ax = df.plot(yerr=err, xerr=err / 2) - if self.mpl_ge_2_0_0: - yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1] - expected_0_0 = err[0, :, 0] * np.array([-1, 1]) - tm.assert_almost_equal(yerr_0_0, expected_0_0) - else: - assert ax.lines[7].get_ydata()[0] == data[0, 1] - err[1, 0, 0] - assert ax.lines[8].get_ydata()[0] == data[0, 1] + err[1, 1, 0] - assert ax.lines[5].get_xdata()[0] == -err[1, 0, 0] / 2 - assert ax.lines[6].get_xdata()[0] == err[1, 1, 0] / 2 + yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1] + expected_0_0 = err[0, :, 0] * np.array([-1, 1]) + tm.assert_almost_equal(yerr_0_0, expected_0_0) with pytest.raises(ValueError): df.plot(yerr=err.T) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 2864877550bac..1d9942603a269 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -122,7 +122,7 @@ def test_hist_no_overlap(self): subplot(122) y.hist() fig = gcf() - axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes() + axes = fig.axes assert len(axes) == 2 @pytest.mark.slow diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 8c84b785c88e4..54d17a4773749 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -76,10 +76,7 @@ def test_scatter_matrix_axis(self): axes0_labels = axes[0][0].yaxis.get_majorticklabels() # GH 5662 - if self.mpl_ge_2_0_0: - expected = ['-2', '0', '2'] - else: - expected = ['-2', '-1', '0', '1', '2'] + expected = ['-2', '0', '2'] self._check_text_labels(axes0_labels, expected) self._check_ticks_props( axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) @@ -91,10 +88,7 @@ def test_scatter_matrix_axis(self): axes = _check_plot_works(scatter_matrix, filterwarnings='always', frame=df, range_padding=.1) axes0_labels = axes[0][0].yaxis.get_majorticklabels() - if 
self.mpl_ge_2_0_0: - expected = ['-1.0', '-0.5', '0.0'] - else: - expected = ['-1.2', '-1.0', '-0.8', '-0.6', '-0.4', '-0.2', '0.0'] + expected = ['-1.0', '-0.5', '0.0'] self._check_text_labels(axes0_labels, expected) self._check_ticks_props( axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 5dc7d52e05778..dc708278836d2 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -88,10 +88,7 @@ def test_plot_figsize_and_title(self): def test_dont_modify_rcParams(self): # GH 8242 - if self.mpl_ge_1_5_0: - key = 'axes.prop_cycle' - else: - key = 'axes.color_cycle' + key = 'axes.prop_cycle' colors = self.plt.rcParams[key] _, ax = self.plt.subplots() Series([1, 2, 3]).plot(ax=ax) @@ -211,10 +208,7 @@ def test_line_use_index_false(self): @pytest.mark.slow def test_bar_log(self): - expected = np.array([1., 10., 100., 1000.]) - - if not self.mpl_le_1_2_1: - expected = np.hstack((.1, expected, 1e4)) + expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]) _, ax = self.plt.subplots() ax = Series([200, 500]).plot.bar(log=True, ax=ax) @@ -227,17 +221,12 @@ def test_bar_log(self): tm.close() # GH 9905 - expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00]) - - if not self.mpl_le_1_2_1: - expected = np.hstack((1.0e-04, expected, 1.0e+01)) - if self.mpl_ge_2_0_0: - expected = np.hstack((1.0e-05, expected)) + expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1]) _, ax = self.plt.subplots() ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar', ax=ax) - ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001 - ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001 + ymin = 0.0007943282347242822 + ymax = 0.12589254117941673 res = ax.get_ylim() tm.assert_almost_equal(res[0], ymin) tm.assert_almost_equal(res[1], ymax) @@ -474,7 +463,7 @@ def test_hist_no_overlap(self): subplot(122) y.hist() fig = gcf() - axes = 
fig.axes if self.mpl_ge_1_5_0 else fig.get_axes() + axes = fig.axes assert len(axes) == 2 @pytest.mark.slow @@ -591,8 +580,6 @@ def test_plot_fails_with_dupe_color_and_style(self): @pytest.mark.slow @td.skip_if_no_scipy def test_hist_kde(self): - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") _, ax = self.plt.subplots() ax = self.ts.plot.hist(logy=True, ax=ax) @@ -618,8 +605,6 @@ def test_hist_kde(self): @td.skip_if_no_scipy def test_kde_kwargs(self): _skip_if_no_scipy_gaussian_kde() - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") sample_points = np.linspace(-100, 100, 20) _check_plot_works(self.ts.plot.kde, bw_method='scott', ind=20) @@ -638,8 +623,6 @@ def test_kde_kwargs(self): @td.skip_if_no_scipy def test_kde_missing_vals(self): _skip_if_no_scipy_gaussian_kde() - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") s = Series(np.random.uniform(size=50)) s[0] = np.nan @@ -656,22 +639,18 @@ def test_hist_kwargs(self): self._check_text_labels(ax.yaxis.get_label(), 'Frequency') tm.close() - if self.mpl_ge_1_3_1: - _, ax = self.plt.subplots() - ax = self.ts.plot.hist(orientation='horizontal', ax=ax) - self._check_text_labels(ax.xaxis.get_label(), 'Frequency') - tm.close() + _, ax = self.plt.subplots() + ax = self.ts.plot.hist(orientation='horizontal', ax=ax) + self._check_text_labels(ax.xaxis.get_label(), 'Frequency') + tm.close() - _, ax = self.plt.subplots() - ax = self.ts.plot.hist(align='left', stacked=True, ax=ax) - tm.close() + _, ax = self.plt.subplots() + ax = self.ts.plot.hist(align='left', stacked=True, ax=ax) + tm.close() @pytest.mark.slow @td.skip_if_no_scipy def test_hist_kde_color(self): - if not self.mpl_ge_1_5_0: - pytest.skip("mpl is not supported") - _, ax = self.plt.subplots() ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax) self._check_ax_scales(ax, yaxis='log') @@ -870,10 +849,7 @@ def test_time_series_plot_color_kwargs(self): def test_time_series_plot_color_with_empty_kwargs(self): 
import matplotlib as mpl - if self.mpl_ge_1_5_0: - def_colors = self._maybe_unpack_cycler(mpl.rcParams) - else: - def_colors = mpl.rcParams['axes.color_cycle'] + def_colors = self._unpack_cycler(mpl.rcParams) index = date_range('1/1/2000', periods=12) s = Series(np.arange(1, 13), index=index) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 517bb9511552c..d4a204ed265b5 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -11,8 +11,7 @@ import pandas as pd from pandas import (Series, Categorical, DataFrame, isna, notna, - bdate_range, date_range, _np_version_under1p10, - CategoricalIndex) + bdate_range, date_range, CategoricalIndex) from pandas.core.index import MultiIndex from pandas.core.indexes.datetimes import Timestamp from pandas.core.indexes.timedeltas import Timedelta @@ -1246,12 +1245,11 @@ def test_numpy_argmin_deprecated(self): assert result == 1 - if not _np_version_under1p10: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.argmin, - s, out=data) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, np.argmin, + s, out=data) def test_idxmax(self): # test idxmax @@ -1315,12 +1313,11 @@ def test_numpy_argmax_deprecated(self): assert result == 10 - if not _np_version_under1p10: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.argmax, - s, out=data) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, np.argmax, + s, out=data) def test_ptp(self): # GH21614 diff --git a/pandas/tests/sparse/series/test_series.py 
b/pandas/tests/sparse/series/test_series.py index a1ec8314841e3..1cd7c73337e4f 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -9,8 +9,8 @@ import numpy as np import pandas as pd -from pandas import (Series, DataFrame, bdate_range, - isna, compat, _np_version_under1p12) + +from pandas import Series, DataFrame, bdate_range, isna, compat from pandas.errors import PerformanceWarning from pandas.tseries.offsets import BDay import pandas.util.testing as tm @@ -559,17 +559,16 @@ def test_numpy_take(self): sp = SparseSeries([1.0, 2.0, 3.0]) indices = [1, 2] - if not _np_version_under1p12: - tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(), - np.take(sp.to_dense(), indices, axis=0)) + tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(), + np.take(sp.to_dense(), indices, axis=0)) - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.take, - sp, indices, out=np.empty(sp.shape)) + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, np.take, + sp, indices, out=np.empty(sp.shape)) - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.take, - sp, indices, out=None, mode='clip') + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, np.take, + sp, indices, out=None, mode='clip') def test_setitem(self): self.bseries[5] = 7. 
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index d2b7979aed98d..1fd801c68fdde 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -224,10 +224,6 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): def test_complex_sorting(self): # gh 12666 - check no segfault - # Test not valid numpy versions older than 1.11 - if pd._np_version_under1p11: - pytest.skip("Test valid only for numpy 1.11+") - x17 = np.array([complex(i) for i in range(17)], dtype=object) pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index c101fd25ce5e5..a7b9bf9c9a351 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -13,7 +13,7 @@ from pandas.core.api import DataFrame, Panel from pandas.core.computation import expressions as expr -from pandas import compat, _np_version_under1p11, _np_version_under1p13 +from pandas import compat, _np_version_under1p13 from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal) from pandas.io.formats.printing import pprint_thing @@ -272,10 +272,7 @@ def testit(): for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'), ('div', '/'), ('pow', '**')]: - # numpy >= 1.11 doesn't handle integers - # raised to integer powers - # https://github.com/pandas-dev/pandas/issues/15363 - if op == 'pow' and not _np_version_under1p11: + if op == 'pow': continue if op == 'div': diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 98026f6d4cf0e..aa5d0016eca95 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -7,8 +7,7 @@ import numpy as np from numpy import nan from pandas.core import common as com -from pandas import (DataFrame, MultiIndex, merge, concat, Series, compat, - _np_version_under1p10) +from pandas import DataFrame, MultiIndex, merge, concat, Series, compat 
from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal from pandas.core.sorting import (is_int64_overflow_possible, @@ -416,7 +415,7 @@ def test_mixed_integer_from_list(self): def test_unsortable(self): # GH 13714 arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) - if compat.PY2 and not _np_version_under1p10: + if compat.PY2: # RuntimeWarning: tp_compare didn't return -1 or -2 for exception with warnings.catch_warnings(): pytest.raises(TypeError, safe_sort, arr) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index cc663fc59cbf1..4b0c4d581a008 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -7,7 +7,6 @@ from datetime import datetime, timedelta from numpy.random import randn import numpy as np -from pandas import _np_version_under1p12 import pandas as pd from pandas import (Series, DataFrame, bdate_range, @@ -1292,8 +1291,6 @@ def test_rolling_quantile_np_percentile(self): tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) - @pytest.mark.skipif(_np_version_under1p12, - reason='numpy midpoint interpolation is broken') @pytest.mark.parametrize('quantile', [0.0, 0.1, 0.45, 0.5, 1]) @pytest.mark.parametrize('interpolation', ['linear', 'lower', 'higher', 'nearest', 'midpoint']) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 5d7b23894e745..2fe891346065d 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -78,17 +78,6 @@ def _skip_if_no_mpl(): return True -def _skip_if_mpl_1_5(): - mod = safe_import("matplotlib") - - if mod: - v = mod.__version__ - if LooseVersion(v) > LooseVersion('1.4.3') or str(v)[0] == '0': - return True - else: - mod.use("Agg", warn=False) - - def _skip_if_mpl_2_2(): mod = safe_import("matplotlib") @@ -164,8 +153,6 @@ def decorated_func(func): reason="NumPy 1.15 or greater required") skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), 
reason="matplotlib is present") -skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), - reason="matplotlib 1.5") xfail_if_mpl_2_2 = pytest.mark.xfail(_skip_if_mpl_2_2(), reason="matplotlib 2.2") skip_if_32bit = pytest.mark.skipif(is_platform_32bit(), diff --git a/setup.py b/setup.py index bfd0c50c9e9be..f31aaa7e79a0d 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ def is_platform_windows(): return sys.platform == 'win32' or sys.platform == 'cygwin' -min_numpy_ver = '1.9.0' +min_numpy_ver = '1.12.0' setuptools_kwargs = { 'install_requires': [ 'python-dateutil >= 2.5.0',
- [x] closes #21242 - [x] tests modified / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Spurred on also partly by compat failure of #22725. ;-)
https://api.github.com/repos/pandas-dev/pandas/pulls/23062
2018-10-09T18:25:59Z
2018-10-15T17:07:34Z
2018-10-15T17:07:33Z
2018-10-15T18:28:01Z
REF: collect ops dispatch functions in one place, try to de-duplicate SparseDataFrame methods
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index f29b4410fbf54..2335b26c576eb 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -943,6 +943,134 @@ def should_series_dispatch(left, right, op): return False +def dispatch_to_series(left, right, func, str_rep=None, axis=None): + """ + Evaluate the frame operation func(left, right) by evaluating + column-by-column, dispatching to the Series implementation. + + Parameters + ---------- + left : DataFrame + right : scalar or DataFrame + func : arithmetic or comparison operator + str_rep : str or None, default None + axis : {None, 0, 1, "index", "columns"} + + Returns + ------- + DataFrame + """ + # Note: we use iloc to access columns for compat with cases + # with non-unique columns. + import pandas.core.computation.expressions as expressions + + right = lib.item_from_zerodim(right) + if lib.is_scalar(right): + + def column_op(a, b): + return {i: func(a.iloc[:, i], b) + for i in range(len(a.columns))} + + elif isinstance(right, ABCDataFrame): + assert right._indexed_same(left) + + def column_op(a, b): + return {i: func(a.iloc[:, i], b.iloc[:, i]) + for i in range(len(a.columns))} + + elif isinstance(right, ABCSeries) and axis == "columns": + # We only get here if called via left._combine_match_columns, + # in which case we specifically want to operate row-by-row + assert right.index.equals(left.columns) + + def column_op(a, b): + return {i: func(a.iloc[:, i], b.iloc[i]) + for i in range(len(a.columns))} + + elif isinstance(right, ABCSeries): + assert right.index.equals(left.index) # Handle other cases later + + def column_op(a, b): + return {i: func(a.iloc[:, i], b) + for i in range(len(a.columns))} + + else: + # Remaining cases have less-obvious dispatch rules + raise NotImplementedError(right) + + new_data = expressions.evaluate(column_op, str_rep, left, right) + + result = left._constructor(new_data, index=left.index, copy=False) + # Pin columns instead of passing to constructor for compat with + # 
non-unique columns case + result.columns = left.columns + return result + + +def dispatch_to_index_op(op, left, right, index_class): + """ + Wrap Series left in the given index_class to delegate the operation op + to the index implementation. DatetimeIndex and TimedeltaIndex perform + type checking, timezone handling, overflow checks, etc. + + Parameters + ---------- + op : binary operator (operator.add, operator.sub, ...) + left : Series + right : object + index_class : DatetimeIndex or TimedeltaIndex + + Returns + ------- + result : object, usually DatetimeIndex, TimedeltaIndex, or Series + """ + left_idx = index_class(left) + + # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, + # left_idx may inherit a freq from a cached DatetimeIndex. + # See discussion in GH#19147. + if getattr(left_idx, 'freq', None) is not None: + left_idx = left_idx._shallow_copy(freq=None) + try: + result = op(left_idx, right) + except NullFrequencyError: + # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError + # on add/sub of integers (or int-like). We re-raise as a TypeError. + raise TypeError('incompatible type for a datetime/timedelta ' + 'operation [{name}]'.format(name=op.__name__)) + return result + + +def dispatch_to_extension_op(op, left, right): + """ + Assume that left or right is a Series backed by an ExtensionArray, + apply the operator defined by op. 
+ """ + + # The op calls will raise TypeError if the op is not defined + # on the ExtensionArray + + # unbox Series and Index to arrays + if isinstance(left, (ABCSeries, ABCIndexClass)): + new_left = left._values + else: + new_left = left + + if isinstance(right, (ABCSeries, ABCIndexClass)): + new_right = right._values + else: + new_right = right + + res_values = op(new_left, new_right) + res_name = get_op_result_name(left, right) + + if op.__name__ in ['divmod', 'rdivmod']: + return _construct_divmod_result( + left, res_values, left.index, res_name) + + return _construct_result(left, res_values, left.index, res_name) + + # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory # methods @@ -1202,36 +1330,6 @@ def _construct_divmod_result(left, result, index, name, dtype=None): ) -def dispatch_to_extension_op(op, left, right): - """ - Assume that left or right is a Series backed by an ExtensionArray, - apply the operator defined by op. 
- """ - - # The op calls will raise TypeError if the op is not defined - # on the ExtensionArray - - # unbox Series and Index to arrays - if isinstance(left, (ABCSeries, ABCIndexClass)): - new_left = left._values - else: - new_left = left - - if isinstance(right, (ABCSeries, ABCIndexClass)): - new_right = right._values - else: - new_right = right - - res_values = op(new_left, new_right) - res_name = get_op_result_name(left, right) - - if op.__name__ in ['divmod', 'rdivmod']: - return _construct_divmod_result( - left, res_values, left.index, res_name) - - return _construct_result(left, res_values, left.index, res_name) - - def _arith_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid @@ -1329,40 +1427,6 @@ def wrapper(left, right): return wrapper -def dispatch_to_index_op(op, left, right, index_class): - """ - Wrap Series left in the given index_class to delegate the operation op - to the index implementation. DatetimeIndex and TimedeltaIndex perform - type checking, timezone handling, overflow checks, etc. - - Parameters - ---------- - op : binary operator (operator.add, operator.sub, ...) - left : Series - right : object - index_class : DatetimeIndex or TimedeltaIndex - - Returns - ------- - result : object, usually DatetimeIndex, TimedeltaIndex, or Series - """ - left_idx = index_class(left) - - # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, - # left_idx may inherit a freq from a cached DatetimeIndex. - # See discussion in GH#19147. - if getattr(left_idx, 'freq', None) is not None: - left_idx = left_idx._shallow_copy(freq=None) - try: - result = op(left_idx, right) - except NullFrequencyError: - # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError - # on add/sub of integers (or int-like). We re-raise as a TypeError. 
- raise TypeError('incompatible type for a datetime/timedelta ' - 'operation [{name}]'.format(name=op.__name__)) - return result - - def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) @@ -1661,69 +1725,6 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # ----------------------------------------------------------------------------- # DataFrame -def dispatch_to_series(left, right, func, str_rep=None, axis=None): - """ - Evaluate the frame operation func(left, right) by evaluating - column-by-column, dispatching to the Series implementation. - - Parameters - ---------- - left : DataFrame - right : scalar or DataFrame - func : arithmetic or comparison operator - str_rep : str or None, default None - axis : {None, 0, 1, "index", "columns"} - - Returns - ------- - DataFrame - """ - # Note: we use iloc to access columns for compat with cases - # with non-unique columns. - import pandas.core.computation.expressions as expressions - - right = lib.item_from_zerodim(right) - if lib.is_scalar(right): - - def column_op(a, b): - return {i: func(a.iloc[:, i], b) - for i in range(len(a.columns))} - - elif isinstance(right, ABCDataFrame): - assert right._indexed_same(left) - - def column_op(a, b): - return {i: func(a.iloc[:, i], b.iloc[:, i]) - for i in range(len(a.columns))} - - elif isinstance(right, ABCSeries) and axis == "columns": - # We only get here if called via left._combine_match_columns, - # in which case we specifically want to operate row-by-row - assert right.index.equals(left.columns) - - def column_op(a, b): - return {i: func(a.iloc[:, i], b.iloc[i]) - for i in range(len(a.columns))} - - elif isinstance(right, ABCSeries): - assert right.index.equals(left.index) # Handle other cases later - - def column_op(a, b): - return {i: func(a.iloc[:, i], b) - for i in range(len(a.columns))} - - else: - # Remaining cases have less-obvious dispatch rules - raise NotImplementedError(right) - - 
new_data = expressions.evaluate(column_op, str_rep, left, right) - - result = left._constructor(new_data, index=left.index, copy=False) - # Pin columns instead of passing to constructor for compat with - # non-unique columns case - result.columns = left.columns - return result - def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None): diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index e46df2b2bde70..c7d8be0d2e9e4 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -548,12 +548,12 @@ def xs(self, key, axis=0, copy=False): # Arithmetic-related methods def _combine_frame(self, other, func, fill_value=None, level=None): - this, other = self.align(other, join='outer', level=level, copy=False) - new_index, new_columns = this.index, this.columns - if level is not None: raise NotImplementedError("'level' argument is not supported") + this, other = self.align(other, join='outer', level=level, copy=False) + new_index, new_columns = this.index, this.columns + if self.empty and other.empty: return self._constructor(index=new_index).__finalize__(self) @@ -573,17 +573,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): if col in this and col in other: new_data[col] = func(this[col], other[col]) - # if the fill values are the same use them? 
or use a valid one - new_fill_value = None - other_fill_value = getattr(other, 'default_fill_value', np.nan) - if self.default_fill_value == other_fill_value: - new_fill_value = self.default_fill_value - elif np.isnan(self.default_fill_value) and not np.isnan( - other_fill_value): - new_fill_value = other_fill_value - elif not np.isnan(self.default_fill_value) and np.isnan( - other_fill_value): - new_fill_value = self.default_fill_value + new_fill_value = self._get_op_result_fill_value(other, func) return self._constructor(data=new_data, index=new_index, columns=new_columns, @@ -596,26 +586,16 @@ def _combine_match_index(self, other, func, level=None): if level is not None: raise NotImplementedError("'level' argument is not supported") - new_index = self.index.union(other.index) - this = self - if self.index is not new_index: - this = self.reindex(new_index) - - if other.index is not new_index: - other = other.reindex(new_index) + this, other = self.align(other, join='outer', axis=0, level=level, + copy=False) for col, series in compat.iteritems(this): new_data[col] = func(series.values, other.values) - # fill_value is a function of our operator - if isna(other.fill_value) or isna(self.default_fill_value): - fill_value = np.nan - else: - fill_value = func(np.float64(self.default_fill_value), - np.float64(other.fill_value)) + fill_value = self._get_op_result_fill_value(other, func) return self._constructor( - new_data, index=new_index, columns=self.columns, + new_data, index=this.index, columns=self.columns, default_fill_value=fill_value).__finalize__(self) def _combine_match_columns(self, other, func, level=None): @@ -627,24 +607,56 @@ def _combine_match_columns(self, other, func, level=None): if level is not None: raise NotImplementedError("'level' argument is not supported") - new_data = {} - - union = intersection = self.columns + left, right = self.align(other, join='outer', axis=1, level=level, + copy=False) + assert left.columns.equals(right.index) - if not 
union.equals(other.index): - union = other.index.union(self.columns) - intersection = other.index.intersection(self.columns) + new_data = {} - for col in intersection: - new_data[col] = func(self[col], float(other[col])) + for col in left.columns: + new_data[col] = func(left[col], float(right[col])) return self._constructor( - new_data, index=self.index, columns=union, + new_data, index=left.index, columns=left.columns, default_fill_value=self.default_fill_value).__finalize__(self) def _combine_const(self, other, func, errors='raise'): return self._apply_columns(lambda x: func(x, other)) + def _get_op_result_fill_value(self, other, func): + own_default = self.default_fill_value + + if isinstance(other, DataFrame): + # i.e. called from _combine_frame + + other_default = getattr(other, 'default_fill_value', np.nan) + + # if the fill values are the same use them? or use a valid one + if own_default == other_default: + # TOOD: won't this evaluate as False if both are np.nan? + fill_value = own_default + elif np.isnan(own_default) and not np.isnan(other_default): + fill_value = other_default + elif not np.isnan(own_default) and np.isnan(other_default): + fill_value = own_default + else: + fill_value = None + + elif isinstance(other, SparseSeries): + # i.e. 
called from _combine_match_index + + # fill_value is a function of our operator + if isna(other.fill_value) or isna(own_default): + fill_value = np.nan + else: + fill_value = func(np.float64(own_default), + np.float64(other.fill_value)) + + else: + raise NotImplementedError(type(other)) + + return fill_value + def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False): if level is not None: diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 71eba1e6901a1..5435ec643f813 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -677,6 +677,51 @@ def test_dt64ser_sub_datetime_dtype(self): # TODO: This next block of tests came from tests.series.test_operators, # needs to be de-duplicated and parametrized over `box` classes + def test_operators_datetimelike_invalid(self, all_arithmetic_operators): + # these are all TypeEror ops + op_str = all_arithmetic_operators + + def check(get_ser, test_ser): + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + op = getattr(get_ser, op_str, None) + with tm.assert_raises_regex(TypeError, 'operate|cannot'): + op(test_ser) + + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # ## datetime64 ### + dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')]) + dt1.iloc[2] = np.nan + dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), + Timestamp('20120104')]) + if op_str not in ['__sub__', '__rsub__']: + check(dt1, dt2) + + # ## datetime64 with timetimedelta ### + # TODO(jreback) __rsub__ should raise? 
+ if op_str not in ['__add__', '__radd__', '__sub__']: + check(dt1, td1) + + # 8260, 10763 + # datetime64 with tz + tz = 'US/Eastern' + dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, + tz=tz), name='foo') + dt2 = dt1.copy() + dt2.iloc[2] = np.nan + td1 = Series(pd.timedelta_range('1 days 1 min', periods=5, freq='H')) + td2 = td1.copy() + td2.iloc[1] = np.nan + + if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']: + check(dt2, td2) + @pytest.mark.parametrize('klass', [Series, pd.Index]) def test_sub_datetime64_not_ns(self, klass): # GH#7996 diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 8156c5ea671c2..b71af4b777022 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +from collections import deque +from datetime import datetime import operator import pytest @@ -16,28 +18,86 @@ # Comparisons class TestFrameComparisons(object): - def test_flex_comparison_nat(self): - # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT, - # and _definitely_ not be NaN - df = pd.DataFrame([pd.NaT]) - - result = df == pd.NaT - # result.iloc[0, 0] is a np.bool_ object - assert result.iloc[0, 0].item() is False - - result = df.eq(pd.NaT) - assert result.iloc[0, 0].item() is False - - result = df != pd.NaT - assert result.iloc[0, 0].item() is True - - result = df.ne(pd.NaT) - assert result.iloc[0, 0].item() is True + # Specifically _not_ flex-comparisons + + def test_comparison_invalid(self): + + def check(df, df2): + + for (x, y) in [(df, df2), (df2, df)]: + # we expect the result to match Series comparisons for + # == and !=, inequalities should raise + result = x == y + expected = pd.DataFrame({col: x[col] == y[col] + for col in x.columns}, + index=x.index, columns=x.columns) + tm.assert_frame_equal(result, expected) + + result = x != y + expected = pd.DataFrame({col: x[col] != y[col] + for col in x.columns}, + 
index=x.index, columns=x.columns) + tm.assert_frame_equal(result, expected) + + with pytest.raises(TypeError): + x >= y + with pytest.raises(TypeError): + x > y + with pytest.raises(TypeError): + x < y + with pytest.raises(TypeError): + x <= y + + # GH4968 + # invalid date/int comparisons + df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=['a']) + df['dates'] = pd.date_range('20010101', periods=len(df)) + + df2 = df.copy() + df2['dates'] = df['a'] + check(df, df2) + + df = pd.DataFrame(np.random.randint(10, size=(10, 2)), + columns=['a', 'b']) + df2 = pd.DataFrame({'a': pd.date_range('20010101', periods=len(df)), + 'b': pd.date_range('20100101', periods=len(df))}) + check(df, df2) + + def test_timestamp_compare(self): + # make sure we can compare Timestamps on the right AND left hand side + # GH#4982 + df = pd. DataFrame({'dates1': pd.date_range('20010101', periods=10), + 'dates2': pd.date_range('20010102', periods=10), + 'intcol': np.random.randint(1000000000, size=10), + 'floatcol': np.random.randn(10), + 'stringcol': list(tm.rands(10))}) + df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT + ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq', + 'ne': 'ne'} + + for left, right in ops.items(): + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # no nats + if left in ['eq', 'ne']: + expected = left_f(df, pd.Timestamp('20010109')) + result = right_f(pd.Timestamp('20010109'), df) + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(TypeError): + left_f(df, pd.Timestamp('20010109')) + with pytest.raises(TypeError): + right_f(pd.Timestamp('20010109'), df) + # nats + expected = left_f(df, pd.Timestamp('nat')) + result = right_f(pd.Timestamp('nat'), df) + tm.assert_frame_equal(result, expected) def test_mixed_comparison(self): - # GH 13128, GH 22163 != datetime64 vs non-dt64 should be False, + # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False, # not raise TypeError - # (this 
appears to be fixed before #22163, not sure when) + # (this appears to be fixed before GH#22163, not sure when) df = pd.DataFrame([['1989-08-01', 1], ['1989-08-01', 2]]) other = pd.DataFrame([['a', 'b'], ['c', 'd']]) @@ -80,6 +140,137 @@ def test_df_string_comparison(self): tm.assert_frame_equal(df[mask_b], df.loc[0:0, :]) tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :]) + +class TestFrameFlexComparisons(object): + # TODO: test_bool_flex_frame needs a better name + def test_bool_flex_frame(self): + data = np.random.randn(5, 3) + other_data = np.random.randn(5, 3) + df = pd.DataFrame(data) + other = pd.DataFrame(other_data) + ndim_5 = np.ones(df.shape + (1, 3)) + + # Unaligned + def _check_unaligned_frame(meth, op, df, other): + part_o = other.loc[3:, 1:].copy() + rs = meth(part_o) + xp = op(df, part_o.reindex(index=df.index, columns=df.columns)) + tm.assert_frame_equal(rs, xp) + + # DataFrame + assert df.eq(df).values.all() + assert not df.ne(df).values.any() + for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']: + f = getattr(df, op) + o = getattr(operator, op) + # No NAs + tm.assert_frame_equal(f(other), o(df, other)) + _check_unaligned_frame(f, o, df, other) + # ndarray + tm.assert_frame_equal(f(other.values), o(df, other.values)) + # scalar + tm.assert_frame_equal(f(0), o(df, 0)) + # NAs + msg = "Unable to coerce to Series/DataFrame" + tm.assert_frame_equal(f(np.nan), o(df, np.nan)) + with tm.assert_raises_regex(ValueError, msg): + f(ndim_5) + + # Series + def _test_seq(df, idx_ser, col_ser): + idx_eq = df.eq(idx_ser, axis=0) + col_eq = df.eq(col_ser) + idx_ne = df.ne(idx_ser, axis=0) + col_ne = df.ne(col_ser) + tm.assert_frame_equal(col_eq, df == pd.Series(col_ser)) + tm.assert_frame_equal(col_eq, -col_ne) + tm.assert_frame_equal(idx_eq, -idx_ne) + tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T) + tm.assert_frame_equal(col_eq, df.eq(list(col_ser))) + tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0)) + tm.assert_frame_equal(idx_eq, 
df.eq(list(idx_ser), axis=0)) + + idx_gt = df.gt(idx_ser, axis=0) + col_gt = df.gt(col_ser) + idx_le = df.le(idx_ser, axis=0) + col_le = df.le(col_ser) + + tm.assert_frame_equal(col_gt, df > pd.Series(col_ser)) + tm.assert_frame_equal(col_gt, -col_le) + tm.assert_frame_equal(idx_gt, -idx_le) + tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T) + + idx_ge = df.ge(idx_ser, axis=0) + col_ge = df.ge(col_ser) + idx_lt = df.lt(idx_ser, axis=0) + col_lt = df.lt(col_ser) + tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser)) + tm.assert_frame_equal(col_ge, -col_lt) + tm.assert_frame_equal(idx_ge, -idx_lt) + tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T) + + idx_ser = pd.Series(np.random.randn(5)) + col_ser = pd.Series(np.random.randn(3)) + _test_seq(df, idx_ser, col_ser) + + # list/tuple + _test_seq(df, idx_ser.values, col_ser.values) + + # NA + df.loc[0, 0] = np.nan + rs = df.eq(df) + assert not rs.loc[0, 0] + rs = df.ne(df) + assert rs.loc[0, 0] + rs = df.gt(df) + assert not rs.loc[0, 0] + rs = df.lt(df) + assert not rs.loc[0, 0] + rs = df.ge(df) + assert not rs.loc[0, 0] + rs = df.le(df) + assert not rs.loc[0, 0] + + # complex + arr = np.array([np.nan, 1, 6, np.nan]) + arr2 = np.array([2j, np.nan, 7, None]) + df = pd.DataFrame({'a': arr}) + df2 = pd.DataFrame({'a': arr2}) + rs = df.gt(df2) + assert not rs.values.any() + rs = df.ne(df2) + assert rs.values.all() + + arr3 = np.array([2j, np.nan, None]) + df3 = pd.DataFrame({'a': arr3}) + rs = df3.gt(2j) + assert not rs.values.any() + + # corner, dtype=object + df1 = pd.DataFrame({'col': ['foo', np.nan, 'bar']}) + df2 = pd.DataFrame({'col': ['foo', datetime.now(), 'bar']}) + result = df1.ne(df2) + exp = pd.DataFrame({'col': [False, True, False]}) + tm.assert_frame_equal(result, exp) + + def test_flex_comparison_nat(self): + # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT, + # and _definitely_ not be NaN + df = pd.DataFrame([pd.NaT]) + + result = df == pd.NaT + # result.iloc[0, 0] is a np.bool_ object 
+ assert result.iloc[0, 0].item() is False + + result = df.eq(pd.NaT) + assert result.iloc[0, 0].item() is False + + result = df != pd.NaT + assert result.iloc[0, 0].item() is True + + result = df.ne(pd.NaT) + assert result.iloc[0, 0].item() is True + @pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le']) def test_df_flex_cmp_constant_return_types(self, opname): # GH 15077, non-empty DataFrame @@ -380,3 +571,82 @@ def test_td64_df_add_int_frame(self): df - other with pytest.raises(TypeError): other - df + + def test_arith_mixed(self): + + left = pd.DataFrame({'A': ['a', 'b', 'c'], + 'B': [1, 2, 3]}) + + result = left + left + expected = pd.DataFrame({'A': ['aa', 'bb', 'cc'], + 'B': [2, 4, 6]}) + tm.assert_frame_equal(result, expected) + + def test_arith_getitem_commute(self): + df = pd.DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]}) + + def _test_op(df, op): + result = op(df, 1) + + if not df.columns.is_unique: + raise ValueError("Only unique columns supported by this test") + + for col in result.columns: + tm.assert_series_equal(result[col], op(df[col], 1)) + + _test_op(df, operator.add) + _test_op(df, operator.sub) + _test_op(df, operator.mul) + _test_op(df, operator.truediv) + _test_op(df, operator.floordiv) + _test_op(df, operator.pow) + + _test_op(df, lambda x, y: y + x) + _test_op(df, lambda x, y: y - x) + _test_op(df, lambda x, y: y * x) + _test_op(df, lambda x, y: y / x) + _test_op(df, lambda x, y: y ** x) + + _test_op(df, lambda x, y: x + y) + _test_op(df, lambda x, y: x - y) + _test_op(df, lambda x, y: x * y) + _test_op(df, lambda x, y: x / y) + _test_op(df, lambda x, y: x ** y) + + @pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]), + range(1, 3), deque([1, 2])]) + def test_arith_alignment_non_pandas_object(self, values): + # GH#17901 + df = pd.DataFrame({'A': [1, 1], 'B': [1, 1]}) + expected = pd.DataFrame({'A': [2, 2], 'B': [3, 3]}) + result = df + values + tm.assert_frame_equal(result, expected) + + def 
test_arith_non_pandas_object(self): + df = pd.DataFrame(np.arange(1, 10, dtype='f8').reshape(3, 3), + columns=['one', 'two', 'three'], + index=['a', 'b', 'c']) + + val1 = df.xs('a').values + added = pd.DataFrame(df.values + val1, + index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val1, added) + + added = pd.DataFrame((df.values.T + val1).T, + index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val1, axis=0), added) + + val2 = list(df['two']) + + added = pd.DataFrame(df.values + val2, + index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val2, added) + + added = pd.DataFrame((df.values.T + val2).T, index=df.index, + columns=df.columns) + tm.assert_frame_equal(df.add(val2, axis='index'), added) + + val3 = np.random.rand(*df.shape) + added = pd.DataFrame(df.values + val3, + index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val3), added) diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 20ca4bc7de43e..65459735e639b 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import print_function -from collections import deque -from datetime import datetime from decimal import Decimal import operator @@ -13,8 +11,7 @@ from pandas.compat import range from pandas import compat -from pandas import (DataFrame, Series, MultiIndex, Timestamp, - date_range) +from pandas import DataFrame, Series, MultiIndex import pandas.core.common as com import pandas as pd @@ -243,75 +240,6 @@ def test_operators_none_as_na(self, op): result = op(df.fillna(7), df) assert_frame_equal(result, expected, check_dtype=False) - def test_comparison_invalid(self): - - def check(df, df2): - - for (x, y) in [(df, df2), (df2, df)]: - # we expect the result to match Series comparisons for - # == and !=, inequalities should raise - result = x == y - expected = DataFrame({col: x[col] == y[col] - for col in 
x.columns}, - index=x.index, columns=x.columns) - assert_frame_equal(result, expected) - - result = x != y - expected = DataFrame({col: x[col] != y[col] - for col in x.columns}, - index=x.index, columns=x.columns) - assert_frame_equal(result, expected) - - pytest.raises(TypeError, lambda: x >= y) - pytest.raises(TypeError, lambda: x > y) - pytest.raises(TypeError, lambda: x < y) - pytest.raises(TypeError, lambda: x <= y) - - # GH4968 - # invalid date/int comparisons - df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a']) - df['dates'] = date_range('20010101', periods=len(df)) - - df2 = df.copy() - df2['dates'] = df['a'] - check(df, df2) - - df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b']) - df2 = DataFrame({'a': date_range('20010101', periods=len( - df)), 'b': date_range('20100101', periods=len(df))}) - check(df, df2) - - def test_timestamp_compare(self): - # make sure we can compare Timestamps on the right AND left hand side - # GH4982 - df = DataFrame({'dates1': date_range('20010101', periods=10), - 'dates2': date_range('20010102', periods=10), - 'intcol': np.random.randint(1000000000, size=10), - 'floatcol': np.random.randn(10), - 'stringcol': list(tm.rands(10))}) - df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT - ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq', - 'ne': 'ne'} - - for left, right in ops.items(): - left_f = getattr(operator, left) - right_f = getattr(operator, right) - - # no nats - if left in ['eq', 'ne']: - expected = left_f(df, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), df) - assert_frame_equal(result, expected) - else: - with pytest.raises(TypeError): - left_f(df, Timestamp('20010109')) - with pytest.raises(TypeError): - right_f(Timestamp('20010109'), df) - # nats - expected = left_f(df, Timestamp('nat')) - result = right_f(Timestamp('nat'), df) - assert_frame_equal(result, expected) - @pytest.mark.parametrize('op,res', [('__eq__', False), ('__ne__', True)]) 
# TODO: not sure what's correct here. @@ -385,158 +313,6 @@ def test_binary_ops_align(self): for res in [res3, res4, res5, res6]: assert_frame_equal(res, exp) - def test_arith_mixed(self): - - left = DataFrame({'A': ['a', 'b', 'c'], - 'B': [1, 2, 3]}) - - result = left + left - expected = DataFrame({'A': ['aa', 'bb', 'cc'], - 'B': [2, 4, 6]}) - assert_frame_equal(result, expected) - - def test_arith_getitem_commute(self): - df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]}) - - self._test_op(df, operator.add) - self._test_op(df, operator.sub) - self._test_op(df, operator.mul) - self._test_op(df, operator.truediv) - self._test_op(df, operator.floordiv) - self._test_op(df, operator.pow) - - self._test_op(df, lambda x, y: y + x) - self._test_op(df, lambda x, y: y - x) - self._test_op(df, lambda x, y: y * x) - self._test_op(df, lambda x, y: y / x) - self._test_op(df, lambda x, y: y ** x) - - self._test_op(df, lambda x, y: x + y) - self._test_op(df, lambda x, y: x - y) - self._test_op(df, lambda x, y: x * y) - self._test_op(df, lambda x, y: x / y) - self._test_op(df, lambda x, y: x ** y) - - @staticmethod - def _test_op(df, op): - result = op(df, 1) - - if not df.columns.is_unique: - raise ValueError("Only unique columns supported by this test") - - for col in result.columns: - assert_series_equal(result[col], op(df[col], 1)) - - def test_bool_flex_frame(self): - data = np.random.randn(5, 3) - other_data = np.random.randn(5, 3) - df = DataFrame(data) - other = DataFrame(other_data) - ndim_5 = np.ones(df.shape + (1, 3)) - - # Unaligned - def _check_unaligned_frame(meth, op, df, other): - part_o = other.loc[3:, 1:].copy() - rs = meth(part_o) - xp = op(df, part_o.reindex(index=df.index, columns=df.columns)) - assert_frame_equal(rs, xp) - - # DataFrame - assert df.eq(df).values.all() - assert not df.ne(df).values.any() - for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']: - f = getattr(df, op) - o = getattr(operator, op) - # No NAs - assert_frame_equal(f(other), o(df, other)) 
- _check_unaligned_frame(f, o, df, other) - # ndarray - assert_frame_equal(f(other.values), o(df, other.values)) - # scalar - assert_frame_equal(f(0), o(df, 0)) - # NAs - msg = "Unable to coerce to Series/DataFrame" - assert_frame_equal(f(np.nan), o(df, np.nan)) - with tm.assert_raises_regex(ValueError, msg): - f(ndim_5) - - # Series - def _test_seq(df, idx_ser, col_ser): - idx_eq = df.eq(idx_ser, axis=0) - col_eq = df.eq(col_ser) - idx_ne = df.ne(idx_ser, axis=0) - col_ne = df.ne(col_ser) - assert_frame_equal(col_eq, df == Series(col_ser)) - assert_frame_equal(col_eq, -col_ne) - assert_frame_equal(idx_eq, -idx_ne) - assert_frame_equal(idx_eq, df.T.eq(idx_ser).T) - assert_frame_equal(col_eq, df.eq(list(col_ser))) - assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0)) - assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0)) - - idx_gt = df.gt(idx_ser, axis=0) - col_gt = df.gt(col_ser) - idx_le = df.le(idx_ser, axis=0) - col_le = df.le(col_ser) - - assert_frame_equal(col_gt, df > Series(col_ser)) - assert_frame_equal(col_gt, -col_le) - assert_frame_equal(idx_gt, -idx_le) - assert_frame_equal(idx_gt, df.T.gt(idx_ser).T) - - idx_ge = df.ge(idx_ser, axis=0) - col_ge = df.ge(col_ser) - idx_lt = df.lt(idx_ser, axis=0) - col_lt = df.lt(col_ser) - assert_frame_equal(col_ge, df >= Series(col_ser)) - assert_frame_equal(col_ge, -col_lt) - assert_frame_equal(idx_ge, -idx_lt) - assert_frame_equal(idx_ge, df.T.ge(idx_ser).T) - - idx_ser = Series(np.random.randn(5)) - col_ser = Series(np.random.randn(3)) - _test_seq(df, idx_ser, col_ser) - - # list/tuple - _test_seq(df, idx_ser.values, col_ser.values) - - # NA - df.loc[0, 0] = np.nan - rs = df.eq(df) - assert not rs.loc[0, 0] - rs = df.ne(df) - assert rs.loc[0, 0] - rs = df.gt(df) - assert not rs.loc[0, 0] - rs = df.lt(df) - assert not rs.loc[0, 0] - rs = df.ge(df) - assert not rs.loc[0, 0] - rs = df.le(df) - assert not rs.loc[0, 0] - - # complex - arr = np.array([np.nan, 1, 6, np.nan]) - arr2 = np.array([2j, np.nan, 7, 
None]) - df = DataFrame({'a': arr}) - df2 = DataFrame({'a': arr2}) - rs = df.gt(df2) - assert not rs.values.any() - rs = df.ne(df2) - assert rs.values.all() - - arr3 = np.array([2j, np.nan, None]) - df3 = DataFrame({'a': arr3}) - rs = df3.gt(2j) - assert not rs.values.any() - - # corner, dtype=object - df1 = DataFrame({'col': ['foo', np.nan, 'bar']}) - df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']}) - result = df1.ne(df2) - exp = DataFrame({'col': [False, True, False]}) - assert_frame_equal(result, exp) - def test_dti_tz_convert_to_utc(self): base = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz='UTC') @@ -548,40 +324,6 @@ def test_dti_tz_convert_to_utc(self): exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base) assert_frame_equal(df1 + df2, exp) - def test_arith_non_pandas_object(self): - df = self.simple - - val1 = df.xs('a').values - added = DataFrame(df.values + val1, index=df.index, - columns=df.columns) - assert_frame_equal(df + val1, added) - - added = DataFrame((df.values.T + val1).T, - index=df.index, columns=df.columns) - assert_frame_equal(df.add(val1, axis=0), added) - - val2 = list(df['two']) - - added = DataFrame(df.values + val2, index=df.index, columns=df.columns) - assert_frame_equal(df + val2, added) - - added = DataFrame((df.values.T + val2).T, index=df.index, - columns=df.columns) - assert_frame_equal(df.add(val2, axis='index'), added) - - val3 = np.random.rand(*df.shape) - added = DataFrame(df.values + val3, index=df.index, columns=df.columns) - assert_frame_equal(df.add(val3), added) - - @pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]), - range(1, 3), deque([1, 2])]) - def test_arith_alignment_non_pandas_object(self, values): - # GH 17901 - df = DataFrame({'A': [1, 1], 'B': [1, 1]}) - expected = DataFrame({'A': [2, 2], 'B': [3, 3]}) - result = df + values - assert_frame_equal(result, expected) - def test_combineFrame(self): frame_copy = self.frame.reindex(self.frame.index[::2]) diff --git 
a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 7ee78645fe96e..e781488a799ec 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -1,18 +1,159 @@ # -*- coding: utf-8 -*- import operator +from datetime import timedelta import numpy as np import pytest import pandas as pd import pandas.util.testing as tm -from pandas import Series +from pandas import Series, compat +from pandas.core.indexes.period import IncompatibleFrequency + + +def _permute(obj): + return obj.take(np.random.permutation(len(obj))) + + +class TestSeriesFlexArithmetic(object): + @pytest.mark.parametrize( + 'ts', + [ + (lambda x: x, lambda x: x * 2, False), + (lambda x: x, lambda x: x[::2], False), + (lambda x: x, lambda x: 5, True), + (lambda x: tm.makeFloatSeries(), + lambda x: tm.makeFloatSeries(), + True) + ]) + @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv', + 'truediv', 'div', 'pow']) + def test_flex_method_equivalence(self, opname, ts): + # check that Series.{opname} behaves like Series.__{opname}__, + tser = tm.makeTimeSeries().rename('ts') + + series = ts[0](tser) + other = ts[1](tser) + check_reverse = ts[2] + + if opname == 'div' and compat.PY3: + pytest.skip('div test only for Py3') + + op = getattr(Series, opname) + + if op == 'div': + alt = operator.truediv + else: + alt = getattr(operator, opname) + + result = op(series, other) + expected = alt(series, other) + tm.assert_almost_equal(result, expected) + if check_reverse: + rop = getattr(Series, "r" + opname) + result = rop(series, other) + expected = alt(other, series) + tm.assert_almost_equal(result, expected) + + +class TestSeriesArithmetic(object): + # Some of these may end up in tests/arithmetic, but are not yet sorted + + def test_empty_series_add_sub(self): + # GH#13844 + a = Series(dtype='M8[ns]') + b = Series(dtype='m8[ns]') + tm.assert_series_equal(a, a + b) + tm.assert_series_equal(a, a - b) + tm.assert_series_equal(a, b 
+ a) + with pytest.raises(TypeError): + b - a + + def test_add_series_with_period_index(self): + rng = pd.period_range('1/1/2000', '1/1/2010', freq='A') + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts + ts[::2] + expected = ts + ts + expected[1::2] = np.nan + tm.assert_series_equal(result, expected) + + result = ts + _permute(ts[::2]) + tm.assert_series_equal(result, expected) + + msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)" + with tm.assert_raises_regex(IncompatibleFrequency, msg): + ts + ts.asfreq('D', how="end") + + def test_operators_datetimelike(self): + + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # ## datetime64 ### + dt1 = Series([pd.Timestamp('20111230'), pd.Timestamp('20120101'), + pd.Timestamp('20120103')]) + dt1.iloc[2] = np.nan + dt2 = Series([pd.Timestamp('20111231'), pd.Timestamp('20120102'), + pd.Timestamp('20120104')]) + dt1 - dt2 + dt2 - dt1 + + # ## datetime64 with timetimedelta ### + dt1 + td1 + td1 + dt1 + dt1 - td1 + # TODO: Decide if this ought to work. 
+ # td1 - dt1 + + # ## timetimedelta with datetime64 ### + td1 + dt1 + dt1 + td1 + # ------------------------------------------------------------------ # Comparisons +class TestSeriesFlexComparison(object): + def test_comparison_flex_basic(self): + left = pd.Series(np.random.randn(10)) + right = pd.Series(np.random.randn(10)) + + tm.assert_series_equal(left.eq(right), left == right) + tm.assert_series_equal(left.ne(right), left != right) + tm.assert_series_equal(left.le(right), left < right) + tm.assert_series_equal(left.lt(right), left <= right) + tm.assert_series_equal(left.gt(right), left > right) + tm.assert_series_equal(left.ge(right), left >= right) + + # axis + for axis in [0, None, 'index']: + tm.assert_series_equal(left.eq(right, axis=axis), left == right) + tm.assert_series_equal(left.ne(right, axis=axis), left != right) + tm.assert_series_equal(left.le(right, axis=axis), left < right) + tm.assert_series_equal(left.lt(right, axis=axis), left <= right) + tm.assert_series_equal(left.gt(right, axis=axis), left > right) + tm.assert_series_equal(left.ge(right, axis=axis), left >= right) + + # + msg = 'No axis named 1 for object type' + for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']: + with tm.assert_raises_regex(ValueError, msg): + getattr(left, op)(right, axis=1) + class TestSeriesComparison(object): + def test_comparison_different_length(self): + a = Series(['a', 'b', 'c']) + b = Series(['b', 'a']) + with pytest.raises(ValueError): + a < b + + a = Series([1, 2]) + b = Series([2, 3, 4]) + with pytest.raises(ValueError): + a == b @pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le']) def test_ser_flex_cmp_return_dtypes(self, opname): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 57688c7a3c3ab..082ed5e0f5123 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -13,11 +13,10 @@ import pandas.util.testing as tm from pandas import ( Categorical, 
DataFrame, Index, NaT, Series, bdate_range, compat, - date_range, isna, timedelta_range + date_range, isna ) from pandas.compat import range from pandas.core import ops -from pandas.core.indexes.datetimes import Timestamp from pandas.util.testing import ( assert_almost_equal, assert_frame_equal, assert_series_equal ) @@ -589,17 +588,6 @@ def test_nat_comparisons(self, dtype, box, reverse, pair): expected = Series([False, False, True]) assert_series_equal(left <= right, expected) - def test_comparison_different_length(self): - a = Series(['a', 'b', 'c']) - b = Series(['b', 'a']) - with pytest.raises(ValueError): - a < b - - a = Series([1, 2]) - b = Series([2, 3, 4]) - with pytest.raises(ValueError): - a == b - def test_ne(self): ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) expected = [True, True, False, True, True] @@ -638,31 +626,6 @@ def test_comp_ops_df_compat(self): class TestSeriesFlexComparisonOps(object): - def test_comparison_flex_basic(self): - left = pd.Series(np.random.randn(10)) - right = pd.Series(np.random.randn(10)) - - assert_series_equal(left.eq(right), left == right) - assert_series_equal(left.ne(right), left != right) - assert_series_equal(left.le(right), left < right) - assert_series_equal(left.lt(right), left <= right) - assert_series_equal(left.gt(right), left > right) - assert_series_equal(left.ge(right), left >= right) - - # axis - for axis in [0, None, 'index']: - assert_series_equal(left.eq(right, axis=axis), left == right) - assert_series_equal(left.ne(right, axis=axis), left != right) - assert_series_equal(left.le(right, axis=axis), left < right) - assert_series_equal(left.lt(right, axis=axis), left <= right) - assert_series_equal(left.gt(right, axis=axis), left > right) - assert_series_equal(left.ge(right, axis=axis), left >= right) - - # - msg = 'No axis named 1 for object type' - for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']: - with tm.assert_raises_regex(ValueError, msg): - getattr(left, op)(right, axis=1) def 
test_comparison_flex_alignment(self): left = Series([1, 3, 2], index=list('abc')) @@ -709,119 +672,7 @@ def test_comparison_flex_alignment_fill(self): assert_series_equal(left.gt(right, fill_value=0), exp) -class TestDatetimeSeriesArithmetic(object): - - def test_operators_datetimelike_invalid(self, all_arithmetic_operators): - # these are all TypeEror ops - op_str = all_arithmetic_operators - - def check(get_ser, test_ser): - - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate|cannot'): - op(test_ser) - - # ## timedelta64 ### - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # ## datetime64 ### - dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), - Timestamp('20120103')]) - dt1.iloc[2] = np.nan - dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), - Timestamp('20120104')]) - if op_str not in ['__sub__', '__rsub__']: - check(dt1, dt2) - - # ## datetime64 with timetimedelta ### - # TODO(jreback) __rsub__ should raise? 
- if op_str not in ['__add__', '__radd__', '__sub__']: - check(dt1, td1) - - # 8260, 10763 - # datetime64 with tz - tz = 'US/Eastern' - dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, - tz=tz), name='foo') - dt2 = dt1.copy() - dt2.iloc[2] = np.nan - td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) - td2 = td1.copy() - td2.iloc[1] = np.nan - - if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']: - check(dt2, td2) - - def test_operators_datetimelike(self): - - # ## timedelta64 ### - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # ## datetime64 ### - dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), - Timestamp('20120103')]) - dt1.iloc[2] = np.nan - dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), - Timestamp('20120104')]) - dt1 - dt2 - dt2 - dt1 - - # ## datetime64 with timetimedelta ### - dt1 + td1 - td1 + dt1 - dt1 - td1 - # TODO: Decide if this ought to work. - # td1 - dt1 - - # ## timetimedelta with datetime64 ### - td1 + dt1 - dt1 + td1 - - class TestSeriesOperators(TestData): - @pytest.mark.parametrize( - 'ts', - [ - (lambda x: x, lambda x: x * 2, False), - (lambda x: x, lambda x: x[::2], False), - (lambda x: x, lambda x: 5, True), - (lambda x: tm.makeFloatSeries(), - lambda x: tm.makeFloatSeries(), - True) - ]) - @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv', - 'truediv', 'div', 'pow']) - def test_op_method(self, opname, ts): - # check that Series.{opname} behaves like Series.__{opname}__, - tser = tm.makeTimeSeries().rename('ts') - - series = ts[0](tser) - other = ts[1](tser) - check_reverse = ts[2] - - if opname == 'div' and compat.PY3: - pytest.skip('div test only for Py3') - - op = getattr(Series, opname) - - if op == 'div': - alt = operator.truediv - else: - alt = getattr(operator, opname) - - result = op(series, other) - expected = alt(series, other) - assert_almost_equal(result, expected) - if check_reverse: - rop = getattr(Series, "r" + 
opname) - result = rop(series, other) - expected = alt(other, series) - assert_almost_equal(result, expected) def test_operators_empty_int_corner(self): s1 = Series([], [], dtype=np.int32) diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index d80e2fd276407..88a5ff261fbb4 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -2,16 +2,11 @@ import pytest import pandas as pd -import pandas.core.indexes.period as period import pandas.util.testing as tm from pandas import DataFrame, Period, Series, period_range from pandas.core.arrays import PeriodArray -def _permute(obj): - return obj.take(np.random.permutation(len(obj))) - - class TestSeriesPeriod(object): def setup_method(self, method): @@ -116,22 +111,6 @@ def test_intercept_astype_object(self): result = df.values.squeeze() assert (result[:, 0] == expected.values).all() - def test_add_series(self): - rng = period_range('1/1/2000', '1/1/2010', freq='A') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts + ts[::2] - expected = ts + ts - expected[1::2] = np.nan - tm.assert_series_equal(result, expected) - - result = ts + _permute(ts[::2]) - tm.assert_series_equal(result, expected) - - msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)" - with tm.assert_raises_regex(period.IncompatibleFrequency, msg): - ts + ts.asfreq('D', how="end") - def test_align_series(self, join_type): rng = period_range('1/1/2000', '1/1/2010', freq='A') ts = Series(np.random.randn(len(rng)), index=rng) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 497b1aef02897..b46570fcfb1a5 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -456,16 +456,6 @@ def test_timeseries_coercion(self): assert ser.index.is_all_dates assert isinstance(ser.index, DatetimeIndex) - def test_empty_series_ops(self): - # see issue #13844 - a = Series(dtype='M8[ns]') - b = 
Series(dtype='m8[ns]') - assert_series_equal(a, a + b) - assert_series_equal(a, a - b) - assert_series_equal(a, b + a) - with pytest.raises(TypeError): - b - a - def test_contiguous_boolean_preserve_freq(self): rng = date_range('1/1/2000', '3/1/2000', freq='B')
https://api.github.com/repos/pandas-dev/pandas/pulls/23060
2018-10-09T15:27:43Z
2018-10-28T13:48:54Z
2018-10-28T13:48:54Z
2018-10-28T16:17:52Z
Update api.rst
diff --git a/doc/source/api.rst b/doc/source/api.rst index 073ed8a082a11..ffa240febf731 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -906,7 +906,6 @@ Indexing, iteration DataFrame.loc DataFrame.iloc DataFrame.insert - DataFrame.insert DataFrame.__iter__ DataFrame.items DataFrame.keys
remove duplicate line for DataFrame.insert(...) - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23052
2018-10-09T01:34:40Z
2018-10-09T02:14:33Z
2018-10-09T02:14:33Z
2018-10-09T05:59:05Z
DOC: update the pandas.DateTimeArrayMixin.is_month_start docstring
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 0f07a9cf3c0e0..b16a399c0bbb1 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -998,48 +998,60 @@ def date(self): 'dim', "The number of days in the month") daysinmonth = days_in_month - is_month_start = _field_accessor( - 'is_month_start', - 'is_month_start', - "Logical indicating if first day of month (defined by frequency)") - is_month_end = _field_accessor( - 'is_month_end', - 'is_month_end', - """ - Indicator for whether the date is the last day of the month. + _is_month_doc = """ + Indicates whether the date is the {first_or_last} day of the month. Returns ------- Series or array - For Series, returns a Series with boolean values. For - DatetimeIndex, returns a boolean array. + For Series, returns a Series with boolean values. + For DatetimeIndex, returns a boolean array. See Also -------- - is_month_start : Indicator for whether the date is the first day - of the month. + is_month_start : Return a boolean indicating whether the date + is the first day of the month. + is_month_end : Return a boolean indicating whether the date + is the last day of the month. Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. 
- >>> dates = pd.Series(pd.date_range("2018-02-27", periods=3)) - >>> dates + >>> s = pd.Series(pd.date_range("2018-02-27", periods=3)) + >>> s 0 2018-02-27 1 2018-02-28 2 2018-03-01 dtype: datetime64[ns] - >>> dates.dt.is_month_end + >>> s.dt.is_month_start + 0 False + 1 False + 2 True + dtype: bool + >>> s.dt.is_month_end 0 False 1 True 2 False dtype: bool >>> idx = pd.date_range("2018-02-27", periods=3) + >>> idx.is_month_start + array([False, False, True]) >>> idx.is_month_end - array([False, True, False], dtype=bool) - """) + array([False, True, False]) + """ + is_month_start = _field_accessor( + 'is_month_start', + 'is_month_start', + _is_month_doc.format(first_or_last='first')) + + is_month_end = _field_accessor( + 'is_month_end', + 'is_month_end', + _is_month_doc.format(first_or_last='last')) + is_quarter_start = _field_accessor( 'is_quarter_start', 'is_quarter_start',
- [x] closes #20146 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry should close PR #20146
https://api.github.com/repos/pandas-dev/pandas/pulls/23051
2018-10-09T01:05:35Z
2018-10-26T09:15:50Z
2018-10-26T09:15:50Z
2018-10-26T10:11:36Z
CLN GH22985 Fixed interpolation with object error message
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8de52fbfa79f0..ce70e3ce56c08 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6387,7 +6387,9 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, if _maybe_transposed_self._data.get_dtype_counts().get( 'object') == len(_maybe_transposed_self.T): - raise TypeError("Cannot interpolate with all NaNs.") + raise TypeError("Cannot interpolate with all object-dtype columns " + "in the DataFrame. Try setting at least one " + "column to a numeric dtype.") # create/use the index if method == 'linear': diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 136299a4b81be..9d1bd9e9a0234 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -814,6 +814,19 @@ def test_interp_raise_on_only_mixed(self): with pytest.raises(TypeError): df.interpolate(axis=1) + def test_interp_raise_on_all_object_dtype(self): + # GH 22985 + df = DataFrame({ + 'A': [1, 2, 3], + 'B': [4, 5, 6]}, + dtype='object') + with tm.assert_raises_regex( + TypeError, + "Cannot interpolate with all object-dtype columns " + "in the DataFrame. Try setting at least one " + "column to a numeric dtype."): + df.interpolate() + def test_interp_inplace(self): df = DataFrame({'a': [1., 2., np.nan, 4.]}) expected = DataFrame({'a': [1., 2., 3., 4.]})
- [x] closes #22985 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry It looks like the problem with #22985 is mostly the mismatch between the input trigger and the error message, so this is just a tiny hotfix.
https://api.github.com/repos/pandas-dev/pandas/pulls/23044
2018-10-08T14:17:39Z
2018-10-10T12:04:57Z
2018-10-10T12:04:56Z
2018-10-10T12:05:01Z
BUG-22796 Concat multicolumn tz-aware DataFrame
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ca4ea8e366754..a4209ba90aaee 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -892,6 +892,7 @@ Reshaping - Bug in :func:`pandas.wide_to_long` when a string is passed to the stubnames argument and a column name is a substring of that stubname (:issue:`22468`) - Bug in :func:`merge` when merging ``datetime64[ns, tz]`` data that contained a DST transition (:issue:`18885`) - Bug in :func:`merge_asof` when merging on float values within defined tolerance (:issue:`22981`) +- Bug in :func:`pandas.concat` when concatenating a multicolumn DataFrame with tz-aware data against a DataFrame with a different number of columns (:issue`22796`) Build Changes ^^^^^^^^^^^^^ diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 611cae28877c3..f07fb3cd80eab 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -546,6 +546,17 @@ def __new__(cls, unit=None, tz=None): cls._cache[key] = u return u + @classmethod + def construct_array_type(cls): + """Return the array type associated with this dtype + + Returns + ------- + type + """ + from pandas import DatetimeIndex + return DatetimeIndex + @classmethod def construct_from_string(cls, string): """ attempt to construct this type from a string, raise a TypeError if diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 5a3f11525acf8..6d67070000dcd 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -186,6 +186,10 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): if getattr(self.block, 'is_datetimetz', False) or \ is_datetimetz(empty_dtype): + if self.block is None: + array = empty_dtype.construct_array_type() + missing_arr = array([fill_value], dtype=empty_dtype) + return missing_arr.repeat(self.shape[1]) pass elif getattr(self.block, 'is_categorical', False): pass diff --git 
a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index d1f921bc5e894..ece9559313ba0 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -54,6 +54,38 @@ def test_concat_multiple_tzs(self): expected = DataFrame(dict(time=[ts2, ts3])) assert_frame_equal(results, expected) + @pytest.mark.parametrize( + 't1', + [ + '2015-01-01', + pytest.param(pd.NaT, marks=pytest.mark.xfail( + reason='GH23037 incorrect dtype when concatenating', + strict=True))]) + def test_concat_tz_NaT(self, t1): + # GH 22796 + # Concating tz-aware multicolumn DataFrames + ts1 = Timestamp(t1, tz='UTC') + ts2 = Timestamp('2015-01-01', tz='UTC') + ts3 = Timestamp('2015-01-01', tz='UTC') + + df1 = DataFrame([[ts1, ts2]]) + df2 = DataFrame([[ts3]]) + + result = pd.concat([df1, df2]) + expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0]) + + assert_frame_equal(result, expected) + + def test_concat_tz_not_aligned(self): + # GH 22796 + ts = pd.to_datetime([1, 2]).tz_localize("UTC") + a = pd.DataFrame({"A": ts}) + b = pd.DataFrame({"A": ts, "B": ts}) + result = pd.concat([a, b], sort=True, ignore_index=True) + expected = pd.DataFrame({"A": list(ts) + list(ts), + "B": [pd.NaT, pd.NaT] + list(ts)}) + assert_frame_equal(result, expected) + def test_concat_tuple_keys(self): # GH 14438 df1 = pd.DataFrame(np.ones((2, 2)), columns=list('AB'))
- [X] closes #22796 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Numpy arrays don't have the datetimetz dtype, so I just passed through the DatetimeIndex directly. Side note: There's another small bug (I think) where np.nan or pd.NaT takes on the dtype of the column instead of the row when concatenating, but the column should instead have an object dtype.
https://api.github.com/repos/pandas-dev/pandas/pulls/23036
2018-10-08T01:48:14Z
2018-10-09T18:41:43Z
2018-10-09T18:41:43Z
2018-10-09T18:42:09Z
BUG: Fixed nlargest/smallest functionality for dataframes with MultiIndex columns
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a41b0c9521f99..a547edec2f3ce 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -1194,6 +1194,7 @@ Reshaping - Bug in :func:`merge_asof` when merging on float values within defined tolerance (:issue:`22981`) - Bug in :func:`pandas.concat` when concatenating a multicolumn DataFrame with tz-aware data against a DataFrame with a different number of columns (:issue`22796`) - Bug in :func:`merge_asof` where confusing error message raised when attempting to merge with missing values (:issue:`23189`) +- Bug in :meth:`DataFrame.nsmallest` and :meth:`DataFrame.nlargest` for dataframes that have :class:`MultiIndex`ed columns (:issue:`23033`). .. _whatsnew_0240.bug_fixes.sparse: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 0f1eb12883fd5..df2da26685a16 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1161,7 +1161,7 @@ class SelectNFrame(SelectN): def __init__(self, obj, n, keep, columns): super(SelectNFrame, self).__init__(obj, n, keep) - if not is_list_like(columns): + if not is_list_like(columns) or isinstance(columns, tuple): columns = [columns] columns = list(columns) self.columns = columns diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index b83fba7e7b277..ab4eaf02f38dd 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -2153,7 +2153,7 @@ def test_n(self, df_strings, nselect_method, n, order): tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('columns', [ - ('group', 'category_string'), ('group', 'string')]) + ['group', 'category_string'], ['group', 'string']]) def test_n_error(self, df_main_dtypes, nselect_method, columns): df = df_main_dtypes col = columns[1] @@ -2259,3 +2259,20 @@ def test_series_nat_conversion(self): df.rank() result = df tm.assert_frame_equal(result, expected) + + def 
test_multiindex_column_lookup(self): + # Check whether tuples are correctly treated as multi-level lookups. + # GH 23033 + df = pd.DataFrame( + columns=pd.MultiIndex.from_product([['x'], ['a', 'b']]), + data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]]) + + # nsmallest + result = df.nsmallest(3, ('x', 'a')) + expected = df.iloc[[2, 0, 3]] + tm.assert_frame_equal(result, expected) + + # nlargest + result = df.nlargest(3, ('x', 'b')) + expected = df.iloc[[3, 2, 1]] + tm.assert_frame_equal(result, expected)
This change fixes nlargest/smallest functionality for dataframes with MultiIndex columns. - [x] closes #23033 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23034
2018-10-08T00:45:00Z
2018-10-24T12:33:14Z
2018-10-24T12:33:13Z
2018-10-24T22:09:00Z
Add cookbook entry for triangular correlation matrix (closes #22840)
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index be8457fc14a4f..21d1f11ba49ba 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -1226,6 +1226,17 @@ Computation Correlation *********** +Often it's useful to obtain the lower (or upper) triangular form of a correlation matrix calculated from :func:`DataFrame.corr`. This can be achieved by passing a boolean mask to ``where`` as follows: + +.. ipython:: python + + df = pd.DataFrame(np.random.random(size=(100, 5))) + + corr_mat = df.corr() + mask = np.tril(np.ones_like(corr_mat, dtype=np.bool), k=-1) + + corr_mat.where(mask) + The `method` argument within `DataFrame.corr` can accept a callable in addition to the named correlation types. Here we compute the `distance correlation <https://en.wikipedia.org/wiki/Distance_correlation>`__ matrix for a `DataFrame` object. .. code-block:: python
- [ x ] closes #22840 - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/23032
2018-10-08T00:38:08Z
2018-11-03T14:36:24Z
2018-11-03T14:36:24Z
2018-11-03T14:40:37Z
BUG: Fix PeriodIndex +/- TimedeltaIndex
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a4209ba90aaee..0eeaff2fe62a1 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -729,6 +729,7 @@ Datetimelike - Bug in :class:`DatetimeIndex` where frequency was being set if original frequency was ``None`` (:issue:`22150`) - Bug in rounding methods of :class:`DatetimeIndex` (:meth:`~DatetimeIndex.round`, :meth:`~DatetimeIndex.ceil`, :meth:`~DatetimeIndex.floor`) and :class:`Timestamp` (:meth:`~Timestamp.round`, :meth:`~Timestamp.ceil`, :meth:`~Timestamp.floor`) could give rise to loss of precision (:issue:`22591`) - Bug in :func:`to_datetime` with an :class:`Index` argument that would drop the ``name`` from the result (:issue:`21697`) +- Bug in :class:`PeriodIndex` where adding or subtracting a :class:`timedelta` or :class:`Tick` object produced incorrect results (:issue:`22988`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index e4ace2bfe1509..a8c3b372e278f 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -382,6 +382,11 @@ def _add_delta_tdi(self, other): if not len(self) == len(other): raise ValueError("cannot add indices of unequal length") + if isinstance(other, np.ndarray): + # ndarray[timedelta64]; wrap in TimedeltaIndex for op + from pandas import TimedeltaIndex + other = TimedeltaIndex(other) + self_i8 = self.asi8 other_i8 = other.asi8 new_values = checked_add_with_arr(self_i8, other_i8, @@ -632,11 +637,17 @@ def __add__(self, other): return self._add_datelike(other) elif is_integer_dtype(other): result = self._addsub_int_array(other, operator.add) - elif is_float_dtype(other) or is_period_dtype(other): + elif is_float_dtype(other): # Explicitly catch invalid dtypes raise TypeError("cannot add {dtype}-dtype to {cls}" .format(dtype=other.dtype, cls=type(self).__name__)) + elif is_period_dtype(other): + # if self is a TimedeltaArray and 
other is a PeriodArray with + # a timedelta-like (i.e. Tick) freq, this operation is valid. + # Defer to the PeriodArray implementation. + # In remaining cases, this will end up raising TypeError. + return NotImplemented elif is_extension_array_dtype(other): # Categorical op will raise; defer explicitly return NotImplemented diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 7daaa8de1734f..bfddce662123f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -506,7 +506,7 @@ def _add_delta(self, delta): Parameters ---------- delta : {timedelta, np.timedelta64, DateOffset, - TimedelaIndex, ndarray[timedelta64]} + TimedeltaIndex, ndarray[timedelta64]} Returns ------- diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 6d13fb9ecaa39..7aaf3ddbb9c67 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from datetime import timedelta +import operator import warnings import numpy as np @@ -17,8 +18,8 @@ from pandas.util._decorators import (cache_readonly, deprecate_kwarg) from pandas.core.dtypes.common import ( - is_integer_dtype, is_float_dtype, is_period_dtype, - is_datetime64_dtype) + is_integer_dtype, is_float_dtype, is_period_dtype, is_timedelta64_dtype, + is_datetime64_dtype, _TD_DTYPE) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ABCSeries @@ -355,24 +356,54 @@ def _add_offset(self, other): return self._time_shift(other.n) def _add_delta_td(self, other): + assert isinstance(self.freq, Tick) # checked by calling function assert isinstance(other, (timedelta, np.timedelta64, Tick)) - nanos = delta_to_nanoseconds(other) - own_offset = frequencies.to_offset(self.freq.rule_code) - if isinstance(own_offset, Tick): - offset_nanos = delta_to_nanoseconds(own_offset) - if np.all(nanos % offset_nanos == 0): - return self._time_shift(nanos // offset_nanos) + delta = 
self._check_timedeltalike_freq_compat(other) - # raise when input doesn't have freq - raise IncompatibleFrequency("Input has different freq from " - "{cls}(freq={freqstr})" - .format(cls=type(self).__name__, - freqstr=self.freqstr)) + # Note: when calling parent class's _add_delta_td, it will call + # delta_to_nanoseconds(delta). Because delta here is an integer, + # delta_to_nanoseconds will return it unchanged. + return DatetimeLikeArrayMixin._add_delta_td(self, delta) + + def _add_delta_tdi(self, other): + assert isinstance(self.freq, Tick) # checked by calling function + + delta = self._check_timedeltalike_freq_compat(other) + return self._addsub_int_array(delta, operator.add) def _add_delta(self, other): - ordinal_delta = self._maybe_convert_timedelta(other) - return self._time_shift(ordinal_delta) + """ + Add a timedelta-like, Tick, or TimedeltaIndex-like object + to self. + + Parameters + ---------- + other : {timedelta, np.timedelta64, Tick, + TimedeltaIndex, ndarray[timedelta64]} + + Returns + ------- + result : same type as self + """ + if not isinstance(self.freq, Tick): + # We cannot add timedelta-like to non-tick PeriodArray + raise IncompatibleFrequency("Input has different freq from " + "{cls}(freq={freqstr})" + .format(cls=type(self).__name__, + freqstr=self.freqstr)) + + # TODO: standardize across datetimelike subclasses whether to return + # i8 view or _shallow_copy + if isinstance(other, (Tick, timedelta, np.timedelta64)): + new_values = self._add_delta_td(other) + return self._shallow_copy(new_values) + elif is_timedelta64_dtype(other): + # ndarray[timedelta64] or TimedeltaArray/index + new_values = self._add_delta_tdi(other) + return self._shallow_copy(new_values) + else: # pragma: no cover + raise TypeError(type(other).__name__) @deprecate_kwarg(old_arg_name='n', new_arg_name='periods') def shift(self, periods): @@ -428,14 +459,9 @@ def _maybe_convert_timedelta(self, other): other, (timedelta, np.timedelta64, Tick, np.ndarray)): offset = 
frequencies.to_offset(self.freq.rule_code) if isinstance(offset, Tick): - if isinstance(other, np.ndarray): - nanos = np.vectorize(delta_to_nanoseconds)(other) - else: - nanos = delta_to_nanoseconds(other) - offset_nanos = delta_to_nanoseconds(offset) - check = np.all(nanos % offset_nanos == 0) - if check: - return nanos // offset_nanos + # _check_timedeltalike_freq_compat will raise if incompatible + delta = self._check_timedeltalike_freq_compat(other) + return delta elif isinstance(other, DateOffset): freqstr = other.rule_code base = frequencies.get_base_alias(freqstr) @@ -454,6 +480,58 @@ def _maybe_convert_timedelta(self, other): raise IncompatibleFrequency(msg.format(cls=type(self).__name__, freqstr=self.freqstr)) + def _check_timedeltalike_freq_compat(self, other): + """ + Arithmetic operations with timedelta-like scalars or array `other` + are only valid if `other` is an integer multiple of `self.freq`. + If the operation is valid, find that integer multiple. Otherwise, + raise because the operation is invalid. + + Parameters + ---------- + other : timedelta, np.timedelta64, Tick, + ndarray[timedelta64], TimedeltaArray, TimedeltaIndex + + Returns + ------- + multiple : int or ndarray[int64] + + Raises + ------ + IncompatibleFrequency + """ + assert isinstance(self.freq, Tick) # checked by calling function + own_offset = frequencies.to_offset(self.freq.rule_code) + base_nanos = delta_to_nanoseconds(own_offset) + + if isinstance(other, (timedelta, np.timedelta64, Tick)): + nanos = delta_to_nanoseconds(other) + + elif isinstance(other, np.ndarray): + # numpy timedelta64 array; all entries must be compatible + assert other.dtype.kind == 'm' + if other.dtype != _TD_DTYPE: + # i.e. 
non-nano unit + # TODO: disallow unit-less timedelta64 + other = other.astype(_TD_DTYPE) + nanos = other.view('i8') + else: + # TimedeltaArray/Index + nanos = other.asi8 + + if np.all(nanos % base_nanos == 0): + # nanos being added is an integer multiple of the + # base-frequency to self.freq + delta = nanos // base_nanos + # delta is the integer (or integer-array) number of periods + # by which will be added to self. + return delta + + raise IncompatibleFrequency("Input has different freq from " + "{cls}(freq={freqstr})" + .format(cls=type(self).__name__, + freqstr=self.freqstr)) + PeriodArrayMixin._add_comparison_ops() PeriodArrayMixin._add_datetimelike_methods() diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 3210290b9c5c8..d81ab2b3a2ec3 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -446,26 +446,36 @@ def test_pi_add_sub_td64_array_non_tick_raises(self): with pytest.raises(period.IncompatibleFrequency): tdarr - rng - @pytest.mark.xfail(reason='op with TimedeltaIndex raises, with ndarray OK', - strict=True) def test_pi_add_sub_td64_array_tick(self): - rng = pd.period_range('1/1/2000', freq='Q', periods=3) + # PeriodIndex + Timedelta-like is allowed only with + # tick-like frequencies + rng = pd.period_range('1/1/2000', freq='90D', periods=3) tdi = pd.TimedeltaIndex(['-1 Day', '-1 Day', '-1 Day']) tdarr = tdi.values - expected = rng + tdi + expected = pd.period_range('12/31/1999', freq='90D', periods=3) + result = rng + tdi + tm.assert_index_equal(result, expected) result = rng + tdarr tm.assert_index_equal(result, expected) + result = tdi + rng + tm.assert_index_equal(result, expected) result = tdarr + rng tm.assert_index_equal(result, expected) - expected = rng - tdi + expected = pd.period_range('1/2/2000', freq='90D', periods=3) + + result = rng - tdi + tm.assert_index_equal(result, expected) result = rng - tdarr tm.assert_index_equal(result, expected) with 
pytest.raises(TypeError): tdarr - rng + with pytest.raises(TypeError): + tdi - rng + # ----------------------------------------------------------------- # operations with array/Index of DateOffset objects @@ -596,6 +606,56 @@ def test_pi_sub_intarray(self, box): # Timedelta-like (timedelta, timedelta64, Timedelta, Tick) # TODO: Some of these are misnomers because of non-Tick DateOffsets + def test_pi_add_timedeltalike_minute_gt1(self, three_days): + # GH#23031 adding a time-delta-like offset to a PeriodArray that has + # minute frequency with n != 1. A more general case is tested below + # in test_pi_add_timedeltalike_tick_gt1, but here we write out the + # expected result more explicitly. + other = three_days + rng = pd.period_range('2014-05-01', periods=3, freq='2D') + + expected = pd.PeriodIndex(['2014-05-04', '2014-05-06', '2014-05-08'], + freq='2D') + + result = rng + other + tm.assert_index_equal(result, expected) + + result = other + rng + tm.assert_index_equal(result, expected) + + # subtraction + expected = pd.PeriodIndex(['2014-04-28', '2014-04-30', '2014-05-02'], + freq='2D') + result = rng - other + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + other - rng + + @pytest.mark.parametrize('freqstr', ['5ns', '5us', '5ms', + '5s', '5T', '5h', '5d']) + def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr): + # GH#23031 adding a time-delta-like offset to a PeriodArray that has + # tick-like frequency with n != 1 + other = three_days + rng = pd.period_range('2014-05-01', periods=6, freq=freqstr) + + expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr) + + result = rng + other + tm.assert_index_equal(result, expected) + + result = other + rng + tm.assert_index_equal(result, expected) + + # subtraction + expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr) + result = rng - other + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + other - rng + def 
test_pi_add_iadd_timedeltalike_daily(self, three_days): # Tick other = three_days diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 4e01e0feb004c..61a3c4bb6934e 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -805,6 +805,7 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, Specify object name being compared, internally used to show appropriate assertion message """ + __tracebackhide__ = True def _check_types(l, r, obj='Index'): if exact: @@ -1048,6 +1049,8 @@ def assert_interval_array_equal(left, right, exact='equiv', def raise_assert_detail(obj, message, left, right, diff=None): + __tracebackhide__ = True + if isinstance(left, np.ndarray): left = pprint_thing(left) elif is_categorical_dtype(left):
- [x] closes #22998 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23031
2018-10-07T22:28:11Z
2018-10-15T00:49:09Z
2018-10-15T00:49:09Z
2018-10-15T00:49:23Z
TST: collect logical ops tests, use fixtures
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index a09efe6d4761c..a8e61b3fd9d3a 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -388,19 +388,11 @@ def test_td64arr_sub_period(self, box, freq): with pytest.raises(TypeError): p - idx - @pytest.mark.parametrize('box', [ - pd.Index, - Series, - pytest.param(pd.DataFrame, - marks=pytest.mark.xfail(reason="broadcasts along " - "wrong axis", - raises=ValueError, - strict=True)) - ], ids=lambda x: x.__name__) @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H']) @pytest.mark.parametrize('tdi_freq', [None, 'H']) - def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq): + def test_td64arr_sub_pi(self, box_df_broadcast_failure, tdi_freq, pi_freq): # GH#20049 subtracting PeriodIndex should raise TypeError + box = box_df_broadcast_failure tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq) dti = Timestamp('2018-03-07 17:16:40') + tdi pi = dti.to_period(pi_freq) @@ -529,16 +521,9 @@ def test_td64arr_rsub_int_series_invalid(self, box, tdser): with pytest.raises(err): Series([2, 3, 4]) - tdser - @pytest.mark.parametrize('box', [ - pd.Index, - Series, - pytest.param(pd.DataFrame, - marks=pytest.mark.xfail(reason="Attempts to broadcast " - "incorrectly", - strict=True, raises=ValueError)) - ], ids=lambda x: x.__name__) - def test_td64arr_add_intlike(self, box): + def test_td64arr_add_intlike(self, box_df_broadcast_failure): # GH#19123 + box = box_df_broadcast_failure tdi = TimedeltaIndex(['59 days', '59 days', 'NaT']) ser = tm.box_expected(tdi, box) err = TypeError if box is not pd.Index else NullFrequencyError @@ -706,21 +691,13 @@ def test_td64arr_sub_td64_array(self, box_df_broadcast_failure): tm.assert_equal(result, expected) # TODO: parametrize over [add, sub, radd, rsub]? 
- @pytest.mark.parametrize('box', [ - pd.Index, - Series, - pytest.param(pd.DataFrame, - marks=pytest.mark.xfail(reason="Tries to broadcast " - "incorrectly leading " - "to alignment error", - strict=True, raises=ValueError)) - ], ids=lambda x: x.__name__) @pytest.mark.parametrize('names', [(None, None, None), ('Egon', 'Venkman', None), ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_td64arr_add_sub_tdi(self, box, names): + def test_td64arr_add_sub_tdi(self, box_df_broadcast_failure, names): # GH#17250 make sure result dtype is correct # GH#19043 make sure names are propagated correctly + box = box_df_broadcast_failure tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0]) ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], @@ -830,19 +807,12 @@ def test_timedelta64_operations_with_DateOffset(self): td - op(5) op(5) - td - @pytest.mark.parametrize('box', [ - pd.Index, - Series, - pytest.param(pd.DataFrame, - marks=pytest.mark.xfail(reason="Tries to broadcast " - "incorrectly", - strict=True, raises=ValueError)) - ], ids=lambda x: x.__name__) @pytest.mark.parametrize('names', [(None, None, None), ('foo', 'bar', None), ('foo', 'foo', 'foo')]) - def test_td64arr_add_offset_index(self, names, box): + def test_td64arr_add_offset_index(self, names, box_df_broadcast_failure): # GH#18849, GH#19744 + box = box_df_broadcast_failure tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'], name=names[0]) other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 6ed289614b96a..433b0f09e13bc 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -27,38 +27,122 @@ from pandas.tests.frame.common import TestData, _check_mixed_float -class TestDataFrameOperators(TestData): +class TestDataFrameUnaryOperators(object): + # __pos__, __neg__, __inv__ + + 
@pytest.mark.parametrize('df,expected', [ + (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})), + (pd.DataFrame({'a': [False, True]}), + pd.DataFrame({'a': [True, False]})), + (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}), + pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))})) + ]) + def test_neg_numeric(self, df, expected): + assert_frame_equal(-df, expected) + assert_series_equal(-df['a'], expected['a']) + + @pytest.mark.parametrize('df, expected', [ + (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)), + ([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]), + ]) + def test_neg_object(self, df, expected): + # GH#21380 + df = pd.DataFrame({'a': df}) + expected = pd.DataFrame({'a': expected}) + assert_frame_equal(-df, expected) + assert_series_equal(-df['a'], expected['a']) + + @pytest.mark.parametrize('df', [ + pd.DataFrame({'a': ['a', 'b']}), + pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), + ]) + def test_neg_raises(self, df): + with pytest.raises(TypeError): + (- df) + with pytest.raises(TypeError): + (- df['a']) + + def test_invert(self): + _seriesd = tm.getSeriesData() + df = pd.DataFrame(_seriesd) + + assert_frame_equal(-(df < 0), ~(df < 0)) + + @pytest.mark.parametrize('df', [ + pd.DataFrame({'a': [-1, 1]}), + pd.DataFrame({'a': [False, True]}), + pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}), + ]) + def test_pos_numeric(self, df): + # GH#16073 + assert_frame_equal(+df, df) + assert_series_equal(+df['a'], df['a']) + + @pytest.mark.parametrize('df', [ + # numpy changing behavior in the future + pytest.param(pd.DataFrame({'a': ['a', 'b']}), + marks=[pytest.mark.filterwarnings("ignore")]), + pd.DataFrame({'a': np.array([-1, 2], dtype=object)}), + pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}), + ]) + def test_pos_object(self, df): + # GH#21380 + assert_frame_equal(+df, df) + assert_series_equal(+df['a'], df['a']) + + @pytest.mark.parametrize('df', [ + 
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), + ]) + def test_pos_raises(self, df): + with pytest.raises(TypeError): + (+ df) + with pytest.raises(TypeError): + (+ df['a']) - def test_operators_boolean(self): - # GH 5808 +class TestDataFrameLogicalOperators(object): + # &, |, ^ + + def test_logical_ops_empty_frame(self): + # GH#5808 # empty frames, non-mixed dtype + df = DataFrame(index=[1]) + + result = df & df + assert_frame_equal(result, df) + + result = df | df + assert_frame_equal(result, df) + + df2 = DataFrame(index=[1, 2]) + result = df & df2 + assert_frame_equal(result, df2) - result = DataFrame(index=[1]) & DataFrame(index=[1]) - assert_frame_equal(result, DataFrame(index=[1])) + dfa = DataFrame(index=[1], columns=['A']) - result = DataFrame(index=[1]) | DataFrame(index=[1]) - assert_frame_equal(result, DataFrame(index=[1])) + result = dfa & dfa + assert_frame_equal(result, dfa) - result = DataFrame(index=[1]) & DataFrame(index=[1, 2]) - assert_frame_equal(result, DataFrame(index=[1, 2])) + def test_logical_ops_bool_frame(self): + # GH#5808 + df1a_bool = DataFrame(True, index=[1], columns=['A']) - result = DataFrame(index=[1], columns=['A']) & DataFrame( - index=[1], columns=['A']) - assert_frame_equal(result, DataFrame(index=[1], columns=['A'])) + result = df1a_bool & df1a_bool + assert_frame_equal(result, df1a_bool) - result = DataFrame(True, index=[1], columns=['A']) & DataFrame( - True, index=[1], columns=['A']) - assert_frame_equal(result, DataFrame(True, index=[1], columns=['A'])) + result = df1a_bool | df1a_bool + assert_frame_equal(result, df1a_bool) - result = DataFrame(True, index=[1], columns=['A']) | DataFrame( - True, index=[1], columns=['A']) - assert_frame_equal(result, DataFrame(True, index=[1], columns=['A'])) + def test_logical_ops_int_frame(self): + # GH#5808 + df1a_int = DataFrame(1, index=[1], columns=['A']) + df1a_bool = DataFrame(True, index=[1], columns=['A']) - # boolean ops - result = DataFrame(1, index=[1], 
columns=['A']) | DataFrame( - True, index=[1], columns=['A']) - assert_frame_equal(result, DataFrame(1, index=[1], columns=['A'])) + result = df1a_int | df1a_bool + assert_frame_equal(result, df1a_int) + + def test_logical_ops_invalid(self): + # GH#5808 df1 = DataFrame(1.0, index=[1], columns=['A']) df2 = DataFrame(True, index=[1], columns=['A']) @@ -70,6 +154,70 @@ def test_operators_boolean(self): with pytest.raises(TypeError): df1 | df2 + def test_logical_operators(self): + + def _check_bin_op(op): + result = op(df1, df2) + expected = DataFrame(op(df1.values, df2.values), index=df1.index, + columns=df1.columns) + assert result.values.dtype == np.bool_ + assert_frame_equal(result, expected) + + def _check_unary_op(op): + result = op(df1) + expected = DataFrame(op(df1.values), index=df1.index, + columns=df1.columns) + assert result.values.dtype == np.bool_ + assert_frame_equal(result, expected) + + df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}, + 'b': {'a': False, 'b': True, 'c': False, + 'd': False, 'e': False}, + 'c': {'a': False, 'b': False, 'c': True, + 'd': False, 'e': False}, + 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}, + 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}} + + df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False}, + 'b': {'a': False, 'b': True, 'c': False, + 'd': False, 'e': False}, + 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False}, + 'd': {'a': False, 'b': False, 'c': False, + 'd': True, 'e': False}, + 'e': {'a': False, 'b': False, 'c': False, + 'd': False, 'e': True}} + + df1 = DataFrame(df1) + df2 = DataFrame(df2) + + _check_bin_op(operator.and_) + _check_bin_op(operator.or_) + _check_bin_op(operator.xor) + + # operator.neg is deprecated in numpy >= 1.9 + _check_unary_op(operator.inv) # TODO: belongs elsewhere + + def test_logical_with_nas(self): + d = DataFrame({'a': [np.nan, False], 'b': [True, True]}) + + # GH4947 + # bool comparisons should 
return bool + result = d['a'] | d['b'] + expected = Series([False, True]) + assert_series_equal(result, expected) + + # GH4604, automatic casting here + result = d['a'].fillna(False) | d['b'] + expected = Series([True, True]) + assert_series_equal(result, expected) + + result = d['a'].fillna(False, downcast=False) | d['b'] + expected = Series([True, True]) + assert_series_equal(result, expected) + + +class TestDataFrameOperators(TestData): + @pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul, operator.truediv]) def test_operators_none_as_na(self, op): @@ -164,142 +312,15 @@ def test_timestamp_compare(self): result = right_f(Timestamp('nat'), df) assert_frame_equal(result, expected) - def test_logical_operators(self): - - def _check_bin_op(op): - result = op(df1, df2) - expected = DataFrame(op(df1.values, df2.values), index=df1.index, - columns=df1.columns) - assert result.values.dtype == np.bool_ - assert_frame_equal(result, expected) - - def _check_unary_op(op): - result = op(df1) - expected = DataFrame(op(df1.values), index=df1.index, - columns=df1.columns) - assert result.values.dtype == np.bool_ - assert_frame_equal(result, expected) - - df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}, - 'b': {'a': False, 'b': True, 'c': False, - 'd': False, 'e': False}, - 'c': {'a': False, 'b': False, 'c': True, - 'd': False, 'e': False}, - 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}, - 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}} - - df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False}, - 'b': {'a': False, 'b': True, 'c': False, - 'd': False, 'e': False}, - 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False}, - 'd': {'a': False, 'b': False, 'c': False, - 'd': True, 'e': False}, - 'e': {'a': False, 'b': False, 'c': False, - 'd': False, 'e': True}} - - df1 = DataFrame(df1) - df2 = DataFrame(df2) - - _check_bin_op(operator.and_) - _check_bin_op(operator.or_) - 
_check_bin_op(operator.xor) - - # operator.neg is deprecated in numpy >= 1.9 - _check_unary_op(operator.inv) - @pytest.mark.parametrize('op,res', [('__eq__', False), ('__ne__', True)]) - # not sure what's correct here. + # TODO: not sure what's correct here. @pytest.mark.filterwarnings("ignore:elementwise:FutureWarning") def test_logical_typeerror_with_non_valid(self, op, res): # we are comparing floats vs a string result = getattr(self.frame, op)('foo') assert bool(result.all().all()) is res - def test_logical_with_nas(self): - d = DataFrame({'a': [np.nan, False], 'b': [True, True]}) - - # GH4947 - # bool comparisons should return bool - result = d['a'] | d['b'] - expected = Series([False, True]) - assert_series_equal(result, expected) - - # GH4604, automatic casting here - result = d['a'].fillna(False) | d['b'] - expected = Series([True, True]) - assert_series_equal(result, expected) - - result = d['a'].fillna(False, downcast=False) | d['b'] - expected = Series([True, True]) - assert_series_equal(result, expected) - - @pytest.mark.parametrize('df,expected', [ - (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})), - (pd.DataFrame({'a': [False, True]}), - pd.DataFrame({'a': [True, False]})), - (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}), - pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))})) - ]) - def test_neg_numeric(self, df, expected): - assert_frame_equal(-df, expected) - assert_series_equal(-df['a'], expected['a']) - - @pytest.mark.parametrize('df, expected', [ - (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)), - ([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]), - ]) - def test_neg_object(self, df, expected): - # GH 21380 - df = pd.DataFrame({'a': df}) - expected = pd.DataFrame({'a': expected}) - assert_frame_equal(-df, expected) - assert_series_equal(-df['a'], expected['a']) - - @pytest.mark.parametrize('df', [ - pd.DataFrame({'a': ['a', 'b']}), - pd.DataFrame({'a': 
pd.to_datetime(['2017-01-22', '1970-01-01'])}), - ]) - def test_neg_raises(self, df): - with pytest.raises(TypeError): - (- df) - with pytest.raises(TypeError): - (- df['a']) - - def test_invert(self): - assert_frame_equal(-(self.frame < 0), ~(self.frame < 0)) - - @pytest.mark.parametrize('df', [ - pd.DataFrame({'a': [-1, 1]}), - pd.DataFrame({'a': [False, True]}), - pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}), - ]) - def test_pos_numeric(self, df): - # GH 16073 - assert_frame_equal(+df, df) - assert_series_equal(+df['a'], df['a']) - - @pytest.mark.parametrize('df', [ - # numpy changing behavior in the future - pytest.param(pd.DataFrame({'a': ['a', 'b']}), - marks=[pytest.mark.filterwarnings("ignore")]), - pd.DataFrame({'a': np.array([-1, 2], dtype=object)}), - pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}), - ]) - def test_pos_object(self, df): - # GH 21380 - assert_frame_equal(+df, df) - assert_series_equal(+df['a'], df['a']) - - @pytest.mark.parametrize('df', [ - pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), - ]) - def test_pos_raises(self, df): - with pytest.raises(TypeError): - (+ df) - with pytest.raises(TypeError): - (+ df['a']) - def test_binary_ops_align(self): # test aligning binary ops diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index d56df2371b2e3..d52b848bebad1 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -14,9 +14,6 @@ def _permute(obj): class TestPeriodIndex(object): - def setup_method(self, method): - pass - def test_as_frame_columns(self): rng = period_range('1/1/2000', periods=5) df = DataFrame(randn(10, 5), columns=rng) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index b1d9d362d1402..40089c8e9e477 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -182,7 +182,7 @@ def test_frame_ctor_datetime64_column(self): df = DataFrame({'A': 
np.random.randn(len(rng)), 'B': dates}) assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')) - def test_frame_add_datetime64_column(self): + def test_frame_append_datetime64_column(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') df = DataFrame(index=np.arange(len(rng))) @@ -195,7 +195,7 @@ def test_frame_datetime64_pre1900_repr(self): # it works! repr(df) - def test_frame_add_datetime64_col_other_units(self): + def test_frame_append_datetime64_col_other_units(self): n = 100 units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y'] diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index f3ab197771d53..55e3dfde3ceb7 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -25,6 +25,361 @@ from .common import TestData +class TestSeriesLogicalOps(object): + @pytest.mark.parametrize('bool_op', [operator.and_, + operator.or_, operator.xor]) + def test_bool_operators_with_nas(self, bool_op): + # boolean &, |, ^ should work with object arrays and propagate NAs + ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) + ser[::2] = np.nan + + mask = ser.isna() + filled = ser.fillna(ser[0]) + + result = bool_op(ser < ser[9], ser > ser[3]) + + expected = bool_op(filled < filled[9], filled > filled[3]) + expected[mask] = False + assert_series_equal(result, expected) + + def test_operators_bitwise(self): + # GH#9016: support bitwise op for integer types + index = list('bca') + + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + s_tff = Series([True, False, False], index=index) + s_empty = Series([]) + + # TODO: unused + # s_0101 = Series([0, 1, 0, 1]) + + s_0123 = Series(range(4), dtype='int64') + s_3333 = Series([3] * 4) + s_4444 = Series([4] * 4) + + res = s_tft & s_empty + expected = s_fff + assert_series_equal(res, expected) + + res = s_tft | s_empty + expected = s_tft + assert_series_equal(res, expected) + + res = 
s_0123 & s_3333 + expected = Series(range(4), dtype='int64') + assert_series_equal(res, expected) + + res = s_0123 | s_4444 + expected = Series(range(4, 8), dtype='int64') + assert_series_equal(res, expected) + + s_a0b1c0 = Series([1], list('b')) + + res = s_tft & s_a0b1c0 + expected = s_tff.reindex(list('abc')) + assert_series_equal(res, expected) + + res = s_tft | s_a0b1c0 + expected = s_tft.reindex(list('abc')) + assert_series_equal(res, expected) + + n0 = 0 + res = s_tft & n0 + expected = s_fff + assert_series_equal(res, expected) + + res = s_0123 & n0 + expected = Series([0] * 4) + assert_series_equal(res, expected) + + n1 = 1 + res = s_tft & n1 + expected = s_tft + assert_series_equal(res, expected) + + res = s_0123 & n1 + expected = Series([0, 1, 0, 1]) + assert_series_equal(res, expected) + + s_1111 = Series([1] * 4, dtype='int8') + res = s_0123 & s_1111 + expected = Series([0, 1, 0, 1], dtype='int64') + assert_series_equal(res, expected) + + res = s_0123.astype(np.int16) | s_1111.astype(np.int32) + expected = Series([1, 1, 3, 3], dtype='int32') + assert_series_equal(res, expected) + + with pytest.raises(TypeError): + s_1111 & 'a' + with pytest.raises(TypeError): + s_1111 & ['a', 'b', 'c', 'd'] + with pytest.raises(TypeError): + s_0123 & np.NaN + with pytest.raises(TypeError): + s_0123 & 3.14 + with pytest.raises(TypeError): + s_0123 & [0.1, 4, 3.14, 2] + + # s_0123 will be all false now because of reindexing like s_tft + if compat.PY3: + # unable to sort incompatible object via .union. + exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3]) + with tm.assert_produces_warning(RuntimeWarning): + assert_series_equal(s_tft & s_0123, exp) + else: + exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) + assert_series_equal(s_tft & s_0123, exp) + + # s_tft will be all false now because of reindexing like s_0123 + if compat.PY3: + # unable to sort incompatible object via .union. 
+ exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a']) + with tm.assert_produces_warning(RuntimeWarning): + assert_series_equal(s_0123 & s_tft, exp) + else: + exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) + assert_series_equal(s_0123 & s_tft, exp) + + assert_series_equal(s_0123 & False, Series([False] * 4)) + assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) + assert_series_equal(s_0123 & [False], Series([False] * 4)) + assert_series_equal(s_0123 & (False), Series([False] * 4)) + assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), + Series([False] * 4)) + + s_ftft = Series([False, True, False, True]) + assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft) + + s_abNd = Series(['a', 'b', np.NaN, 'd']) + res = s_0123 & s_abNd + expected = s_ftft + assert_series_equal(res, expected) + + def test_scalar_na_logical_ops_corners(self): + s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10]) + + with pytest.raises(TypeError): + s & datetime(2005, 1, 1) + + s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)]) + s[::2] = np.nan + + expected = Series(True, index=s.index) + expected[::2] = False + result = s & list(s) + assert_series_equal(result, expected) + + d = DataFrame({'A': s}) + # TODO: Fix this exception - needs to be fixed! 
(see GH5035) + # (previously this was a TypeError because series returned + # NotImplemented + + # this is an alignment issue; these are equivalent + # https://github.com/pandas-dev/pandas/issues/5284 + + with pytest.raises(TypeError): + d.__and__(s, axis='columns') + + with pytest.raises(TypeError): + s & d + + # this is wrong as its not a boolean result + # result = d.__and__(s,axis='index') + + @pytest.mark.parametrize('op', [ + operator.and_, + operator.or_, + operator.xor, + pytest.param(ops.rand_, + marks=pytest.mark.xfail(reason="GH#22092 Index " + "implementation returns " + "Index", + raises=AssertionError, + strict=True)), + pytest.param(ops.ror_, + marks=pytest.mark.xfail(reason="GH#22092 Index " + "implementation raises", + raises=ValueError, strict=True)), + pytest.param(ops.rxor, + marks=pytest.mark.xfail(reason="GH#22092 Index " + "implementation raises", + raises=TypeError, strict=True)) + ]) + def test_logical_ops_with_index(self, op): + # GH#22092, GH#19792 + ser = Series([True, True, False, False]) + idx1 = Index([True, False, True, False]) + idx2 = Index([1, 0, 1, 0]) + + expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))]) + + result = op(ser, idx1) + assert_series_equal(result, expected) + + expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], + dtype=bool) + + result = op(ser, idx2) + assert_series_equal(result, expected) + + def test_logical_ops_label_based(self): + # GH#4947 + # logical ops should be label based + + a = Series([True, False, True], list('bca')) + b = Series([False, True, False], list('abc')) + + expected = Series([False, True, False], list('abc')) + result = a & b + assert_series_equal(result, expected) + + expected = Series([True, True, False], list('abc')) + result = a | b + assert_series_equal(result, expected) + + expected = Series([True, False, False], list('abc')) + result = a ^ b + assert_series_equal(result, expected) + + # rhs is bigger + a = Series([True, False, True], list('bca')) + b = 
Series([False, True, False, True], list('abcd')) + + expected = Series([False, True, False, False], list('abcd')) + result = a & b + assert_series_equal(result, expected) + + expected = Series([True, True, False, False], list('abcd')) + result = a | b + assert_series_equal(result, expected) + + # filling + + # vs empty + result = a & Series([]) + expected = Series([False, False, False], list('bca')) + assert_series_equal(result, expected) + + result = a | Series([]) + expected = Series([True, False, True], list('bca')) + assert_series_equal(result, expected) + + # vs non-matching + result = a & Series([1], ['z']) + expected = Series([False, False, False, False], list('abcz')) + assert_series_equal(result, expected) + + result = a | Series([1], ['z']) + expected = Series([True, True, False, False], list('abcz')) + assert_series_equal(result, expected) + + # identity + # we would like s[s|e] == s to hold for any e, whether empty or not + for e in [Series([]), Series([1], ['z']), + Series(np.nan, b.index), Series(np.nan, a.index)]: + result = a[a | e] + assert_series_equal(result, a[a]) + + for e in [Series(['z'])]: + if compat.PY3: + with tm.assert_produces_warning(RuntimeWarning): + result = a[a | e] + else: + result = a[a | e] + assert_series_equal(result, a[a]) + + # vs scalars + index = list('bca') + t = Series([True, False, True]) + + for v in [True, 1, 2]: + result = Series([True, False, True], index=index) | v + expected = Series([True, True, True], index=index) + assert_series_equal(result, expected) + + for v in [np.nan, 'foo']: + with pytest.raises(TypeError): + t | v + + for v in [False, 0]: + result = Series([True, False, True], index=index) | v + expected = Series([True, False, True], index=index) + assert_series_equal(result, expected) + + for v in [True, 1]: + result = Series([True, False, True], index=index) & v + expected = Series([True, False, True], index=index) + assert_series_equal(result, expected) + + for v in [False, 0]: + result = 
Series([True, False, True], index=index) & v + expected = Series([False, False, False], index=index) + assert_series_equal(result, expected) + for v in [np.nan]: + with pytest.raises(TypeError): + t & v + + def test_logical_ops_df_compat(self): + # GH#1134 + s1 = pd.Series([True, False, True], index=list('ABC'), name='x') + s2 = pd.Series([True, True, False], index=list('ABD'), name='x') + + exp = pd.Series([True, False, False, False], + index=list('ABCD'), name='x') + assert_series_equal(s1 & s2, exp) + assert_series_equal(s2 & s1, exp) + + # True | np.nan => True + exp = pd.Series([True, True, True, False], + index=list('ABCD'), name='x') + assert_series_equal(s1 | s2, exp) + # np.nan | True => np.nan, filled with False + exp = pd.Series([True, True, False, False], + index=list('ABCD'), name='x') + assert_series_equal(s2 | s1, exp) + + # DataFrame doesn't fill nan with False + exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s1.to_frame() & s2.to_frame(), exp) + assert_frame_equal(s2.to_frame() & s1.to_frame(), exp) + + exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s1.to_frame() | s2.to_frame(), exp) + assert_frame_equal(s2.to_frame() | s1.to_frame(), exp) + + # different length + s3 = pd.Series([True, False, True], index=list('ABC'), name='x') + s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x') + + exp = pd.Series([True, False, True, False], + index=list('ABCD'), name='x') + assert_series_equal(s3 & s4, exp) + assert_series_equal(s4 & s3, exp) + + # np.nan | True => np.nan, filled with False + exp = pd.Series([True, True, True, False], + index=list('ABCD'), name='x') + assert_series_equal(s3 | s4, exp) + # True | np.nan => True + exp = pd.Series([True, True, True, True], + index=list('ABCD'), name='x') + assert_series_equal(s4 | s3, exp) + + exp = pd.DataFrame({'x': [True, False, True, np.nan]}, + index=list('ABCD')) + 
assert_frame_equal(s3.to_frame() & s4.to_frame(), exp) + assert_frame_equal(s4.to_frame() & s3.to_frame(), exp) + + exp = pd.DataFrame({'x': [True, True, True, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s3.to_frame() | s4.to_frame(), exp) + assert_frame_equal(s4.to_frame() | s3.to_frame(), exp) + + class TestSeriesComparisons(object): def test_comparisons(self): left = np.random.randn(10) @@ -164,22 +519,6 @@ def test_comparison_operators_with_nas(self): # expected = f(val, s.dropna()).reindex(s.index) # assert_series_equal(result, expected) - @pytest.mark.parametrize('bool_op', [operator.and_, - operator.or_, operator.xor]) - def test_bool_operators_with_nas(self, bool_op): - # boolean &, |, ^ should work with object arrays and propagate NAs - ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) - ser[::2] = np.nan - - mask = ser.isna() - filled = ser.fillna(ser[0]) - - result = bool_op(ser < ser[9], ser > ser[3]) - - expected = bool_op(filled < filled[9], filled > filled[3]) - expected[mask] = False - assert_series_equal(result, expected) - def test_unequal_categorical_comparison_raises_type_error(self): # unequal comparison should raise for unordered cats cat = Series(Categorical(list("abc"))) @@ -258,104 +597,44 @@ def test_comparison_different_length(self): with pytest.raises(ValueError): a == b - def test_comparison_label_based(self): - - # GH 4947 - # comparisons should be label based - - a = Series([True, False, True], list('bca')) - b = Series([False, True, False], list('abc')) - - expected = Series([False, True, False], list('abc')) - result = a & b - assert_series_equal(result, expected) - - expected = Series([True, True, False], list('abc')) - result = a | b - assert_series_equal(result, expected) - - expected = Series([True, False, False], list('abc')) - result = a ^ b - assert_series_equal(result, expected) - - # rhs is bigger - a = Series([True, False, True], list('bca')) - b = Series([False, True, False, True], list('abcd')) - - 
expected = Series([False, True, False, False], list('abcd')) - result = a & b - assert_series_equal(result, expected) - - expected = Series([True, True, False, False], list('abcd')) - result = a | b - assert_series_equal(result, expected) - - # filling - - # vs empty - result = a & Series([]) - expected = Series([False, False, False], list('bca')) - assert_series_equal(result, expected) - - result = a | Series([]) - expected = Series([True, False, True], list('bca')) - assert_series_equal(result, expected) + def test_ne(self): + ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) + expected = [True, True, False, True, True] + assert tm.equalContents(ts.index != 5, expected) + assert tm.equalContents(~(ts.index == 5), expected) - # vs non-matching - result = a & Series([1], ['z']) - expected = Series([False, False, False, False], list('abcz')) - assert_series_equal(result, expected) + def test_comp_ops_df_compat(self): + # GH 1134 + s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') - result = a | Series([1], ['z']) - expected = Series([True, True, False, False], list('abcz')) - assert_series_equal(result, expected) + s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - # identity - # we would like s[s|e] == s to hold for any e, whether empty or not - for e in [Series([]), Series([1], ['z']), - Series(np.nan, b.index), Series(np.nan, a.index)]: - result = a[a | e] - assert_series_equal(result, a[a]) + for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: - for e in [Series(['z'])]: - if compat.PY3: - with tm.assert_produces_warning(RuntimeWarning): - result = a[a | e] - else: - result = a[a | e] - assert_series_equal(result, a[a]) + msg = "Can only compare identically-labeled Series objects" + with tm.assert_raises_regex(ValueError, msg): + left == right - # vs scalars - index = list('bca') - t = Series([True, False, True]) + 
with tm.assert_raises_regex(ValueError, msg): + left != right - for v in [True, 1, 2]: - result = Series([True, False, True], index=index) | v - expected = Series([True, True, True], index=index) - assert_series_equal(result, expected) + with tm.assert_raises_regex(ValueError, msg): + left < right - for v in [np.nan, 'foo']: - with pytest.raises(TypeError): - t | v + msg = "Can only compare identically-labeled DataFrame objects" + with tm.assert_raises_regex(ValueError, msg): + left.to_frame() == right.to_frame() - for v in [False, 0]: - result = Series([True, False, True], index=index) | v - expected = Series([True, False, True], index=index) - assert_series_equal(result, expected) + with tm.assert_raises_regex(ValueError, msg): + left.to_frame() != right.to_frame() - for v in [True, 1]: - result = Series([True, False, True], index=index) & v - expected = Series([True, False, True], index=index) - assert_series_equal(result, expected) + with tm.assert_raises_regex(ValueError, msg): + left.to_frame() < right.to_frame() - for v in [False, 0]: - result = Series([True, False, True], index=index) & v - expected = Series([False, False, False], index=index) - assert_series_equal(result, expected) - for v in [np.nan]: - with pytest.raises(TypeError): - t & v +class TestSeriesFlexComparisonOps(object): def test_comparison_flex_basic(self): left = pd.Series(np.random.randn(10)) right = pd.Series(np.random.randn(10)) @@ -414,53 +693,17 @@ def test_comparison_flex_alignment_fill(self): exp = pd.Series([True, True, False, False], index=list('abcd')) assert_series_equal(left.ne(right, fill_value=2), exp) - exp = pd.Series([False, False, True, True], index=list('abcd')) - assert_series_equal(left.le(right, fill_value=0), exp) - - exp = pd.Series([False, False, False, True], index=list('abcd')) - assert_series_equal(left.lt(right, fill_value=0), exp) - - exp = pd.Series([True, True, True, False], index=list('abcd')) - assert_series_equal(left.ge(right, fill_value=0), exp) - - exp 
= pd.Series([True, True, False, False], index=list('abcd')) - assert_series_equal(left.gt(right, fill_value=0), exp) - - def test_ne(self): - ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) - expected = [True, True, False, True, True] - assert tm.equalContents(ts.index != 5, expected) - assert tm.equalContents(~(ts.index == 5), expected) - - def test_comp_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') - - s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - - for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: - - msg = "Can only compare identically-labeled Series objects" - with tm.assert_raises_regex(ValueError, msg): - left == right - - with tm.assert_raises_regex(ValueError, msg): - left != right - - with tm.assert_raises_regex(ValueError, msg): - left < right - - msg = "Can only compare identically-labeled DataFrame objects" - with tm.assert_raises_regex(ValueError, msg): - left.to_frame() == right.to_frame() + exp = pd.Series([False, False, True, True], index=list('abcd')) + assert_series_equal(left.le(right, fill_value=0), exp) - with tm.assert_raises_regex(ValueError, msg): - left.to_frame() != right.to_frame() + exp = pd.Series([False, False, False, True], index=list('abcd')) + assert_series_equal(left.lt(right, fill_value=0), exp) - with tm.assert_raises_regex(ValueError, msg): - left.to_frame() < right.to_frame() + exp = pd.Series([True, True, True, False], index=list('abcd')) + assert_series_equal(left.ge(right, fill_value=0), exp) + + exp = pd.Series([True, True, False, False], index=list('abcd')) + assert_series_equal(left.gt(right, fill_value=0), exp) class TestDatetimeSeriesArithmetic(object): @@ -577,12 +820,6 @@ def test_op_method(self, opname, ts): expected = alt(other, series) assert_almost_equal(result, expected) - def test_neg(self): - 
assert_series_equal(-self.series, -1 * self.series) - - def test_invert(self): - assert_series_equal(-(self.series < 0), ~(self.series < 0)) - def test_operators_empty_int_corner(self): s1 = Series([], [], dtype=np.int32) s2 = Series({'x': 0.}) @@ -604,188 +841,6 @@ def test_ops_datetimelike_align(self): result = (dt2.to_frame() - dt.to_frame())[0] assert_series_equal(result, expected) - @pytest.mark.parametrize('op', [ - operator.and_, - operator.or_, - operator.xor, - pytest.param(ops.rand_, - marks=pytest.mark.xfail(reason="GH#22092 Index " - "implementation returns " - "Index", - raises=AssertionError, - strict=True)), - pytest.param(ops.ror_, - marks=pytest.mark.xfail(reason="GH#22092 Index " - "implementation raises", - raises=ValueError, strict=True)), - pytest.param(ops.rxor, - marks=pytest.mark.xfail(reason="GH#22092 Index " - "implementation raises", - raises=TypeError, strict=True)) - ]) - def test_bool_ops_with_index(self, op): - # GH#22092, GH#19792 - ser = Series([True, True, False, False]) - idx1 = Index([True, False, True, False]) - idx2 = Index([1, 0, 1, 0]) - - expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))]) - - result = op(ser, idx1) - assert_series_equal(result, expected) - - expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], - dtype=bool) - - result = op(ser, idx2) - assert_series_equal(result, expected) - - def test_operators_bitwise(self): - # GH 9016: support bitwise op for integer types - index = list('bca') - - s_tft = Series([True, False, True], index=index) - s_fff = Series([False, False, False], index=index) - s_tff = Series([True, False, False], index=index) - s_empty = Series([]) - - # TODO: unused - # s_0101 = Series([0, 1, 0, 1]) - - s_0123 = Series(range(4), dtype='int64') - s_3333 = Series([3] * 4) - s_4444 = Series([4] * 4) - - res = s_tft & s_empty - expected = s_fff - assert_series_equal(res, expected) - - res = s_tft | s_empty - expected = s_tft - assert_series_equal(res, expected) - - res = 
s_0123 & s_3333 - expected = Series(range(4), dtype='int64') - assert_series_equal(res, expected) - - res = s_0123 | s_4444 - expected = Series(range(4, 8), dtype='int64') - assert_series_equal(res, expected) - - s_a0b1c0 = Series([1], list('b')) - - res = s_tft & s_a0b1c0 - expected = s_tff.reindex(list('abc')) - assert_series_equal(res, expected) - - res = s_tft | s_a0b1c0 - expected = s_tft.reindex(list('abc')) - assert_series_equal(res, expected) - - n0 = 0 - res = s_tft & n0 - expected = s_fff - assert_series_equal(res, expected) - - res = s_0123 & n0 - expected = Series([0] * 4) - assert_series_equal(res, expected) - - n1 = 1 - res = s_tft & n1 - expected = s_tft - assert_series_equal(res, expected) - - res = s_0123 & n1 - expected = Series([0, 1, 0, 1]) - assert_series_equal(res, expected) - - s_1111 = Series([1] * 4, dtype='int8') - res = s_0123 & s_1111 - expected = Series([0, 1, 0, 1], dtype='int64') - assert_series_equal(res, expected) - - res = s_0123.astype(np.int16) | s_1111.astype(np.int32) - expected = Series([1, 1, 3, 3], dtype='int32') - assert_series_equal(res, expected) - - with pytest.raises(TypeError): - s_1111 & 'a' - with pytest.raises(TypeError): - s_1111 & ['a', 'b', 'c', 'd'] - with pytest.raises(TypeError): - s_0123 & np.NaN - with pytest.raises(TypeError): - s_0123 & 3.14 - with pytest.raises(TypeError): - s_0123 & [0.1, 4, 3.14, 2] - - # s_0123 will be all false now because of reindexing like s_tft - if compat.PY3: - # unable to sort incompatible object via .union. - exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3]) - with tm.assert_produces_warning(RuntimeWarning): - assert_series_equal(s_tft & s_0123, exp) - else: - exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) - assert_series_equal(s_tft & s_0123, exp) - - # s_tft will be all false now because of reindexing like s_0123 - if compat.PY3: - # unable to sort incompatible object via .union. 
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a']) - with tm.assert_produces_warning(RuntimeWarning): - assert_series_equal(s_0123 & s_tft, exp) - else: - exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) - assert_series_equal(s_0123 & s_tft, exp) - - assert_series_equal(s_0123 & False, Series([False] * 4)) - assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) - assert_series_equal(s_0123 & [False], Series([False] * 4)) - assert_series_equal(s_0123 & (False), Series([False] * 4)) - assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), - Series([False] * 4)) - - s_ftft = Series([False, True, False, True]) - assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft) - - s_abNd = Series(['a', 'b', np.NaN, 'd']) - res = s_0123 & s_abNd - expected = s_ftft - assert_series_equal(res, expected) - - def test_scalar_na_cmp_corners(self): - s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10]) - - with pytest.raises(TypeError): - s & datetime(2005, 1, 1) - - s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)]) - s[::2] = np.nan - - expected = Series(True, index=s.index) - expected[::2] = False - result = s & list(s) - assert_series_equal(result, expected) - - d = DataFrame({'A': s}) - # TODO: Fix this exception - needs to be fixed! 
(see GH5035) - # (previously this was a TypeError because series returned - # NotImplemented - - # this is an alignment issue; these are equivalent - # https://github.com/pandas-dev/pandas/issues/5284 - - with pytest.raises(TypeError): - d.__and__(s, axis='columns') - - with pytest.raises(TypeError): - s & d - - # this is wrong as its not a boolean result - # result = d.__and__(s,axis='index') - def test_operators_corner(self): series = self.ts @@ -934,62 +989,15 @@ def test_idxminmax_with_inf(self): np.isnan(s.idxmax(skipna=False)) -class TestSeriesOperationsDataFrameCompat(object): - - def test_bool_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([True, False, True], index=list('ABC'), name='x') - s2 = pd.Series([True, True, False], index=list('ABD'), name='x') - - exp = pd.Series([True, False, False, False], - index=list('ABCD'), name='x') - assert_series_equal(s1 & s2, exp) - assert_series_equal(s2 & s1, exp) - - # True | np.nan => True - exp = pd.Series([True, True, True, False], - index=list('ABCD'), name='x') - assert_series_equal(s1 | s2, exp) - # np.nan | True => np.nan, filled with False - exp = pd.Series([True, True, False, False], - index=list('ABCD'), name='x') - assert_series_equal(s2 | s1, exp) - - # DataFrame doesn't fill nan with False - exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s1.to_frame() & s2.to_frame(), exp) - assert_frame_equal(s2.to_frame() & s1.to_frame(), exp) - - exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s1.to_frame() | s2.to_frame(), exp) - assert_frame_equal(s2.to_frame() | s1.to_frame(), exp) - - # different length - s3 = pd.Series([True, False, True], index=list('ABC'), name='x') - s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x') - - exp = pd.Series([True, False, True, False], - index=list('ABCD'), name='x') - assert_series_equal(s3 & s4, exp) - assert_series_equal(s4 & s3, exp) - - # np.nan | 
True => np.nan, filled with False - exp = pd.Series([True, True, True, False], - index=list('ABCD'), name='x') - assert_series_equal(s3 | s4, exp) - # True | np.nan => True - exp = pd.Series([True, True, True, True], - index=list('ABCD'), name='x') - assert_series_equal(s4 | s3, exp) +class TestSeriesUnaryOps(object): + # __neg__, __pos__, __inv__ - exp = pd.DataFrame({'x': [True, False, True, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s3.to_frame() & s4.to_frame(), exp) - assert_frame_equal(s4.to_frame() & s3.to_frame(), exp) + def test_neg(self): + ser = tm.makeStringSeries() + ser.name = 'series' + assert_series_equal(-ser, -1 * ser) - exp = pd.DataFrame({'x': [True, True, True, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s3.to_frame() | s4.to_frame(), exp) - assert_frame_equal(s4.to_frame() | s3.to_frame(), exp) + def test_invert(self): + ser = tm.makeStringSeries() + ser.name = 'series' + assert_series_equal(-(ser < 0), ~(ser < 0))
Collect tests for logical ops |, &, ^. Use fixtures in a couple of places in tests/arithmetic Rename a couple of poorly named tests No logic is changed, nothing is moved cross-module.
https://api.github.com/repos/pandas-dev/pandas/pulls/23029
2018-10-07T18:46:13Z
2018-10-07T21:41:20Z
2018-10-07T21:41:20Z
2018-10-07T22:26:33Z
DOC: Upgraded Docstring pandas.DataFrame.dot
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f7f1855a4fabc..46651ee83d6f2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -931,16 +931,70 @@ def __len__(self): def dot(self, other): """ - Matrix multiplication with DataFrame or Series objects. Can also be - called using `self @ other` in Python >= 3.5. + Compute the matrix mutiplication between the DataFrame and other. + + This method computes the matrix product between the DataFrame and the + values of an other Series, DataFrame or a numpy array. + + It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- - other : DataFrame or Series + other : Series, DataFrame or array-like + The other object to compute the matrix product with. Returns ------- - dot_product : DataFrame or Series + Series or DataFrame + If other is a Series, return the matrix product between self and + other as a Serie. If other is a DataFrame or a numpy.array, return + the matrix product of self and other in a DataFrame of a np.array. + + See Also + -------- + Series.dot: Similar method for Series. + + Notes + ----- + The dimensions of DataFrame and other must be compatible in order to + compute the matrix multiplication. + + The dot method for Series computes the inner product, instead of the + matrix product here. + + Examples + -------- + Here we multiply a DataFrame with a Series. + + >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + >>> s = pd.Series([1, 1, 2, 1]) + >>> df.dot(s) + 0 -4 + 1 5 + dtype: int64 + + Here we multiply a DataFrame with another DataFrame. + + >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) + >>> df.dot(other) + 0 1 + 0 1 4 + 1 2 2 + + Note that the dot method give the same result as @ + + >>> df @ other + 0 1 + 0 1 4 + 1 2 2 + + The dot method works also if other is an np.array. 
+ + >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) + >>> df.dot(arr) + 0 1 + 0 1 4 + 1 2 2 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index)
- [ ] closes #xxxx - [ ] tests added / passed - [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23024
2018-10-07T11:08:44Z
2018-12-19T12:31:20Z
2018-12-19T12:31:20Z
2018-12-19T12:31:24Z
REF: Fuse all the types
diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in index 6bcc735656c6b..b39b5eaced8fd 100644 --- a/pandas/_libs/algos_common_helper.pxi.in +++ b/pandas/_libs/algos_common_helper.pxi.in @@ -16,33 +16,30 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in {{py: -# name, c_type, dest_type, dest_dtype -dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64'), - ('float32', 'float32_t', 'float32_t', 'np.float32'), - ('int8', 'int8_t', 'float32_t', 'np.float32'), - ('int16', 'int16_t', 'float32_t', 'np.float32'), - ('int32', 'int32_t', 'float64_t', 'np.float64'), - ('int64', 'int64_t', 'float64_t', 'np.float64')] +# name, c_type, dest_type +dtypes = [('float64', 'float64_t', 'float64_t'), + ('float32', 'float32_t', 'float32_t'), + ('int8', 'int8_t', 'float32_t'), + ('int16', 'int16_t', 'float32_t'), + ('int32', 'int32_t', 'float64_t'), + ('int64', 'int64_t', 'float64_t')] def get_dispatch(dtypes): - for name, c_type, dest_type, dest_dtype, in dtypes: - - dest_type2 = dest_type - dest_type = dest_type.replace('_t', '') - - yield name, c_type, dest_type, dest_type2, dest_dtype + for name, c_type, dest_type, in dtypes: + dest_name = dest_type[:-2] # i.e. 
strip "_t" + yield name, c_type, dest_type, dest_name }} -{{for name, c_type, dest_type, dest_type2, dest_dtype +{{for name, c_type, dest_type, dest_name in get_dispatch(dtypes)}} @cython.boundscheck(False) @cython.wraparound(False) def diff_2d_{{name}}(ndarray[{{c_type}}, ndim=2] arr, - ndarray[{{dest_type2}}, ndim=2] out, + ndarray[{{dest_type}}, ndim=2] out, Py_ssize_t periods, int axis): cdef: Py_ssize_t i, j, sx, sy @@ -84,9 +81,9 @@ def diff_2d_{{name}}(ndarray[{{c_type}}, ndim=2] arr, out[i, j] = arr[i, j] - arr[i, j - periods] -def put2d_{{name}}_{{dest_type}}(ndarray[{{c_type}}, ndim=2, cast=True] values, +def put2d_{{name}}_{{dest_name}}(ndarray[{{c_type}}, ndim=2, cast=True] values, ndarray[int64_t] indexer, Py_ssize_t loc, - ndarray[{{dest_type2}}] out): + ndarray[{{dest_type}}] out): cdef: Py_ssize_t i, j, k diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in index 130276ae0e73c..bb4aec75ed567 100644 --- a/pandas/_libs/algos_rank_helper.pxi.in +++ b/pandas/_libs/algos_rank_helper.pxi.in @@ -131,45 +131,20 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', argsorted = _as.astype('i8') {{if dtype == 'object'}} - for i in range(n): - sum_ranks += i + 1 - dups += 1 - isnan = sorted_mask[i] - val = util.get_value_at(sorted_data, i) - - if isnan and keep_na: - ranks[argsorted[i]] = nan - continue - count += 1.0 - - if (i == n - 1 or - are_diff(util.get_value_at(sorted_data, i + 1), val) or - i == non_na_idx): - if tiebreak == TIEBREAK_AVERAGE: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i + 1 - elif tiebreak == TIEBREAK_FIRST: - raise ValueError('first not supported for non-numeric data') - elif tiebreak == TIEBREAK_FIRST_DESCENDING: - for j in range(i - dups + 1, i + 1): 
- ranks[argsorted[j]] = 2 * i - j - dups + 2 - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = total_tie_count - sum_ranks = dups = 0 + if True: {{else}} with nogil: + {{endif}} + # TODO: why does the 2d version not have a nogil block? for i in range(n): sum_ranks += i + 1 dups += 1 + + {{if dtype == 'object'}} + val = util.get_value_at(sorted_data, i) + {{else}} val = sorted_data[i] + {{endif}} {{if dtype != 'uint64'}} isnan = sorted_mask[i] @@ -180,8 +155,14 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', count += 1.0 - if (i == n - 1 or sorted_data[i + 1] != val or - i == non_na_idx): + {{if dtype == 'object'}} + if (i == n - 1 or + are_diff(util.get_value_at(sorted_data, i + 1), val) or + i == non_na_idx): + {{else}} + if (i == n - 1 or sorted_data[i + 1] != val or i == non_na_idx): + {{endif}} + if tiebreak == TIEBREAK_AVERAGE: for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = sum_ranks / dups @@ -192,8 +173,13 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = i + 1 elif tiebreak == TIEBREAK_FIRST: + {{if dtype == 'object'}} + raise ValueError('first not supported for ' + 'non-numeric data') + {{else}} for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = j + 1 + {{endif}} elif tiebreak == TIEBREAK_FIRST_DESCENDING: for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = 2 * i - j - dups + 2 @@ -202,7 +188,6 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = total_tie_count sum_ranks = dups = 0 - {{endif}} if pct: if tiebreak == TIEBREAK_DENSE: return ranks / total_tie_count diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 5b01117381a27..addbb2b3e8165 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -14,26 +14,22 @@ _int64_max = 
np.iinfo(np.int64).max {{py: -# name, c_type, dest_type, dest_dtype -dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64'), - ('float32', 'float32_t', 'float32_t', 'np.float32')] +# name, c_type +dtypes = [('float64', 'float64_t'), + ('float32', 'float32_t')] def get_dispatch(dtypes): - for name, c_type, dest_type, dest_dtype in dtypes: - - dest_type2 = dest_type - dest_type = dest_type.replace('_t', '') - - yield name, c_type, dest_type, dest_type2, dest_dtype + for name, c_type in dtypes: + yield name, c_type }} -{{for name, c_type, dest_type, dest_type2, dest_dtype in get_dispatch(dtypes)}} +{{for name, c_type in get_dispatch(dtypes)}} @cython.wraparound(False) @cython.boundscheck(False) -def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, @@ -43,8 +39,8 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count - ndarray[{{dest_type2}}, ndim=2] sumx, nobs + {{c_type}} val, count + ndarray[{{c_type}}, ndim=2] sumx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -80,7 +76,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, @@ -90,8 +86,8 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count - ndarray[{{dest_type2}}, ndim=2] prodx, nobs + {{c_type}} val, count + ndarray[{{c_type}}, ndim=2] prodx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -127,15 +123,15 @@ def 
group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) -def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, - ndarray[{{dest_type2}}, ndim=2] values, + ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, ct, oldmean - ndarray[{{dest_type2}}, ndim=2] nobs, mean + {{c_type}} val, ct, oldmean + ndarray[{{c_type}}, ndim=2] nobs, mean assert min_count == -1, "'min_count' only used in add and prod" @@ -179,15 +175,15 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, - ndarray[{{dest_type2}}, ndim=2] values, + ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count - ndarray[{{dest_type2}}, ndim=2] sumx, nobs + {{c_type}} val, count + ndarray[{{c_type}}, ndim=2] sumx, nobs assert min_count == -1, "'min_count' only used in add and prod" @@ -224,9 +220,9 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_ohlc_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, - ndarray[{{dest_type2}}, ndim=2] values, + ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, Py_ssize_t min_count=-1): """ @@ -234,7 +230,7 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab - {{dest_type2}} val, count + {{c_type}} val, count Py_ssize_t ngroups = len(counts) assert min_count == -1, 
"'min_count' only used in add and prod" @@ -278,26 +274,26 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{py: -# name, c_type, dest_type2, nan_val -dtypes = [('float64', 'float64_t', 'float64_t', 'NAN'), - ('float32', 'float32_t', 'float32_t', 'NAN'), - ('int64', 'int64_t', 'int64_t', 'iNaT'), - ('object', 'object', 'object', 'NAN')] +# name, c_type, nan_val +dtypes = [('float64', 'float64_t', 'NAN'), + ('float32', 'float32_t', 'NAN'), + ('int64', 'int64_t', 'iNaT'), + ('object', 'object', 'NAN')] def get_dispatch(dtypes): - for name, c_type, dest_type2, nan_val in dtypes: + for name, c_type, nan_val in dtypes: - yield name, c_type, dest_type2, nan_val + yield name, c_type, nan_val }} -{{for name, c_type, dest_type2, nan_val in get_dispatch(dtypes)}} +{{for name, c_type, nan_val in get_dispatch(dtypes)}} @cython.wraparound(False) @cython.boundscheck(False) -def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, @@ -307,8 +303,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val - ndarray[{{dest_type2}}, ndim=2] resx + {{c_type}} val + ndarray[{{c_type}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs assert min_count == -1, "'min_count' only used in add and prod" @@ -354,7 +350,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, +def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, int64_t rank, @@ -364,8 +360,8 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val - ndarray[{{dest_type2}}, ndim=2] resx + 
{{c_type}} val + ndarray[{{c_type}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs assert min_count == -1, "'min_count' only used in add and prod" @@ -473,7 +469,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, # with mask, without obfuscating location of missing data # in values array masked_vals = np.array(values[:, 0], copy=True) - {{if name=='int64'}} + {{if name == 'int64'}} mask = (masked_vals == {{nan_val}}).astype(np.uint8) {{else}} mask = np.isnan(masked_vals).astype(np.uint8) @@ -597,41 +593,31 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, {{endfor}} -#---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # group_min, group_max -#---------------------------------------------------------------------- +# ---------------------------------------------------------------------- -{{py: - -# name, c_type, dest_type2, nan_val -dtypes = [('float64', 'float64_t', 'NAN', 'np.inf'), - ('float32', 'float32_t', 'NAN', 'np.inf'), - ('int64', 'int64_t', 'iNaT', '_int64_max')] - -def get_dispatch(dtypes): - - for name, dest_type2, nan_val, inf_val in dtypes: - yield name, dest_type2, nan_val, inf_val -}} - - -{{for name, dest_type2, nan_val, inf_val in get_dispatch(dtypes)}} +# TODO: consider implementing for more dtypes +ctypedef fused groupby_t: + float64_t + float32_t + int64_t @cython.wraparound(False) @cython.boundscheck(False) -def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels, - Py_ssize_t min_count=-1): +def group_max(ndarray[groupby_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[groupby_t, ndim=2] values, + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count - ndarray[{{dest_type2}}, ndim=2] maxx, nobs + groupby_t val, count, 
nan_val + ndarray[groupby_t, ndim=2] maxx, nobs assert min_count == -1, "'min_count' only used in add and prod" @@ -641,7 +627,13 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, nobs = np.zeros_like(out) maxx = np.empty_like(out) - maxx.fill(-{{inf_val}}) + if groupby_t is int64_t: + # Note: evaluated at compile-time + maxx.fill(-_int64_max) + nan_val = iNaT + else: + maxx.fill(-np.inf) + nan_val = NAN N, K = (<object> values).shape @@ -656,37 +648,44 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} - if val == val and val != {{nan_val}}: - {{endif}} - nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val + if groupby_t is int64_t: + if val != nan_val: + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val + else: + if val == val and val != nan_val: + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - out[i, j] = {{nan_val}} + out[i, j] = nan_val else: out[i, j] = maxx[i, j] +group_max_float64 = group_max["float64_t"] +group_max_float32 = group_max["float32_t"] +group_max_int64 = group_max["int64_t"] + + @cython.wraparound(False) @cython.boundscheck(False) -def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels, - Py_ssize_t min_count=-1): +def group_min(ndarray[groupby_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[groupby_t, ndim=2] values, + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count - ndarray[{{dest_type2}}, ndim=2] minx, nobs + groupby_t val, count, nan_val + ndarray[groupby_t, ndim=2] minx, nobs assert min_count == -1, "'min_count' only used in add and prod" @@ -696,7 +695,12 @@ def 
group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, nobs = np.zeros_like(out) minx = np.empty_like(out) - minx.fill({{inf_val}}) + if groupby_t is int64_t: + minx.fill(_int64_max) + nan_val = iNaT + else: + minx.fill(np.inf) + nan_val = NAN N, K = (<object> values).shape @@ -711,41 +715,51 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} - if val == val and val != {{nan_val}}: - {{endif}} - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val + if groupby_t is int64_t: + if val != nan_val: + nobs[lab, j] += 1 + if val < minx[lab, j]: + minx[lab, j] = val + else: + if val == val and val != nan_val: + nobs[lab, j] += 1 + if val < minx[lab, j]: + minx[lab, j] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - out[i, j] = {{nan_val}} + out[i, j] = nan_val else: out[i, j] = minx[i, j] +group_min_float64 = group_min["float64_t"] +group_min_float32 = group_min["float32_t"] +group_min_int64 = group_min["int64_t"] + + @cython.boundscheck(False) @cython.wraparound(False) -def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, - ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels, - bint is_datetimelike): +def group_cummin(ndarray[groupby_t, ndim=2] out, + ndarray[groupby_t, ndim=2] values, + ndarray[int64_t] labels, + bint is_datetimelike): """ Only transforms on axis=0 """ cdef: Py_ssize_t i, j, N, K, size - {{dest_type2}} val, mval - ndarray[{{dest_type2}}, ndim=2] accum + groupby_t val, mval + ndarray[groupby_t, ndim=2] accum int64_t lab N, K = (<object> values).shape accum = np.empty_like(values) - accum.fill({{inf_val}}) + if groupby_t is int64_t: + accum.fill(_int64_max) + else: + accum.fill(np.inf) with nogil: for i in range(N): @@ -757,37 +771,48 @@ def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # val = nan - {{if name == 'int64'}} - if is_datetimelike and val == 
{{nan_val}}: - out[i, j] = {{nan_val}} + if groupby_t is int64_t: + if is_datetimelike and val == iNaT: + out[i, j] = iNaT + else: + mval = accum[lab, j] + if val < mval: + accum[lab, j] = mval = val + out[i, j] = mval else: - {{else}} - if val == val: - {{endif}} - mval = accum[lab, j] - if val < mval: - accum[lab, j] = mval = val - out[i, j] = mval + if val == val: + mval = accum[lab, j] + if val < mval: + accum[lab, j] = mval = val + out[i, j] = mval + + +group_cummin_float64 = group_cummin["float64_t"] +group_cummin_float32 = group_cummin["float32_t"] +group_cummin_int64 = group_cummin["int64_t"] @cython.boundscheck(False) @cython.wraparound(False) -def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, - ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels, - bint is_datetimelike): +def group_cummax(ndarray[groupby_t, ndim=2] out, + ndarray[groupby_t, ndim=2] values, + ndarray[int64_t] labels, + bint is_datetimelike): """ Only transforms on axis=0 """ cdef: Py_ssize_t i, j, N, K, size - {{dest_type2}} val, mval - ndarray[{{dest_type2}}, ndim=2] accum + groupby_t val, mval + ndarray[groupby_t, ndim=2] accum int64_t lab N, K = (<object> values).shape accum = np.empty_like(values) - accum.fill(-{{inf_val}}) + if groupby_t is int64_t: + accum.fill(-_int64_max) + else: + accum.fill(-np.inf) with nogil: for i in range(N): @@ -798,16 +823,22 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for j in range(K): val = values[i, j] - {{if name == 'int64'}} - if is_datetimelike and val == {{nan_val}}: - out[i, j] = {{nan_val}} + if groupby_t is int64_t: + if is_datetimelike and val == iNaT: + out[i, j] = iNaT + else: + mval = accum[lab, j] + if val > mval: + accum[lab, j] = mval = val + out[i, j] = mval else: - {{else}} - if val == val: - {{endif}} - mval = accum[lab, j] - if val > mval: - accum[lab, j] = mval = val - out[i, j] = mval + if val == val: + mval = accum[lab, j] + if val > mval: + accum[lab, j] = mval = val + out[i, j] = 
mval -{{endfor}} + +group_cummax_float64 = group_cummax["float64_t"] +group_cummax_float32 = group_cummax["float32_t"] +group_cummax_int64 = group_cummax["int64_t"] diff --git a/pandas/_libs/join_func_helper.pxi.in b/pandas/_libs/join_func_helper.pxi.in index 72f24762838b4..b7f604d2fc951 100644 --- a/pandas/_libs/join_func_helper.pxi.in +++ b/pandas/_libs/join_func_helper.pxi.in @@ -210,34 +210,34 @@ def asof_join_nearest_{{on_dtype}}_by_{{by_dtype}}( {{endfor}} -#---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # asof_join -#---------------------------------------------------------------------- - -{{py: - -# on_dtype -dtypes = ['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', - 'int8_t', 'int16_t', 'int32_t', 'int64_t', - 'float', 'double'] - -}} - -{{for on_dtype in dtypes}} - - -def asof_join_backward_{{on_dtype}}( - ndarray[{{on_dtype}}] left_values, - ndarray[{{on_dtype}}] right_values, - bint allow_exact_matches=1, - tolerance=None): +# ---------------------------------------------------------------------- + +ctypedef fused asof_t: + uint8_t + uint16_t + uint32_t + uint64_t + int8_t + int16_t + int32_t + int64_t + float + double + + +def asof_join_backward(ndarray[asof_t] left_values, + ndarray[asof_t] right_values, + bint allow_exact_matches=1, + tolerance=None): cdef: Py_ssize_t left_pos, right_pos, left_size, right_size ndarray[int64_t] left_indexer, right_indexer bint has_tolerance = 0 - {{on_dtype}} tolerance_ = 0 - {{on_dtype}} diff = 0 + asof_t tolerance_ = 0 + asof_t diff = 0 # if we are using tolerance, set our objects if tolerance is not None: @@ -280,18 +280,29 @@ def asof_join_backward_{{on_dtype}}( return left_indexer, right_indexer -def asof_join_forward_{{on_dtype}}( - ndarray[{{on_dtype}}] left_values, - ndarray[{{on_dtype}}] right_values, - bint allow_exact_matches=1, - tolerance=None): +asof_join_backward_uint8_t = asof_join_backward["uint8_t"] 
+asof_join_backward_uint16_t = asof_join_backward["uint16_t"] +asof_join_backward_uint32_t = asof_join_backward["uint32_t"] +asof_join_backward_uint64_t = asof_join_backward["uint64_t"] +asof_join_backward_int8_t = asof_join_backward["int8_t"] +asof_join_backward_int16_t = asof_join_backward["int16_t"] +asof_join_backward_int32_t = asof_join_backward["int32_t"] +asof_join_backward_int64_t = asof_join_backward["int64_t"] +asof_join_backward_float = asof_join_backward["float"] +asof_join_backward_double = asof_join_backward["double"] + + +def asof_join_forward(ndarray[asof_t] left_values, + ndarray[asof_t] right_values, + bint allow_exact_matches=1, + tolerance=None): cdef: Py_ssize_t left_pos, right_pos, left_size, right_size ndarray[int64_t] left_indexer, right_indexer bint has_tolerance = 0 - {{on_dtype}} tolerance_ = 0 - {{on_dtype}} diff = 0 + asof_t tolerance_ = 0 + asof_t diff = 0 # if we are using tolerance, set our objects if tolerance is not None: @@ -335,16 +346,27 @@ def asof_join_forward_{{on_dtype}}( return left_indexer, right_indexer -def asof_join_nearest_{{on_dtype}}( - ndarray[{{on_dtype}}] left_values, - ndarray[{{on_dtype}}] right_values, - bint allow_exact_matches=1, - tolerance=None): +asof_join_forward_uint8_t = asof_join_forward["uint8_t"] +asof_join_forward_uint16_t = asof_join_forward["uint16_t"] +asof_join_forward_uint32_t = asof_join_forward["uint32_t"] +asof_join_forward_uint64_t = asof_join_forward["uint64_t"] +asof_join_forward_int8_t = asof_join_forward["int8_t"] +asof_join_forward_int16_t = asof_join_forward["int16_t"] +asof_join_forward_int32_t = asof_join_forward["int32_t"] +asof_join_forward_int64_t = asof_join_forward["int64_t"] +asof_join_forward_float = asof_join_forward["float"] +asof_join_forward_double = asof_join_forward["double"] + + +def asof_join_nearest(ndarray[asof_t] left_values, + ndarray[asof_t] right_values, + bint allow_exact_matches=1, + tolerance=None): cdef: Py_ssize_t left_size, right_size, i ndarray[int64_t] 
left_indexer, right_indexer, bli, bri, fli, fri - {{on_dtype}} bdiff, fdiff + asof_t bdiff, fdiff left_size = len(left_values) right_size = len(right_values) @@ -353,10 +375,10 @@ def asof_join_nearest_{{on_dtype}}( right_indexer = np.empty(left_size, dtype=np.int64) # search both forward and backward - bli, bri = asof_join_backward_{{on_dtype}}(left_values, right_values, - allow_exact_matches, tolerance) - fli, fri = asof_join_forward_{{on_dtype}}(left_values, right_values, - allow_exact_matches, tolerance) + bli, bri = asof_join_backward(left_values, right_values, + allow_exact_matches, tolerance) + fli, fri = asof_join_forward(left_values, right_values, + allow_exact_matches, tolerance) for i in range(len(bri)): # choose timestamp from right with smaller difference @@ -370,4 +392,14 @@ def asof_join_nearest_{{on_dtype}}( return left_indexer, right_indexer -{{endfor}} + +asof_join_nearest_uint8_t = asof_join_nearest["uint8_t"] +asof_join_nearest_uint16_t = asof_join_nearest["uint16_t"] +asof_join_nearest_uint32_t = asof_join_nearest["uint32_t"] +asof_join_nearest_uint64_t = asof_join_nearest["uint64_t"] +asof_join_nearest_int8_t = asof_join_nearest["int8_t"] +asof_join_nearest_int16_t = asof_join_nearest["int16_t"] +asof_join_nearest_int32_t = asof_join_nearest["int32_t"] +asof_join_nearest_int64_t = asof_join_nearest["int64_t"] +asof_join_nearest_float = asof_join_nearest["float"] +asof_join_nearest_double = asof_join_nearest["double"] diff --git a/pandas/_libs/sparse_op_helper.pxi.in b/pandas/_libs/sparse_op_helper.pxi.in index 2843a3cf7dd28..d02a985de1d61 100644 --- a/pandas/_libs/sparse_op_helper.pxi.in +++ b/pandas/_libs/sparse_op_helper.pxi.in @@ -8,18 +8,12 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in # Sparse op #---------------------------------------------------------------------- -{{py: - -# dtype, float_group -dtypes = [('float64', True), ('int64', False)] - -}} +ctypedef fused sparse_t: + float64_t + int64_t -{{for 
dtype, float_group in dtypes}} -{{if float_group}} - -cdef inline {{dtype}}_t __div_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): +cdef inline float64_t __div__(sparse_t a, sparse_t b): if b == 0: if a > 0: return INF @@ -30,63 +24,41 @@ cdef inline {{dtype}}_t __div_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): else: return float(a) / b -cdef inline {{dtype}}_t __truediv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): - return __div_{{dtype}}(a, b) -cdef inline {{dtype}}_t __floordiv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): - if b == 0: - # numpy >= 1.11 returns NaN - # for a // 0, rather than +-inf - if _np_version_under1p11: - if a > 0: - return INF - elif a < 0: - return -INF - return NaN - else: - return a // b +cdef inline float64_t __truediv__(sparse_t a, sparse_t b): + return __div__(a, b) -cdef inline {{dtype}}_t __mod_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): - if b == 0: - return NaN - else: - return a % b - -{{else}} -cdef inline float64_t __div_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): +cdef inline sparse_t __mod__(sparse_t a, sparse_t b): if b == 0: - if a > 0: - return INF - elif a < 0: - return -INF - else: + if sparse_t is float64_t: return NaN + else: + return 0 else: - return float(a) / b + return a % b -cdef inline float64_t __truediv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): - return __div_{{dtype}}(a, b) -cdef inline {{dtype}}_t __floordiv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): +cdef inline sparse_t __floordiv__(sparse_t a, sparse_t b): if b == 0: - return 0 + if sparse_t is float64_t: + # numpy >= 1.11 returns NaN + # for a // 0, rather than +-inf + if _np_version_under1p11: + if a > 0: + return INF + elif a < 0: + return -INF + return NaN + else: + return 0 else: return a // b -cdef inline {{dtype}}_t __mod_{{dtype}}({{dtype}}_t a, {{dtype}}_t b): - if b == 0: - return 0 - else: - return a % b -{{endif}} - -{{endfor}} - -#---------------------------------------------------------------------- +# 
---------------------------------------------------------------------- # sparse array op -#---------------------------------------------------------------------- +# ---------------------------------------------------------------------- {{py: @@ -106,10 +78,10 @@ def get_op(tup): ops_dict = {'add': '{0} + {1}', 'sub': '{0} - {1}', 'mul': '{0} * {1}', - 'div': '__div_{2}({0}, {1})', - 'mod': '__mod_{2}({0}, {1})', - 'truediv': '__truediv_{2}({0}, {1})', - 'floordiv': '__floordiv_{2}({0}, {1})', + 'div': '__div__({0}, {1})', + 'mod': '__mod__({0}, {1})', + 'truediv': '__truediv__({0}, {1})', + 'floordiv': '__floordiv__({0}, {1})', 'pow': '{0} ** {1}', 'eq': '{0} == {1}', 'ne': '{0} != {1}',
Everything is passing locally, just want to run this through the CI for good measure before continuing down this path.
https://api.github.com/repos/pandas-dev/pandas/pulls/23022
2018-10-06T23:52:18Z
2018-10-17T12:36:19Z
2018-10-17T12:36:19Z
2018-10-17T16:00:00Z
DOC: Clarify rolling min_periods default value GH21489
diff --git a/pandas/core/window.py b/pandas/core/window.py index 4281d66a640e3..ea0ec79d655fb 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -462,7 +462,8 @@ class Window(_Window): min_periods : int, default None Minimum number of observations in window required to have a value (otherwise result is NA). For a window that is specified by an offset, - this will default to 1. + `min_periods` will default to 1. Otherwise, `min_periods` will default + to the size of the window. center : boolean, default False Set the labels at the center of the window. win_type : string, default None
- [ ] closes #21489 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew Clarifies the documentation of the default value for the min_periods argument of the rolling function.
https://api.github.com/repos/pandas-dev/pandas/pulls/23021
2018-10-06T21:50:54Z
2018-10-07T21:46:08Z
2018-10-07T21:46:08Z
2018-10-07T21:46:13Z
PERF: only output an html id if a style is applied
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index ad6ad5bcaf309..3b3238586b310 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -64,6 +64,11 @@ class Styler(object): a unique identifier to avoid CSS collisions; generated automatically caption: str, default None caption to attach to the table + cell_ids: bool, default True + If True, each cell will have an ``id`` attribute in their HTML tag. + The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>`` + where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row + number and ``<num_col>`` is the column number. Attributes ---------- @@ -112,7 +117,7 @@ class Styler(object): template = env.get_template("html.tpl") def __init__(self, data, precision=None, table_styles=None, uuid=None, - caption=None, table_attributes=None): + caption=None, table_attributes=None, cell_ids=True): self.ctx = defaultdict(list) self._todo = [] @@ -136,6 +141,7 @@ def __init__(self, data, precision=None, table_styles=None, uuid=None, self.table_attributes = table_attributes self.hidden_index = False self.hidden_columns = [] + self.cell_ids = cell_ids # display_funcs maps (row, col) -> formatting function @@ -306,14 +312,16 @@ def format_attr(pair): cs.extend(cell_context.get("data", {}).get(r, {}).get(c, [])) formatter = self._display_funcs[(r, c)] value = self.data.iloc[r, c] - row_es.append({ - "type": "td", - "value": value, - "class": " ".join(cs), - "id": "_".join(cs[1:]), - "display_value": formatter(value), - "is_visible": (c not in hidden_columns) - }) + row_dict = {"type": "td", + "value": value, + "class": " ".join(cs), + "display_value": formatter(value), + "is_visible": (c not in hidden_columns)} + # only add an id if the cell has a style + if (self.cell_ids or + not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')): + row_dict["id"] = "_".join(cs[1:]) + row_es.append(row_dict) props = [] for x in ctx[r, c]: # have to handle empty styles like [''] diff --git 
a/pandas/io/formats/templates/html.tpl b/pandas/io/formats/templates/html.tpl index 706db1ecdd961..01ecde7d081f5 100644 --- a/pandas/io/formats/templates/html.tpl +++ b/pandas/io/formats/templates/html.tpl @@ -50,17 +50,17 @@ {%- endblock thead %} {%- block tbody %} <tbody> - {%- block before_rows %}{%- endblock before_rows %} - {%- for r in body %} - {%- block tr scoped %} - <tr> - {%- for c in r %} - {%- if c.is_visible != False %} - <{{ c.type }} id="T_{{ uuid }}{{ c.id }}" class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}> - {%- endif %} - {%- endfor %} - </tr> - {%- endblock tr %} + {% block before_rows %}{% endblock before_rows %} + {% for r in body %} + {% block tr scoped %} + <tr> + {% for c in r %} + {% if c.is_visible != False %} + <{{ c.type }} {% if c.id is defined -%} id="T_{{ uuid }}{{ c.id }}" {%- endif %} class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}> + {% endif %} + {%- endfor %} + </tr> + {% endblock tr %} {%- endfor %} {%- block after_rows %}{%- endblock after_rows %} </tbody>
- [x] closes #20695 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Greatly reduces the amount of `id` tag in the cells by only assigning one when a style is applied to that cell. If that is the not correct approach for solving #20695, I am open to suggestions. Also I was confused by the `%-` tags to handle the whitespaces. Is there a reason the default options to handle whitespace are not used?
https://api.github.com/repos/pandas-dev/pandas/pulls/23019
2018-10-06T18:44:09Z
2018-10-14T16:10:09Z
2018-10-14T16:10:09Z
2018-10-14T20:52:51Z
TST: further clean up of frame/test_analytics
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index b0b9f2815cbb9..5327e3fcbea76 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -25,22 +25,44 @@ import pandas.util._test_decorators as td -def _check_stat_op(name, alternative, main_frame, float_frame, - float_string_frame, has_skipna=True, - has_numeric_only=False, check_dtype=True, - check_dates=False, check_less_precise=False, - skipna_alternative=None): - - f = getattr(main_frame, name) +def assert_stat_op_calc(opname, alternative, frame, has_skipna=True, + check_dtype=True, check_dates=False, + check_less_precise=False, skipna_alternative=None): + """ + Check that operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + alternative : function + Function that opname is tested against; i.e. "frame.opname()" should + equal "alternative(frame)". + frame : DataFrame + The object that the tests are executed on + has_skipna : bool, default True + Whether the method "opname" has the kwarg "skip_na" + check_dtype : bool, default True + Whether the dtypes of the result of "frame.opname()" and + "alternative(frame)" should be checked. 
+ check_dates : bool, default false + Whether opname should be tested on a Datetime Series + check_less_precise : bool, default False + Whether results should only be compared approximately; + passed on to tm.assert_series_equal + skipna_alternative : function, default None + NaN-safe version of alternative + """ + + f = getattr(frame, opname) if check_dates: df = DataFrame({'b': date_range('1/1/2001', periods=2)}) - _f = getattr(df, name) - result = _f() + result = getattr(df, opname)() assert isinstance(result, Series) df['a'] = lrange(len(df)) - result = getattr(df, name)() + result = getattr(df, opname)() assert isinstance(result, Series) assert len(result) @@ -52,11 +74,11 @@ def wrapper(x): skipna_alternative) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) - tm.assert_series_equal(result0, main_frame.apply(wrapper), + tm.assert_series_equal(result0, frame.apply(wrapper), check_dtype=check_dtype, check_less_precise=check_less_precise) # HACK: win32 - tm.assert_series_equal(result1, main_frame.apply(wrapper, axis=1), + tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), check_dtype=False, check_less_precise=check_less_precise) else: @@ -64,49 +86,83 @@ def wrapper(x): result0 = f(axis=0) result1 = f(axis=1) - tm.assert_series_equal(result0, main_frame.apply(skipna_wrapper), + tm.assert_series_equal(result0, frame.apply(skipna_wrapper), check_dtype=check_dtype, check_less_precise=check_less_precise) - if name in ['sum', 'prod']: - expected = main_frame.apply(skipna_wrapper, axis=1) + + if opname in ['sum', 'prod']: + expected = frame.apply(skipna_wrapper, axis=1) tm.assert_series_equal(result1, expected, check_dtype=False, check_less_precise=check_less_precise) # check dtypes if check_dtype: - lcd_dtype = main_frame.values.dtype + lcd_dtype = frame.values.dtype assert lcd_dtype == result0.dtype assert lcd_dtype == result1.dtype # bad axis tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2) - # make sure works on 
mixed-type frame - getattr(float_string_frame, name)(axis=0) - getattr(float_string_frame, name)(axis=1) - - if has_numeric_only: - getattr(float_string_frame, name)(axis=0, numeric_only=True) - getattr(float_string_frame, name)(axis=1, numeric_only=True) - getattr(float_frame, name)(axis=0, numeric_only=False) - getattr(float_frame, name)(axis=1, numeric_only=False) # all NA case if has_skipna: - all_na = float_frame * np.NaN - r0 = getattr(all_na, name)(axis=0) - r1 = getattr(all_na, name)(axis=1) - if name in ['sum', 'prod']: - unit = int(name == 'prod') + all_na = frame * np.NaN + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname in ['sum', 'prod']: + unit = 1 if opname == 'prod' else 0 # result for empty sum/prod expected = pd.Series(unit, index=r0.index, dtype=r0.dtype) tm.assert_series_equal(r0, expected) expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) tm.assert_series_equal(r1, expected) -def _check_bool_op(name, alternative, frame, float_string_frame, - has_skipna=True, has_bool_only=False): +def assert_stat_op_api(opname, float_frame, float_string_frame, + has_numeric_only=False): + """ + Check that API for operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + float_frame : DataFrame + DataFrame with columns of type float + float_string_frame : DataFrame + DataFrame with both float and string columns + has_numeric_only : bool, default False + Whether the method "opname" has the kwarg "numeric_only" + """ + + # make sure works on mixed-type frame + getattr(float_string_frame, opname)(axis=0) + getattr(float_string_frame, opname)(axis=1) - f = getattr(frame, name) + if has_numeric_only: + getattr(float_string_frame, opname)(axis=0, numeric_only=True) + getattr(float_string_frame, opname)(axis=1, numeric_only=True) + getattr(float_frame, opname)(axis=0, numeric_only=False) + getattr(float_frame, opname)(axis=1, numeric_only=False) + 
+ +def assert_bool_op_calc(opname, alternative, frame, has_skipna=True): + """ + Check that bool operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + alternative : function + Function that opname is tested against; i.e. "frame.opname()" should + equal "alternative(frame)". + frame : DataFrame + The object that the tests are executed on + has_skipna : bool, default True + Whether the method "opname" has the kwarg "skip_na" + """ + + f = getattr(frame, opname) if has_skipna: def skipna_wrapper(x): @@ -118,6 +174,7 @@ def wrapper(x): result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) + tm.assert_series_equal(result0, frame.apply(wrapper)) tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), check_dtype=False) # HACK: win32 @@ -127,18 +184,48 @@ def wrapper(x): result0 = f(axis=0) result1 = f(axis=1) + tm.assert_series_equal(result0, frame.apply(skipna_wrapper)) tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False) # bad axis - pytest.raises(ValueError, f, axis=2) + tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2) - # make sure works on mixed-type frame + # all NA case + if has_skipna: + all_na = frame * np.NaN + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname == 'any': + assert not r0.any() + assert not r1.any() + else: + assert r0.all() + assert r1.all() + + +def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, + has_bool_only=False): + """ + Check that API for boolean operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + float_frame : DataFrame + DataFrame with columns of type float + float_string_frame : DataFrame + DataFrame with both float and string columns + has_bool_only : bool, default False + Whether the method "opname" has the kwarg "bool_only" + """ + # make sure op 
works on mixed-type frame mixed = float_string_frame - mixed['_bool_'] = np.random.randn(len(mixed)) > 0 - getattr(mixed, name)(axis=0) - getattr(mixed, name)(axis=1) + mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5 + getattr(mixed, opname)(axis=0) + getattr(mixed, opname)(axis=1) class NonzeroFail(object): @@ -148,22 +235,10 @@ def __nonzero__(self): mixed['_nonzero_fail_'] = NonzeroFail() if has_bool_only: - getattr(mixed, name)(axis=0, bool_only=True) - getattr(mixed, name)(axis=1, bool_only=True) - getattr(frame, name)(axis=0, bool_only=False) - getattr(frame, name)(axis=1, bool_only=False) - - # all NA case - if has_skipna: - all_na = frame * np.NaN - r0 = getattr(all_na, name)(axis=0) - r1 = getattr(all_na, name)(axis=1) - if name == 'any': - assert not r0.any() - assert not r1.any() - else: - assert r0.all() - assert r1.all() + getattr(mixed, opname)(axis=0, bool_only=True) + getattr(mixed, opname)(axis=1, bool_only=True) + getattr(bool_frame_with_na, opname)(axis=0, bool_only=False) + getattr(bool_frame_with_na, opname)(axis=1, bool_only=False) class TestDataFrameAnalytics(): @@ -596,10 +671,10 @@ def test_reduce_mixed_frame(self): def test_count(self, float_frame_with_na, float_frame, float_string_frame): f = lambda s: notna(s).sum() - _check_stat_op('count', f, float_frame_with_na, float_frame, - float_string_frame, has_skipna=False, - has_numeric_only=True, check_dtype=False, - check_dates=True) + assert_stat_op_calc('count', f, float_frame_with_na, has_skipna=False, + check_dtype=False, check_dates=True) + assert_stat_op_api('count', float_frame, float_string_frame, + has_numeric_only=True) # corner case frame = DataFrame() @@ -628,9 +703,10 @@ def test_count(self, float_frame_with_na, float_frame, float_string_frame): def test_nunique(self, float_frame_with_na, float_frame, float_string_frame): f = lambda s: len(algorithms.unique1d(s.dropna())) - _check_stat_op('nunique', f, float_frame_with_na, - float_frame, float_string_frame, has_skipna=False, 
- check_dtype=False, check_dates=True) + assert_stat_op_calc('nunique', f, float_frame_with_na, + has_skipna=False, check_dtype=False, + check_dates=True) + assert_stat_op_api('nunique', float_frame, float_string_frame) df = DataFrame({'A': [1, 1, 1], 'B': [1, 2, 3], @@ -644,15 +720,13 @@ def test_nunique(self, float_frame_with_na, float_frame, def test_sum(self, float_frame_with_na, mixed_float_frame, float_frame, float_string_frame): - _check_stat_op('sum', np.sum, float_frame_with_na, float_frame, - float_string_frame, has_numeric_only=True, - skipna_alternative=np.nansum) - + assert_stat_op_api('sum', float_frame, float_string_frame, + has_numeric_only=True) + assert_stat_op_calc('sum', np.sum, float_frame_with_na, + skipna_alternative=np.nansum) # mixed types (with upcasting happening) - _check_stat_op('sum', np.sum, - mixed_float_frame.astype('float32'), float_frame, - float_string_frame, has_numeric_only=True, - check_dtype=False, check_less_precise=True) + assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'), + check_dtype=False, check_less_precise=True) @pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']) @@ -679,13 +753,14 @@ def test_stat_operators_attempt_obj_array(self, method): tm.assert_series_equal(result, expected) def test_mean(self, float_frame_with_na, float_frame, float_string_frame): - _check_stat_op('mean', np.mean, float_frame_with_na, - float_frame, float_string_frame, check_dates=True) + assert_stat_op_calc('mean', np.mean, float_frame_with_na, + check_dates=True) + assert_stat_op_api('mean', float_frame, float_string_frame) def test_product(self, float_frame_with_na, float_frame, float_string_frame): - _check_stat_op('product', np.prod, float_frame_with_na, - float_frame, float_string_frame) + assert_stat_op_calc('product', np.prod, float_frame_with_na) + assert_stat_op_api('product', float_frame, float_string_frame) # TODO: Ensure warning isn't emitted in the first place 
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") @@ -696,18 +771,18 @@ def wrapper(x): return np.nan return np.median(x) - _check_stat_op('median', wrapper, float_frame_with_na, - float_frame, float_string_frame, check_dates=True) + assert_stat_op_calc('median', wrapper, float_frame_with_na, + check_dates=True) + assert_stat_op_api('median', float_frame, float_string_frame) def test_min(self, float_frame_with_na, int_frame, float_frame, float_string_frame): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) - _check_stat_op('min', np.min, float_frame_with_na, - float_frame, float_string_frame, - check_dates=True) - _check_stat_op('min', np.min, int_frame, float_frame, - float_string_frame) + assert_stat_op_calc('min', np.min, float_frame_with_na, + check_dates=True) + assert_stat_op_calc('min', np.min, int_frame) + assert_stat_op_api('min', float_frame, float_string_frame) def test_cummin(self, datetime_frame): datetime_frame.loc[5:10, 0] = nan @@ -759,26 +834,25 @@ def test_max(self, float_frame_with_na, int_frame, float_frame, float_string_frame): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) - _check_stat_op('max', np.max, float_frame_with_na, - float_frame, float_string_frame, - check_dates=True) - _check_stat_op('max', np.max, int_frame, float_frame, - float_string_frame) + assert_stat_op_calc('max', np.max, float_frame_with_na, + check_dates=True) + assert_stat_op_calc('max', np.max, int_frame) + assert_stat_op_api('max', float_frame, float_string_frame) def test_mad(self, float_frame_with_na, float_frame, float_string_frame): f = lambda x: np.abs(x - x.mean()).mean() - _check_stat_op('mad', f, float_frame_with_na, float_frame, - float_string_frame) + assert_stat_op_calc('mad', f, float_frame_with_na) + assert_stat_op_api('mad', float_frame, float_string_frame) def test_var_std(self, float_frame_with_na, datetime_frame, float_frame, float_string_frame): alt = 
lambda x: np.var(x, ddof=1) - _check_stat_op('var', alt, float_frame_with_na, float_frame, - float_string_frame) + assert_stat_op_calc('var', alt, float_frame_with_na) + assert_stat_op_api('var', float_frame, float_string_frame) alt = lambda x: np.std(x, ddof=1) - _check_stat_op('std', alt, float_frame_with_na, float_frame, - float_string_frame) + assert_stat_op_calc('std', alt, float_frame_with_na) + assert_stat_op_api('std', float_frame, float_string_frame) result = datetime_frame.std(ddof=4) expected = datetime_frame.apply(lambda x: x.std(ddof=4)) @@ -892,8 +966,8 @@ def test_cumprod(self, datetime_frame): def test_sem(self, float_frame_with_na, datetime_frame, float_frame, float_string_frame): alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) - _check_stat_op('sem', alt, float_frame_with_na, - float_frame, float_string_frame) + assert_stat_op_calc('sem', alt, float_frame_with_na) + assert_stat_op_api('sem', float_frame, float_string_frame) result = datetime_frame.sem(ddof=4) expected = datetime_frame.apply( @@ -917,8 +991,8 @@ def alt(x): return np.nan return skew(x, bias=False) - _check_stat_op('skew', alt, float_frame_with_na, - float_frame, float_string_frame) + assert_stat_op_calc('skew', alt, float_frame_with_na) + assert_stat_op_api('skew', float_frame, float_string_frame) @td.skip_if_no_scipy def test_kurt(self, float_frame_with_na, float_frame, float_string_frame): @@ -929,8 +1003,8 @@ def alt(x): return np.nan return kurtosis(x, bias=False) - _check_stat_op('kurt', alt, float_frame_with_na, - float_frame, float_string_frame) + assert_stat_op_calc('kurt', alt, float_frame_with_na) + assert_stat_op_api('kurt', float_frame, float_string_frame) index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0], @@ -1205,9 +1279,9 @@ def wrapper(x): return np.nan return np.median(x) - _check_stat_op('median', wrapper, int_frame, float_frame, - float_string_frame, check_dtype=False, - check_dates=True) + 
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False, + check_dates=True) + assert_stat_op_api('median', float_frame, float_string_frame) # Miscellanea @@ -1262,13 +1336,12 @@ def test_idxmax(self, float_frame, int_frame): # ---------------------------------------------------------------------- # Logical reductions - def test_any_all(self, bool_frame_with_na, float_string_frame): - _check_bool_op('any', np.any, bool_frame_with_na, - float_string_frame, has_skipna=True, - has_bool_only=True) - _check_bool_op('all', np.all, bool_frame_with_na, - float_string_frame, has_skipna=True, - has_bool_only=True) + @pytest.mark.parametrize('opname', ['any', 'all']) + def test_any_all(self, opname, bool_frame_with_na, float_string_frame): + assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na, + has_skipna=True) + assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, + has_bool_only=True) def test_any_all_extra(self): df = DataFrame({
Follow-up to #22733, where some things were left undone to ease reviewing. Mainly: * cleaning up the jumble of `_check_stat_op`, splitting it into two functions with separate purposes * this removes redundant calls to the API-portion of the function in tests like `test_max` etc. * renaming according to review in #22733 * same for `_check_bool_op` * parametrize `test_any_all` according to review in #22733
https://api.github.com/repos/pandas-dev/pandas/pulls/23016
2018-10-06T16:18:40Z
2018-10-07T21:47:57Z
2018-10-07T21:47:57Z
2018-10-08T17:08:54Z
CLN GH22875 Replace bare excepts by explicit excepts in pandas/io/
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 0d564069c681f..70c978a3b62ed 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -42,7 +42,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover text, encoding=(kwargs.get('encoding') or get_option('display.encoding')) ) - except: + except AttributeError: pass # Excel copies into clipboard with \t separation diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index b8b28a0b0c98c..e347f6bce0168 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -100,7 +100,7 @@ def check_main(): try: return __IPYTHON__ or check_main() # noqa - except: + except NameError: return check_main() @@ -118,7 +118,7 @@ def in_qtconsole(): ip.config.get('IPKernelApp', {}).get('parent_appname', "")) if 'qtconsole' in front_end.lower(): return True - except: + except NameError: return False return False @@ -137,7 +137,7 @@ def in_ipnb(): ip.config.get('IPKernelApp', {}).get('parent_appname', "")) if 'notebook' in front_end.lower(): return True - except: + except NameError: return False return False @@ -149,7 +149,7 @@ def in_ipython_frontend(): try: ip = get_ipython() # noqa return 'zmq' in str(type(ip)).lower() - except: + except NameError: pass return False diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index dcd6f2cf4a718..2846525adbe6b 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -78,7 +78,7 @@ def _get_terminal_size_windows(): h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) - except: + except (AttributeError, ValueError): return None if res: import struct @@ -108,7 +108,7 @@ def _get_terminal_size_tput(): output = proc.communicate(input=None) rows = int(output[0]) return (cols, rows) - except: + except OSError: return None @@ -120,7 +120,7 @@ def ioctl_GWINSZ(fd): import struct cr = struct.unpack( 
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) - except: + except (struct.error, IOError): return None return cr cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) @@ -129,13 +129,13 @@ def ioctl_GWINSZ(fd): fd = os.open(os.ctermid(), os.O_RDONLY) cr = ioctl_GWINSZ(fd) os.close(fd) - except: + except OSError: pass if not cr or cr == (0, 0): try: from os import environ as env cr = (env['LINES'], env['COLUMNS']) - except: + except (ValueError, KeyError): return None return int(cr[1]), int(cr[0]) diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 7a1e72637f4ce..77fbb27d01f86 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -194,7 +194,7 @@ def read(fh): if should_close: try: path_or_buf.close() - except: # noqa: flake8 + except IOError: pass return l @@ -703,7 +703,7 @@ def create_block(b): dtype = dtype_for(obj[u'dtype']) try: return dtype(obj[u'data']) - except: + except (ValueError, TypeError): return dtype.type(obj[u'data']) elif typ == u'np_complex': return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j') diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a4f1155117b12..2def3b81c9518 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -459,7 +459,7 @@ def _read(filepath_or_buffer, kwds): if should_close: try: filepath_or_buffer.close() - except: # noqa: flake8 + except ValueError: pass return data @@ -1808,7 +1808,7 @@ def close(self): # close additional handles opened by C parser (for compression) try: self._reader.close() - except: + except ValueError: pass def _set_noconvert_columns(self): @@ -3034,7 +3034,7 @@ def converter(*date_cols): errors='ignore', infer_datetime_format=infer_datetime_format ) - except: + except ValueError: return tools.to_datetime( parsing.try_parse_dates(strs, dayfirst=dayfirst)) else: @@ -3263,7 +3263,7 @@ def _floatify_na_values(na_values): v = float(v) if not np.isnan(v): result.add(v) - except: + except (TypeError, ValueError, OverflowError): pass return result @@ 
-3284,11 +3284,11 @@ def _stringify_na_values(na_values): result.append(str(v)) result.append(v) - except: + except (TypeError, ValueError, OverflowError): pass try: result.append(int(x)) - except: + except (TypeError, ValueError, OverflowError): pass return set(result) diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 9c219d7fd6997..d52a571da0d61 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -163,18 +163,20 @@ def try_read(path, encoding=None): # We want to silence any warnings about, e.g. moved modules. warnings.simplefilter("ignore", Warning) return read_wrapper(lambda f: pkl.load(f)) - except Exception: + except Exception: # noqa: E722 # reg/patched pickle + # compat not used in pandas/compat/pickle_compat.py::load + # TODO: remove except block OR modify pc.load to use compat try: return read_wrapper( lambda f: pc.load(f, encoding=encoding, compat=False)) # compat pickle - except: + except Exception: # noqa: E722 return read_wrapper( lambda f: pc.load(f, encoding=encoding, compat=True)) try: return try_read(path) - except: + except Exception: # noqa: E722 if PY3: return try_read(path, encoding='latin1') raise diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 14e7ad9682db6..385396909a07b 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -246,7 +246,7 @@ def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1', contents = filepath_or_buffer.read() try: contents = contents.encode(self._encoding) - except: + except UnicodeEncodeError: pass self.filepath_or_buffer = compat.BytesIO(contents) diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index b8a0bf5733158..d72996a8e6157 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -46,7 +46,7 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, format = "sas7bdat" else: raise ValueError("unable to infer format of SAS file") - except: + except ValueError: pass if 
format.lower() == 'xport': diff --git a/pandas/io/sql.py b/pandas/io/sql.py index a582d32741ae9..882fa0092b2cf 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -382,7 +382,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, try: _is_table_name = pandas_sql.has_table(sql) - except: + except (ImportError, AttributeError): _is_table_name = False if _is_table_name: @@ -847,7 +847,7 @@ def _sqlalchemy_type(self, col): try: tz = col.tzinfo # noqa return DateTime(timezone=True) - except: + except AttributeError: return DateTime if col_type == 'timedelta64': warnings.warn("the 'timedelta' type is not supported, and will be " @@ -1360,7 +1360,7 @@ def run_transaction(self): try: yield cur self.con.commit() - except: + except Exception: self.con.rollback() raise finally: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index efd5f337fdf69..a321e315f5225 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1252,12 +1252,12 @@ def _read_old_header(self, first_char): try: self.typlist = [self.TYPE_MAP[typ] for typ in typlist] - except: + except ValueError: raise ValueError("cannot convert stata types [{0}]" .format(','.join(str(x) for x in typlist))) try: self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] - except: + except ValueError: raise ValueError("cannot convert stata dtypes [{0}]" .format(','.join(str(x) for x in typlist)))
- [X] closes #22875 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is a resubmit of a previous PR I submitted that was approved (#22916). I tried to close and reopen to get a rebuild on travis but I couldn't reopen it because I had deleted my old fork (after accidentally pushing to master).
https://api.github.com/repos/pandas-dev/pandas/pulls/23004
2018-10-05T04:49:25Z
2018-10-10T07:33:29Z
2018-10-10T07:33:29Z
2018-10-10T07:33:53Z
fix typos
diff --git a/README.md b/README.md index f26b9598bb5d3..b4dedecb4c697 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on -its way toward this goal. +its way towards this goal. ## Main Features Here are just a few of the things that pandas does well:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23003
2018-10-05T04:28:02Z
2018-10-05T11:58:41Z
2018-10-05T11:58:41Z
2018-10-05T14:28:10Z
CI: Fix yaml/yml inconsistency
diff --git a/ci/azure-macos-35.yml b/ci/azure-macos-35.yaml similarity index 100% rename from ci/azure-macos-35.yml rename to ci/azure-macos-35.yaml diff --git a/ci/azure/macos.yml b/ci/azure/macos.yml index 5bf8d18d6cbb9..9bfaef04ea2fa 100644 --- a/ci/azure/macos.yml +++ b/ci/azure/macos.yml @@ -10,7 +10,7 @@ jobs: maxParallel: 11 matrix: py35_np_110: - ENV_FILE: ci/azure-macos-35.yml + ENV_FILE: ci/azure-macos-35.yaml CONDA_PY: "35" CONDA_ENV: pandas TEST_ARGS: "--skip-slow --skip-network" diff --git a/ci/azure/windows-py27.yml b/ci/azure/windows-py27.yml index 3e92c96263930..10251bc03b8dc 100644 --- a/ci/azure/windows-py27.yml +++ b/ci/azure/windows-py27.yml @@ -10,7 +10,7 @@ jobs: maxParallel: 11 matrix: py36_np14: - ENV_FILE: ci/azure-windows-27.yml + ENV_FILE: ci/azure-windows-27.yaml CONDA_PY: "27" CONDA_ENV: pandas diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml index 2ab8c6f320188..fe6c9c933d474 100644 --- a/ci/azure/windows.yml +++ b/ci/azure/windows.yml @@ -10,7 +10,7 @@ jobs: maxParallel: 11 matrix: py36_np14: - ENV_FILE: ci/azure-windows-36.yml + ENV_FILE: ci/azure-windows-36.yaml CONDA_PY: "36" CONDA_ENV: pandas
The ci/windows.yml and ci/windows-py27.yml files reference the environment files with ".yml". This PR moves the ".yaml" files to the indicated location. CI config is hard.
https://api.github.com/repos/pandas-dev/pandas/pulls/23001
2018-10-05T00:44:14Z
2018-10-08T20:09:22Z
2018-10-08T20:09:22Z
2018-10-09T00:53:16Z
Make DataFrame arithmetic ops with 2D arrays behave like numpy analogues
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 91575c311b409..66f3d4bd1c4f3 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -488,6 +488,39 @@ Previous Behavior: 0 NaT +.. _whatsnew_0240.api.dataframe_arithmetic_broadcasting: + +DataFrame Arithmetic Operations Broadcasting Changes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:class:`DataFrame` arithmetic operations when operating with 2-dimensional +``np.ndarray`` objects now broadcast in the same way as ``np.ndarray``s +broadcast. (:issue:`23000`) + +Previous Behavior: + +.. code-block:: ipython + + In [3]: arr = np.arange(6).reshape(3, 2) + In [4]: df = pd.DataFrame(arr) + In [5]: df + arr[[0], :] # 1 row, 2 columns + ... + ValueError: Unable to coerce to DataFrame, shape must be (3, 2): given (1, 2) + In [6]: df + arr[:, [1]] # 1 column, 3 rows + ... + ValueError: Unable to coerce to DataFrame, shape must be (3, 2): given (3, 1) + +*Current Behavior*: + +.. ipython:: python + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr) + df + +.. ipython:: python + df + arr[[0], :] # 1 row, 2 columns + df + arr[:, [1]] # 1 column, 3 rows + + .. 
_whatsnew_0240.api.extension: ExtensionType Changes diff --git a/pandas/core/ops.py b/pandas/core/ops.py index dc99faaf68f51..20559bca9caed 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1799,14 +1799,32 @@ def to_series(right): right = to_series(right) elif right.ndim == 2: - if left.shape != right.shape: + if right.shape == left.shape: + right = left._constructor(right, index=left.index, + columns=left.columns) + + elif right.shape[0] == left.shape[0] and right.shape[1] == 1: + # Broadcast across columns + try: + right = np.broadcast_to(right, left.shape) + except AttributeError: + # numpy < 1.10.0 + right = np.tile(right, (1, left.shape[1])) + + right = left._constructor(right, + index=left.index, + columns=left.columns) + + elif right.shape[1] == left.shape[1] and right.shape[0] == 1: + # Broadcast along rows + right = to_series(right[0, :]) + + else: raise ValueError("Unable to coerce to DataFrame, shape " "must be {req_shape}: given {given_shape}" .format(req_shape=left.shape, given_shape=right.shape)) - right = left._constructor(right, index=left.index, - columns=left.columns) elif right.ndim > 2: raise ValueError('Unable to coerce to Series/DataFrame, dim ' 'must be <= 2: {dim}'.format(dim=right.shape)) diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 2eb11c3a2e2f7..b97c5e4f7d7c2 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -99,6 +99,7 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname): # Arithmetic class TestFrameFlexArithmetic(object): + def test_df_add_td64_columnwise(self): # GH#22534 Check that column-wise addition broadcasts correctly dti = pd.date_range('2016-01-01', periods=10) @@ -252,6 +253,99 @@ def test_arith_flex_zero_len_raises(self): class TestFrameArithmetic(object): + def test_df_add_2d_array_rowlike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr, columns=[True, False], 
index=['A', 'B', 'C']) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + expected = pd.DataFrame([[2, 4], + [4, 6], + [6, 8]], + columns=df.columns, index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype) + result = df + rowlike + tm.assert_frame_equal(result, expected) + result = rowlike + df + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_collike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C']) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + expected = pd.DataFrame([[1, 2], + [5, 6], + [9, 10]], + columns=df.columns, index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype) + result = df + collike + tm.assert_frame_equal(result, expected) + result = collike + df + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_rowlike_broadcasts(self, + all_arithmetic_operators): + # GH#23000 + opname = all_arithmetic_operators + + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C']) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + exvals = [getattr(df.loc['A'], opname)(rowlike.squeeze()), + getattr(df.loc['B'], opname)(rowlike.squeeze()), + getattr(df.loc['C'], opname)(rowlike.squeeze())] + + expected = pd.DataFrame(exvals, columns=df.columns, index=df.index) + + if opname in ['__rmod__', '__rfloordiv__']: + # exvals will have dtypes [f8, i8, i8] so expected will be + # all-f8, but the DataFrame operation will return mixed dtypes + # use exvals[-1].dtype instead of "i8" for compat with 32-bit + # systems/pythons + expected[False] = expected[False].astype(exvals[-1].dtype) + + result = getattr(df, opname)(rowlike) + tm.assert_frame_equal(result, expected) + + def 
test_df_arith_2d_array_collike_broadcasts(self, + all_arithmetic_operators): + # GH#23000 + opname = all_arithmetic_operators + + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C']) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + exvals = {True: getattr(df[True], opname)(collike.squeeze()), + False: getattr(df[False], opname)(collike.squeeze())} + + dtype = None + if opname in ['__rmod__', '__rfloordiv__']: + # Series ops may return mixed int/float dtypes in cases where + # DataFrame op will return all-float. So we upcast `expected` + dtype = np.common_type(*[x.values for x in exvals.values()]) + + expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, + dtype=dtype) + + result = getattr(df, opname)(collike) + tm.assert_frame_equal(result, expected) + def test_df_bool_mul_int(self): # GH#22047, GH#22163 multiplication by 1 should result in int dtype, # not object dtype
- [x] closes #22686 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Change DataFrame arithmetic behavior when operating against 2D-ndarray such that `op(df, arr)` broadcasts like `op(df.values, arr)`. Related: #22880. Note this does NOT change the behavior of DataFrame comparison operations, so that PR (more specifically, the tests) will have to be updated if this is merged. @timlod can you confirm that this is what you ha din mind in #22686?
https://api.github.com/repos/pandas-dev/pandas/pulls/23000
2018-10-04T22:25:17Z
2018-10-07T22:38:27Z
2018-10-07T22:38:27Z
2018-10-08T00:13:33Z
BUG: Concat multiple different ExtensionArray types
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index dde098be2e5ae..3d7e7686b2db6 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -602,6 +602,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your - :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`). - Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) - Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) +- Bug when concatenating multiple ``Series`` with different extension dtypes not casting to object dtype (:issue:`22994`) - Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`) - Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) - :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`). diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index ac824708245d2..2b1778e5bcb2e 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -560,11 +560,6 @@ def _concat_sparse(to_concat, axis=0, typs=None): fill_values = [x.fill_value for x in to_concat if isinstance(x, SparseArray)] - - if len(set(fill_values)) > 1: - raise ValueError("Cannot concatenate SparseArrays with different " - "fill values") - fill_value = fill_values[0] # TODO: Fix join unit generation so we aren't passed this. 
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 3667d7c5e39dc..2646dbd33815d 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1636,8 +1636,7 @@ def concat(self, to_concat, new_axis): # check if all series are of the same block type: if len(non_empties) > 0: blocks = [obj.blocks[0] for obj in non_empties] - - if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa + if len({b.dtype for b in blocks}) == 1: new_block = blocks[0].concat_same_type(blocks) else: values = [x.values for x in blocks] diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index f84d24295b049..be1c61166e4b1 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -100,7 +100,9 @@ def test_hashable(self, dtype): class TestInterface(BaseDecimal, base.BaseInterfaceTests): - pass + + pytestmark = pytest.mark.skipif(compat.PY2, + reason="Unhashble dtype in Py2.") class TestConstructors(BaseDecimal, base.BaseConstructorsTests): @@ -112,7 +114,8 @@ def test_from_dtype(self, data): class TestReshaping(BaseDecimal, base.BaseReshapingTests): - pass + pytestmark = pytest.mark.skipif(compat.PY2, + reason="Unhashble dtype in Py2.") class TestGetitem(BaseDecimal, base.BaseGetitemTests): @@ -174,7 +177,8 @@ class TestCasting(BaseDecimal, base.BaseCastingTests): class TestGroupby(BaseDecimal, base.BaseGroupbyTests): - pass + pytestmark = pytest.mark.skipif(compat.PY2, + reason="Unhashble dtype in Py2.") class TestSetitem(BaseDecimal, base.BaseSetitemTests): diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 2aaa04d571e69..d39c9fafe5749 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1,6 +1,7 @@ from warnings import catch_warnings, simplefilter from itertools import combinations from collections import deque +from decimal 
import Decimal import datetime as dt import dateutil @@ -8,17 +9,17 @@ from numpy.random import randn from datetime import datetime -from pandas.compat import StringIO, iteritems, PY2 +from pandas.compat import Iterable, StringIO, iteritems, PY2 import pandas as pd from pandas import (DataFrame, concat, read_csv, isna, Series, date_range, Index, Panel, MultiIndex, Timestamp, DatetimeIndex, Categorical) -from pandas.compat import Iterable from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.util import testing as tm from pandas.util.testing import (assert_frame_equal, makeCustomDataframe as mkdf) +from pandas.tests.extension.decimal import to_decimal import pytest @@ -2361,6 +2362,18 @@ def test_concat_datetime_timezone(self): index=idx1.append(idx1)) tm.assert_frame_equal(result, expected) + @pytest.mark.skipif(PY2, reason="Unhashable Decimal dtype") + def test_concat_different_extension_dtypes_upcasts(self): + a = pd.Series(pd.core.arrays.integer_array([1, 2])) + b = pd.Series(to_decimal([1, 2])) + + result = pd.concat([a, b], ignore_index=True) + expected = pd.Series([ + 1, 2, + Decimal(1), Decimal(2) + ], dtype=object) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel]) @pytest.mark.parametrize('dt', np.sctypes['float'])
xref https://github.com/pandas-dev/pandas/issues/22994 This builds on https://github.com/pandas-dev/pandas/pull/22996, since we need hashing. 2a1660c77e8ad0c28485c6d81227ac9d6b6de4f2 is the relevant commit. Sparse tests will fail for now. I'll revisit once SparseArray is in.
https://api.github.com/repos/pandas-dev/pandas/pulls/22997
2018-10-04T21:16:21Z
2018-10-18T12:01:21Z
2018-10-18T12:01:21Z
2018-10-18T12:01:25Z
API: ExtensionDtype Equality and Hashability
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index c9874b4dd03d6..a1467cbca963a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -492,6 +492,15 @@ Previous Behavior: ExtensionType Changes ^^^^^^^^^^^^^^^^^^^^^ +**:class:`pandas.api.extensions.ExtensionDtype` Equality and Hashability** + +Pandas now requires that extension dtypes be hashable. The base class implements +a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should +update the ``ExtensionDtype._metadata`` tuple to match the signature of your +``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`). + +**Other changes** + - ``ExtensionArray`` has gained the abstract methods ``.dropna()`` (:issue:`21185`) - ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`) diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index b0fa55e346613..ac4d6d1590f38 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -22,14 +22,17 @@ class _DtypeOpsMixin(object): # of the NA value, not the physical NA vaalue for storage. # e.g. for JSONArray, this is an empty dictionary. na_value = np.nan + _metadata = () def __eq__(self, other): """Check whether 'other' is equal to self. - By default, 'other' is considered equal if + By default, 'other' is considered equal if either * it's a string matching 'self.name'. - * it's an instance of this type. + * it's an instance of this type and all of the + the attributes in ``self._metadata`` are equal between + `self` and `other`. 
Parameters ---------- @@ -40,11 +43,19 @@ def __eq__(self, other): bool """ if isinstance(other, compat.string_types): - return other == self.name - elif isinstance(other, type(self)): - return True - else: - return False + try: + other = self.construct_from_string(other) + except TypeError: + return False + if isinstance(other, type(self)): + return all( + getattr(self, attr) == getattr(other, attr) + for attr in self._metadata + ) + return False + + def __hash__(self): + return hash(tuple(getattr(self, attr) for attr in self._metadata)) def __ne__(self, other): return not self.__eq__(other) @@ -161,6 +172,26 @@ class ExtensionDtype(_DtypeOpsMixin): The `na_value` class attribute can be used to set the default NA value for this type. :attr:`numpy.nan` is used by default. + ExtensionDtypes are required to be hashable. The base class provides + a default implementation, which relies on the ``_metadata`` class + attribute. ``_metadata`` should be a tuple containing the strings + that define your data type. For example, with ``PeriodDtype`` that's + the ``freq`` attribute. + + **If you have a parametrized dtype you should set the ``_metadata`` + class property**. + + Ideally, the attributes in ``_metadata`` will match the + parameters to your ``ExtensionDtype.__init__`` (if any). If any of + the attributes in ``_metadata`` don't implement the standard + ``__eq__`` or ``__hash__``, the default implementations here will not + work. + + .. versionchanged:: 0.24.0 + + Added ``_metadata``, ``__hash__``, and changed the default definition + of ``__eq__``. + This class does not inherit from 'abc.ABCMeta' for performance reasons. 
Methods and properties required by the interface raise ``pandas.errors.AbstractMethodError`` and no ``register`` method is diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index beda9bc02f4d5..611cae28877c3 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -101,7 +101,6 @@ class PandasExtensionDtype(_DtypeOpsMixin): base = None isbuiltin = 0 isnative = 0 - _metadata = [] _cache = {} def __unicode__(self): @@ -209,7 +208,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): kind = 'O' str = '|O08' base = np.dtype('O') - _metadata = ['categories', 'ordered'] + _metadata = ('categories', 'ordered') _cache = {} def __init__(self, categories=None, ordered=None): @@ -485,7 +484,7 @@ class DatetimeTZDtype(PandasExtensionDtype): str = '|M8[ns]' num = 101 base = np.dtype('M8[ns]') - _metadata = ['unit', 'tz'] + _metadata = ('unit', 'tz') _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache = {} @@ -589,7 +588,7 @@ class PeriodDtype(PandasExtensionDtype): str = '|O08' base = np.dtype('O') num = 102 - _metadata = ['freq'] + _metadata = ('freq',) _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") _cache = {} @@ -709,7 +708,7 @@ class IntervalDtype(PandasExtensionDtype, ExtensionDtype): str = '|O08' base = np.dtype('O') num = 103 - _metadata = ['subtype'] + _metadata = ('subtype',) _match = re.compile(r"(I|i)nterval\[(?P<subtype>.+)\]") _cache = {} diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index 8d1f1cadcc23f..d5cf9571e3622 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -49,6 +49,10 @@ def test_eq_with_str(self, dtype): def test_eq_with_numpy_object(self, dtype): assert dtype != np.dtype('object') + def test_eq_with_self(self, dtype): + assert dtype == dtype + assert dtype != object() + def test_array_type(self, data, dtype): assert dtype.construct_array_type() is type(data) @@ -81,3 +85,6 @@ def 
test_check_dtype(self, data): index=list('ABCD')) result = df.dtypes.apply(str) == str(dtype) self.assert_series_equal(result, expected) + + def test_hashable(self, dtype): + hash(dtype) # no error diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 79e1a692f744a..a1ee3a4fefef2 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -15,15 +15,11 @@ class DecimalDtype(ExtensionDtype): type = decimal.Decimal name = 'decimal' na_value = decimal.Decimal('NaN') + _metadata = ('context',) def __init__(self, context=None): self.context = context or decimal.getcontext() - def __eq__(self, other): - if isinstance(other, type(self)): - return self.context == other.context - return super(DecimalDtype, self).__eq__(other) - def __repr__(self): return 'DecimalDtype(context={})'.format(self.context) diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index dd625d6e1eb3c..d65b4dde832e4 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -3,6 +3,7 @@ import numpy as np import pandas as pd +from pandas import compat import pandas.util.testing as tm import pytest @@ -93,7 +94,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs): class TestDtype(BaseDecimal, base.BaseDtypeTests): - pass + @pytest.mark.skipif(compat.PY2, reason="Context not hashable.") + def test_hashable(self, dtype): + pass class TestInterface(BaseDecimal, base.BaseInterfaceTests): diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 87876d84bef99..976511941042d 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -27,6 +27,7 @@ class JSONDtype(ExtensionDtype): type = compat.Mapping name = 'json' + try: na_value = collections.UserDict() except AttributeError:
Implements a default `__eq__` and `__hash__` for ExtensionDtype. Adds a test ensure that they're defined. Do people have thoughts on `_metadata`? I've tried to document everywhere the importance of it for parametrized extension dtypes. If you fail to set it correctly and use the default `__eq__` and `__hash__`, you'll end up with bizarre behavior like `Period('D') == Period("M")` being true, and having the same hash. A more heavyweight alternative is to do some metaclass trickery to inspect `ExtensionDtype.__init__` for parameters and set `_metadata` based on that, but I tend to stay away from metaclasses unless they're absolutely necessary. Closes https://github.com/pandas-dev/pandas/issues/22476
https://api.github.com/repos/pandas-dev/pandas/pulls/22996
2018-10-04T20:15:26Z
2018-10-08T21:21:08Z
2018-10-08T21:21:08Z
2018-10-08T21:21:12Z
use tm.assert_equal instead of parametrizing assert funcs
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 65e151feeba67..445f9a7e5e980 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -880,7 +880,7 @@ If your change involves checking that a warning is actually emitted, use .. code-block:: python - with tm.assert_prodcues_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning): df.some_operation() We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 3b7d6a709230b..74703e2837c4a 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -180,9 +180,9 @@ def test_to_datetime_format_weeks(self, cache): for s, format, dt in data: assert to_datetime(s, format=format, cache=cache) == dt - @pytest.mark.parametrize("box,const,assert_equal", [ - [True, pd.Index, 'assert_index_equal'], - [False, np.array, 'assert_numpy_array_equal']]) + @pytest.mark.parametrize("box,const", [ + [True, pd.Index], + [False, np.array]]) @pytest.mark.parametrize("fmt,dates,expected_dates", [ ['%Y-%m-%d %H:%M:%S %Z', ['2010-01-01 12:00:00 UTC'] * 2, @@ -215,12 +215,11 @@ def test_to_datetime_format_weeks(self, cache): pd.Timestamp('2010-01-01 12:00:00', tzinfo=pytz.FixedOffset(0))]]]) def test_to_datetime_parse_tzname_or_tzoffset(self, box, const, - assert_equal, fmt, - dates, expected_dates): + fmt, dates, expected_dates): # GH 13486 result = pd.to_datetime(dates, format=fmt, box=box) expected = const(expected_dates) - getattr(tm, assert_equal)(result, expected) + tm.assert_equal(result, expected) with pytest.raises(ValueError): pd.to_datetime(dates, format=fmt, box=box, utc=True) @@ -1049,17 +1048,16 @@ def test_to_datetime_types(self, cache): # assert result == expected @pytest.mark.parametrize('cache', [True, False]) - @pytest.mark.parametrize('box, klass, assert_method', [ - 
[True, Index, 'assert_index_equal'], - [False, np.array, 'assert_numpy_array_equal'] + @pytest.mark.parametrize('box, klass', [ + [True, Index], + [False, np.array] ]) - def test_to_datetime_unprocessable_input(self, cache, box, klass, - assert_method): + def test_to_datetime_unprocessable_input(self, cache, box, klass): # GH 4928 # GH 21864 result = to_datetime([1, '1'], errors='ignore', cache=cache, box=box) expected = klass(np.array([1, '1'], dtype='O')) - getattr(tm, assert_method)(result, expected) + tm.assert_equal(result, expected) pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise', cache=cache, box=box) diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index a6b217a37bd0c..bc8582d9b7d29 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -312,19 +312,16 @@ def test_nat_arithmetic_index(): tm.assert_index_equal(NaT - tdi, tdi_nat) -@pytest.mark.parametrize('box, assert_func', [ - (TimedeltaIndex, tm.assert_index_equal), - (Series, tm.assert_series_equal) -]) -def test_nat_arithmetic_td64_vector(box, assert_func): +@pytest.mark.parametrize('box', [TimedeltaIndex, Series]) +def test_nat_arithmetic_td64_vector(box): # GH#19124 vec = box(['1 day', '2 day'], dtype='timedelta64[ns]') box_nat = box([NaT, NaT], dtype='timedelta64[ns]') - assert_func(vec + NaT, box_nat) - assert_func(NaT + vec, box_nat) - assert_func(vec - NaT, box_nat) - assert_func(NaT - vec, box_nat) + tm.assert_equal(vec + NaT, box_nat) + tm.assert_equal(NaT + vec, box_nat) + tm.assert_equal(vec - NaT, box_nat) + tm.assert_equal(NaT - vec, box_nat) def test_nat_pinned_docstrings(): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index b8fabbf52159d..bda4d71d58e82 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -2516,10 +2516,8 @@ def test_onOffset(self, case): dt, expected = case 
assert_onOffset(SemiMonthEnd(), dt, expected) - @pytest.mark.parametrize('klass,assert_func', - [(Series, tm.assert_series_equal), - (DatetimeIndex, tm.assert_index_equal)]) - def test_vectorized_offset_addition(self, klass, assert_func): + @pytest.mark.parametrize('klass', [Series, DatetimeIndex]) + def test_vectorized_offset_addition(self, klass): s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), Timestamp('2000-02-15', tz='US/Central')], name='a') @@ -2527,8 +2525,8 @@ def test_vectorized_offset_addition(self, klass, assert_func): result2 = SemiMonthEnd() + s exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'), Timestamp('2000-02-29', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) + tm.assert_equal(result, exp) + tm.assert_equal(result2, exp) s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'), Timestamp('2000-02-01', tz='US/Central')], name='a') @@ -2536,8 +2534,8 @@ def test_vectorized_offset_addition(self, klass, assert_func): result2 = SemiMonthEnd() + s exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), Timestamp('2000-02-15', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) + tm.assert_equal(result, exp) + tm.assert_equal(result2, exp) class TestSemiMonthBegin(Base): @@ -2692,18 +2690,16 @@ def test_onOffset(self, case): dt, expected = case assert_onOffset(SemiMonthBegin(), dt, expected) - @pytest.mark.parametrize('klass,assert_func', - [(Series, tm.assert_series_equal), - (DatetimeIndex, tm.assert_index_equal)]) - def test_vectorized_offset_addition(self, klass, assert_func): + @pytest.mark.parametrize('klass', [Series, DatetimeIndex]) + def test_vectorized_offset_addition(self, klass): s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), Timestamp('2000-02-15', tz='US/Central')], name='a') result = s + SemiMonthBegin() result2 = SemiMonthBegin() + s exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'), 
Timestamp('2000-03-01', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) + tm.assert_equal(result, exp) + tm.assert_equal(result2, exp) s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'), Timestamp('2000-02-01', tz='US/Central')], name='a') @@ -2711,8 +2707,8 @@ def test_vectorized_offset_addition(self, klass, assert_func): result2 = SemiMonthBegin() + s exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), Timestamp('2000-02-15', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) + tm.assert_equal(result, exp) + tm.assert_equal(result2, exp) def test_Easter(): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 3db251e89842d..4e01e0feb004c 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1522,8 +1522,8 @@ def assert_equal(left, right, **kwargs): Parameters ---------- - left : Index, Series, or DataFrame - right : Index, Series, or DataFrame + left : Index, Series, DataFrame, ExtensionArray, or np.ndarray + right : Index, Series, DataFrame, ExtensionArray, or np.ndarray **kwargs """ if isinstance(left, pd.Index): @@ -1532,6 +1532,10 @@ def assert_equal(left, right, **kwargs): assert_series_equal(left, right, **kwargs) elif isinstance(left, pd.DataFrame): assert_frame_equal(left, right, **kwargs) + elif isinstance(left, ExtensionArray): + assert_extension_array_equal(left, right, **kwargs) + elif isinstance(left, np.ndarray): + assert_numpy_array_equal(left, right, **kwargs) else: raise NotImplementedError(type(left))
https://api.github.com/repos/pandas-dev/pandas/pulls/22995
2018-10-04T19:03:07Z
2018-10-04T21:53:44Z
2018-10-04T21:53:44Z
2020-04-05T17:38:52Z
CircleCI -> Azure
diff --git a/.circleci/config.yml b/.circleci/config.yml index e947f30d285cd..5b10036818901 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,39 +1,6 @@ version: 2 jobs: - # -------------------------------------------------------------------------- - # 0. py27_compat - # -------------------------------------------------------------------------- - py27_compat: - docker: - - image: continuumio/miniconda:latest - # databases configuration - - image: circleci/postgres:9.6.5-alpine-ram - environment: - POSTGRES_USER: postgres - POSTGRES_DB: pandas_nosetest - - image: circleci/mysql:8-ram - environment: - MYSQL_USER: "root" - MYSQL_HOST: "localhost" - MYSQL_ALLOW_EMPTY_PASSWORD: "true" - MYSQL_DATABASE: "pandas_nosetest" - environment: - JOB: "2.7_COMPAT" - ENV_FILE: "ci/circle-27-compat.yaml" - LOCALE_OVERRIDE: "it_IT.UTF-8" - MINICONDA_DIR: /home/ubuntu/miniconda3 - steps: - - checkout - - run: - name: build - command: | - ./ci/install_circle.sh - ./ci/show_circle.sh - - run: - name: test - command: ./ci/run_circle.sh --skip-slow --skip-network - # -------------------------------------------------------------------------- # 1. py36_locale # -------------------------------------------------------------------------- @@ -62,86 +29,14 @@ jobs: - run: name: build command: | - ./ci/install_circle.sh - ./ci/show_circle.sh + ./ci/circle/install_circle.sh + ./ci/circle/show_circle.sh - run: name: test - command: ./ci/run_circle.sh --skip-slow --skip-network - - # -------------------------------------------------------------------------- - # 2. 
py36_locale_slow - # -------------------------------------------------------------------------- - py36_locale_slow: - docker: - - image: continuumio/miniconda:latest - # databases configuration - - image: circleci/postgres:9.6.5-alpine-ram - environment: - POSTGRES_USER: postgres - POSTGRES_DB: pandas_nosetest - - image: circleci/mysql:8-ram - environment: - MYSQL_USER: "root" - MYSQL_HOST: "localhost" - MYSQL_ALLOW_EMPTY_PASSWORD: "true" - MYSQL_DATABASE: "pandas_nosetest" - - environment: - JOB: "3.6_LOCALE_SLOW" - ENV_FILE: "ci/circle-36-locale_slow.yaml" - LOCALE_OVERRIDE: "zh_CN.UTF-8" - MINICONDA_DIR: /home/ubuntu/miniconda3 - steps: - - checkout - - run: - name: build - command: | - ./ci/install_circle.sh - ./ci/show_circle.sh - - run: - name: test - command: ./ci/run_circle.sh --only-slow --skip-network - - # -------------------------------------------------------------------------- - # 3. py35_ascii - # -------------------------------------------------------------------------- - py35_ascii: - docker: - - image: continuumio/miniconda:latest - # databases configuration - - image: circleci/postgres:9.6.5-alpine-ram - environment: - POSTGRES_USER: postgres - POSTGRES_DB: pandas_nosetest - - image: circleci/mysql:8-ram - environment: - MYSQL_USER: "root" - MYSQL_HOST: "localhost" - MYSQL_ALLOW_EMPTY_PASSWORD: "true" - MYSQL_DATABASE: "pandas_nosetest" - - environment: - JOB: "3.5_ASCII" - ENV_FILE: "ci/circle-35-ascii.yaml" - LOCALE_OVERRIDE: "C" - MINICONDA_DIR: /home/ubuntu/miniconda3 - steps: - - checkout - - run: - name: build - command: | - ./ci/install_circle.sh - ./ci/show_circle.sh - - run: - name: test - command: ./ci/run_circle.sh --skip-slow --skip-network - + command: ./ci/circle/run_circle.sh --skip-slow --skip-network workflows: version: 2 build_and_test: jobs: - - py27_compat - py36_locale - - py36_locale_slow - - py35_ascii diff --git a/.travis.yml b/.travis.yml index e8f7f3465bfd5..8ac4d827b0820 100644 --- a/.travis.yml +++ b/.travis.yml @@ 
-116,10 +116,10 @@ after_success: after_script: - echo "after_script start" - source activate pandas && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd - - if [ -e /tmp/single.xml ]; then - ci/print_skipped.py /tmp/single.xml; + - if [ -e test-data-single.xml ]; then + ci/print_skipped.py test-data-single.xml; fi - - if [ -e /tmp/multiple.xml ]; then - ci/print_skipped.py /tmp/multiple.xml; + - if [ -e test-data-multiple.xml ]; then + ci/print_skipped.py test-data-multiple.xml; fi - echo "after_script done" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5d473bfc5a38c..373c22fdf8e62 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -7,10 +7,10 @@ jobs: parameters: name: macOS vmImage: xcode9-macos10.13 -# - template: ci/azure/linux.yml -# parameters: -# name: Linux -# vmImage: ubuntu-16.04 +- template: ci/azure/linux.yml + parameters: + name: Linux + vmImage: ubuntu-16.04 # Windows Python 2.7 needs VC 9.0 installed, and not sure # how to make that a conditional task, so for now these are diff --git a/ci/circle-27-compat.yaml b/ci/azure-27-compat.yaml similarity index 100% rename from ci/circle-27-compat.yaml rename to ci/azure-27-compat.yaml diff --git a/ci/circle-36-locale_slow.yaml b/ci/azure-36-locale_slow.yaml similarity index 100% rename from ci/circle-36-locale_slow.yaml rename to ci/azure-36-locale_slow.yaml diff --git a/ci/azure-37-locale.yaml b/ci/azure-37-locale.yaml new file mode 100644 index 0000000000000..ef97b85406709 --- /dev/null +++ b/ci/azure-37-locale.yaml @@ -0,0 +1,35 @@ +name: pandas +channels: + - defaults + - conda-forge +dependencies: + - beautifulsoup4 + - cython>=0.28.2 + - html5lib + - ipython + - jinja2 + - lxml + - matplotlib + - nomkl + - numexpr + - numpy + - openpyxl=2.5.5 + - psycopg2 + - pymysql + - pytables + - python-dateutil + - python=3.6* + - pytz + - s3fs + - scipy + - sqlalchemy + - xarray + - xlrd + - xlsxwriter + - xlwt + # universal + - pytest + - pytest-xdist + - moto + 
- pip: + - hypothesis>=3.58.0 diff --git a/ci/azure/linux.yml b/ci/azure/linux.yml new file mode 100644 index 0000000000000..f34cba69a6195 --- /dev/null +++ b/ci/azure/linux.yml @@ -0,0 +1,56 @@ +parameters: + name: '' + vmImage: '' + +jobs: +- job: ${{ parameters.name }} + pool: + vmImage: ${{ parameters.vmImage }} + strategy: + maxParallel: 11 + matrix: + py27_np_19: + ENV_FILE: ci/azure-27-compat.yaml + CONDA_PY: "27" + CONDA_ENV: pandas + TEST_ARGS: "--skip-slow --skip-network" + + py36_locale: + ENV_FILE: ci/azure-37-locale.yaml + CONDA_PY: "37" + CONDA_ENV: pandas + TEST_ARGS: "--skip-slow --skip-network" + LOCALE_OVERRIDE: "zh_CN.UTF-8" + + py36_locale_slow: + ENV_FILE: ci/azure-36-locale_slow.yaml + CONDA_PY: "36" + CONDA_ENV: pandas + TEST_ARGS: "--only-slow --skip-network" + + steps: + - script: | + if [ "$(uname)" == "Linux" ]; then sudo apt-get install -y libc6-dev-i386; fi + echo "Installing Miniconda"{ + ci/incremental/install_miniconda.sh + export PATH=$HOME/miniconda3/bin:$PATH + echo "Setting up Conda environment" + ci/incremental/setup_conda_environment.sh + displayName: 'Before Install' + - script: | + export PATH=$HOME/miniconda3/bin:$PATH + ci/incremental/build.sh + displayName: 'Build' + - script: | + export PATH=$HOME/miniconda3/bin:$PATH + ci/script_single.sh + ci/script_multi.sh + echo "[Test done]" + displayName: 'Test' + - script: | + export PATH=$HOME/miniconda3/bin:$PATH + source activate pandas && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd + - task: PublishTestResults@2 + inputs: + testResultsFiles: 'test-data-*.xml' + testRunTitle: 'Linux' \ No newline at end of file diff --git a/ci/azure/macos.yml b/ci/azure/macos.yml index fb10d89731f26..53ce51c76683c 100644 --- a/ci/azure/macos.yml +++ b/ci/azure/macos.yml @@ -39,5 +39,5 @@ jobs: source activate pandas && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd - task: PublishTestResults@2 inputs: - testResultsFiles: '/tmp/*.xml' + 
testResultsFiles: 'test-data-*.xml' testRunTitle: 'MacOS-35' diff --git a/ci/circle-35-ascii.yaml b/ci/circle-35-ascii.yaml deleted file mode 100644 index 281ed59e2deff..0000000000000 --- a/ci/circle-35-ascii.yaml +++ /dev/null @@ -1,15 +0,0 @@ -name: pandas -channels: - - defaults -dependencies: - - cython>=0.28.2 - - nomkl - - numpy - - python-dateutil - - python=3.5* - - pytz - # universal - - pytest - - pytest-xdist - - pip: - - hypothesis>=3.58.0 diff --git a/ci/install_circle.sh b/ci/circle/install_circle.sh similarity index 100% rename from ci/install_circle.sh rename to ci/circle/install_circle.sh diff --git a/ci/run_circle.sh b/ci/circle/run_circle.sh similarity index 100% rename from ci/run_circle.sh rename to ci/circle/run_circle.sh diff --git a/ci/show_circle.sh b/ci/circle/show_circle.sh similarity index 100% rename from ci/show_circle.sh rename to ci/circle/show_circle.sh diff --git a/ci/incremental/setup_conda_environment.sh b/ci/incremental/setup_conda_environment.sh index c716a39138644..f3ac99d5e7c5a 100755 --- a/ci/incremental/setup_conda_environment.sh +++ b/ci/incremental/setup_conda_environment.sh @@ -27,13 +27,17 @@ set -v # w/o removing anything else echo echo "[removing installed pandas]" -conda remove pandas -y --force -pip uninstall -y pandas +conda remove pandas -y --force || true +pip uninstall -y pandas || true echo echo "[no installed pandas]" conda list pandas +if [ -n "$LOCALE_OVERRIDE" ]; then + sudo locale-gen "$LOCALE_OVERRIDE" +fi + # # Install the compiler toolchain # if [[ $(uname) == Linux ]]; then # if [[ "$CONDA_SUBDIR" == "linux-32" || "$BITS32" == "yes" ]] ; then diff --git a/ci/script_multi.sh b/ci/script_multi.sh index dcc5a14d7b3b4..e076558e8fff3 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -27,17 +27,17 @@ if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" elif [ "$COVERAGE" ]; then - echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml 
--junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas - pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas + echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas + pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas elif [ "$SLOW" ]; then TEST_ARGS="--only-slow --skip-network" - echo pytest -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas - pytest -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas + echo pytest -m "not single and slow" -v --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas + pytest -m "not single and slow" -v --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas else - echo pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas - pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas # TODO: doctest + echo pytest -n 2 -m "not single" --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas + pytest -n 2 -m "not single" --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas # TODO: doctest fi diff --git a/ci/script_single.sh b/ci/script_single.sh index 09e7446a2d876..42d326e0965ee 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -5,8 +5,9 @@ echo "[script_single]" source activate pandas if [ -n "$LOCALE_OVERRIDE" ]; then + echo "Setting LC_ALL and LANG to $LOCALE_OVERRIDE" export LC_ALL="$LOCALE_OVERRIDE"; - echo "Setting LC_ALL to $LOCALE_OVERRIDE" + export LANG="$LOCALE_OVERRIDE"; pycmd='import pandas; print("pandas detected console encoding: %s" % pandas.get_option("display.encoding"))' python -c "$pycmd" @@ -25,14 +26,13 @@ if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" elif [ 
"$COVERAGE" ]; then - echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas - pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas - + echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=test-data-single.xml $TEST_ARGS pandas + pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=test-data-single.xml $TEST_ARGS pandas echo pytest -s --strict scripts pytest -s --strict scripts else - echo pytest -m "single" --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas - pytest -m "single" --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest + echo pytest -m "single" --junitxml=test-data-single.xml --strict $TEST_ARGS pandas + pytest -m "single" --junitxml=test-data-single.xml --strict $TEST_ARGS pandas fi diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 67b8d287d5d1a..66d545a0de6e9 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -684,7 +684,7 @@ Test-driven development/code writing ------------------------------------ *pandas* is serious about testing and strongly encourages contributors to embrace -`test-driven development (TDD) <http://en.wikipedia.org/wiki/Test-driven_development>`_. +`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_. This development process "relies on the repetition of a very short development cycle: first the developer writes an (initially failing) automated test case that defines a desired improvement or new function, then produces the minimum amount of code to pass that test."
Closes #22396 closes #22763 cc @jreback @h-vetinari @jorisvandenbossche @WillAyd The main thing we lose is redundancy on the sql tests. Those are down to just travis.
https://api.github.com/repos/pandas-dev/pandas/pulls/22992
2018-10-04T14:50:58Z
2018-10-26T12:07:44Z
2018-10-26T12:07:43Z
2018-10-26T12:08:08Z
CI: Pin IPython for doc build
diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml index 50626088d5bc4..8353659e7b9a9 100644 --- a/ci/travis-36-doc.yaml +++ b/ci/travis-36-doc.yaml @@ -12,7 +12,7 @@ dependencies: - html5lib - hypothesis>=3.58.0 - ipykernel - - ipython + - ipython==6.5.0 - ipywidgets - lxml - matplotlib
xref #22990 https://github.com/ipython/ipython/issues/11362
https://api.github.com/repos/pandas-dev/pandas/pulls/22991
2018-10-04T14:21:26Z
2018-10-04T15:15:58Z
2018-10-04T15:15:58Z
2018-10-05T04:21:17Z