title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Removed debugging code | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index a14d8e4471c23..062d1876141f8 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -253,9 +253,6 @@ def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
# local_dict={'lhs': lhs, 'rhs': rhs},
# engine=self.engine, parser=self.parser)
# except AssertionError:
- # import ipdb
- #
- # ipdb.set_trace()
# raise
else:
expected = _eval_single_bin(
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index bda486411e01e..4129184373a2a 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -865,7 +865,6 @@ class TestIndexing(object):
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
- # import pudb; pudb.set_trace()
mat = mgr.as_array()
# we maybe using an ndarray to test slicing and
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index a5ae1f6a4d960..6e88cd7f72dcd 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -638,35 +638,6 @@ def set_defaultencoding(encoding):
sys.setdefaultencoding(orig)
-# -----------------------------------------------------------------------------
-# Console debugging tools
-
-
-def debug(f, *args, **kwargs):
- from pdb import Pdb as OldPdb
- try:
- from IPython.core.debugger import Pdb
- kw = dict(color_scheme='Linux')
- except ImportError:
- Pdb = OldPdb
- kw = {}
- pdb = Pdb(**kw)
- return pdb.runcall(f, *args, **kwargs)
-
-
-def pudebug(f, *args, **kwargs):
- import pudb
- return pudb.runcall(f, *args, **kwargs)
-
-
-def set_trace():
- from IPython.core.debugger import Pdb
- try:
- Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
- except Exception:
- from pdb import Pdb as OldPdb
- OldPdb().set_trace(sys._getframe().f_back)
-
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
| I randomly came across this code when messing around with some Mypy configurations. It looks like this code depends on third party packages which we don't even use in the code base. They also haven't been touched in 7-8 years so I assume this is dead code.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25647 | 2019-03-11T02:29:58Z | 2019-03-11T12:10:55Z | 2019-03-11T12:10:55Z | 2019-03-11T17:38:09Z |
CLN: Remove Panel benchmarks | diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 57ba9cd80e55c..b8e983c60b8b5 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -2,7 +2,7 @@
import numpy as np
import pandas.util.testing as tm
-from pandas import (Series, DataFrame, Panel, MultiIndex,
+from pandas import (Series, DataFrame, MultiIndex,
Int64Index, UInt64Index, Float64Index,
IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
@@ -277,18 +277,6 @@ def time_get_indexer_list(self, index):
self.data.get_indexer(self.cat_list)
-class PanelIndexing(object):
-
- def setup(self):
- with warnings.catch_warnings(record=True):
- self.p = Panel(np.random.randn(100, 100, 100))
- self.inds = range(0, 100, 10)
-
- def time_subset(self):
- with warnings.catch_warnings(record=True):
- self.p.ix[(self.inds, self.inds, self.inds)]
-
-
class MethodLookup(object):
def setup_cache(self):
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 6da8287a06d80..baad8b61bfd19 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -1,9 +1,8 @@
-import warnings
import string
import numpy as np
import pandas.util.testing as tm
-from pandas import (DataFrame, Series, Panel, MultiIndex,
+from pandas import (DataFrame, Series, MultiIndex,
date_range, concat, merge, merge_asof)
try:
@@ -66,31 +65,6 @@ def time_concat_mixed_ndims(self, axis):
concat(self.mixed_ndims, axis=axis)
-class ConcatPanels(object):
-
- params = ([0, 1, 2], [True, False])
- param_names = ['axis', 'ignore_index']
-
- def setup(self, axis, ignore_index):
- with warnings.catch_warnings(record=True):
- panel_c = Panel(np.zeros((10000, 200, 2),
- dtype=np.float32,
- order='C'))
- self.panels_c = [panel_c] * 20
- panel_f = Panel(np.zeros((10000, 200, 2),
- dtype=np.float32,
- order='F'))
- self.panels_f = [panel_f] * 20
-
- def time_c_ordered(self, axis, ignore_index):
- with warnings.catch_warnings(record=True):
- concat(self.panels_c, axis=axis, ignore_index=ignore_index)
-
- def time_f_ordered(self, axis, ignore_index):
- with warnings.catch_warnings(record=True):
- concat(self.panels_f, axis=axis, ignore_index=ignore_index)
-
-
class ConcatDataFrames(object):
params = ([0, 1], [True, False])
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
deleted file mode 100644
index 627705284481b..0000000000000
--- a/asv_bench/benchmarks/panel_ctor.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import warnings
-from datetime import datetime, timedelta
-
-from pandas import DataFrame, Panel, date_range
-
-
-class DifferentIndexes(object):
- def setup(self):
- self.data_frames = {}
- start = datetime(1990, 1, 1)
- end = datetime(2012, 1, 1)
- for x in range(100):
- end += timedelta(days=1)
- idx = date_range(start, end)
- df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx)
- self.data_frames[x] = df
-
- def time_from_dict(self):
- with warnings.catch_warnings(record=True):
- Panel.from_dict(self.data_frames)
-
-
-class SameIndexes(object):
-
- def setup(self):
- idx = date_range(start=datetime(1990, 1, 1),
- end=datetime(2012, 1, 1),
- freq='D')
- df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx)
- self.data_frames = dict(enumerate([df] * 100))
-
- def time_from_dict(self):
- with warnings.catch_warnings(record=True):
- Panel.from_dict(self.data_frames)
-
-
-class TwoIndexes(object):
-
- def setup(self):
- start = datetime(1990, 1, 1)
- end = datetime(2012, 1, 1)
- df1 = DataFrame({'a': 0, 'b': 1, 'c': 2},
- index=date_range(start=start, end=end, freq='D'))
- end += timedelta(days=1)
- df2 = DataFrame({'a': 0, 'b': 1, 'c': 2},
- index=date_range(start=start, end=end, freq='D'))
- dfs = [df1] * 50 + [df2] * 50
- self.data_frames = dict(enumerate(dfs))
-
- def time_from_dict(self):
- with warnings.catch_warnings(record=True):
- Panel.from_dict(self.data_frames)
-
-
-from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
deleted file mode 100644
index a4c12c082236e..0000000000000
--- a/asv_bench/benchmarks/panel_methods.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import warnings
-
-import numpy as np
-from pandas import Panel
-
-
-class PanelMethods(object):
-
- params = ['items', 'major', 'minor']
- param_names = ['axis']
-
- def setup(self, axis):
- with warnings.catch_warnings(record=True):
- self.panel = Panel(np.random.randn(100, 1000, 100))
-
- def time_pct_change(self, axis):
- with warnings.catch_warnings(record=True):
- self.panel.pct_change(1, axis=axis)
-
- def time_shift(self, axis):
- with warnings.catch_warnings(record=True):
- self.panel.shift(1, axis=axis)
-
-
-from .pandas_vb_common import setup # noqa: F401
| xref #25632
| https://api.github.com/repos/pandas-dev/pandas/pulls/25646 | 2019-03-11T02:21:29Z | 2019-03-14T16:50:05Z | 2019-03-14T16:50:05Z | 2019-03-14T16:50:09Z |
backport of 25643, 25638 on 0.24.x | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b97f5e0b6edf9..40a942c96ea2b 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -11,7 +11,7 @@
import pytest
from pandas.compat import (
- PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
+ PY2, PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
@@ -20,7 +20,7 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
- compat, date_range, isna)
+ _np_version_under1p13, compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -682,6 +682,8 @@ def test_constructor_ndarray(self):
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
@@ -698,6 +700,8 @@ def test_constructor_maskedarray(self):
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
@@ -765,6 +769,8 @@ def test_constructor_maskedarray_nonfloat(self):
assert frame['A'][1] is True
assert frame['C'][2] is False
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask()
@@ -787,6 +793,8 @@ def test_constructor_maskedarray_hardened(self):
dtype=float)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedrecarray_dtype(self):
# Ensure constructor honors dtype
data = np.ma.array(
@@ -798,6 +806,8 @@ def test_constructor_maskedrecarray_dtype(self):
columns=['date', 'price'])
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
| xref #25630
PR #25643
PR #25638 | https://api.github.com/repos/pandas-dev/pandas/pulls/25645 | 2019-03-10T23:11:20Z | 2019-03-11T00:24:54Z | 2019-03-11T00:24:54Z | 2019-03-11T00:24:54Z |
BUG: Fix error in replace with strings that are large numbers (#25616) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index c07959c758780..5b5c9c78d10da 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -33,6 +33,7 @@ Fixed Regressions
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
- Fixed regression in :func:`to_timedelta` losing precision when converting floating data to ``Timedelta`` data (:issue:`25077`).
- Fixed pip installing from source into an environment without NumPy (:issue:`25193`)
+- Fixed regression in :meth:`DataFrame.replace` where large strings of numbers would be coerced into ``int64``, causing an ``OverflowError`` (:issue:`25616`)
- Fixed regression in :func:`factorize` when passing a custom ``na_sentinel`` value with ``sort=True`` (:issue:`25409`).
- Fixed regression in :meth:`DataFrame.to_csv` writing duplicate line endings with gzip compress (:issue:`25311`)
@@ -90,6 +91,7 @@ A total of 25 people contributed patches to this release. People with a "+" by t
* Joris Van den Bossche
* Josh
* Justin Zheng
+* Kendall Masse
* Matthew Roeschke
* Max Bolingbroke +
* rbenes +
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index ada663556899b..0375f782badcc 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1079,7 +1079,7 @@ def coerce_to_target_dtype(self, other):
try:
return self.astype(dtype)
- except (ValueError, TypeError):
+ except (ValueError, TypeError, OverflowError):
pass
return self.astype(object)
@@ -3210,7 +3210,7 @@ def _putmask_smart(v, m, n):
nv = v.copy()
nv[m] = nn_at
return nv
- except (ValueError, IndexError, TypeError):
+ except (ValueError, IndexError, TypeError, OverflowError):
pass
n = np.asarray(n)
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 40b28047080da..2e7b746f6c9f2 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -280,3 +280,17 @@ def test_replace_mixed_types_with_string(self):
result = s.replace([2, '4'], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
+
+ def test_replace_with_no_overflowerror(self):
+ # GH 25616
+ # casts to object without Exception from OverflowError
+ s = pd.Series([0, 1, 2, 3, 4])
+ result = s.replace([3], ['100000000000000000000'])
+ expected = pd.Series([0, 1, 2, '100000000000000000000', 4])
+ tm.assert_series_equal(result, expected)
+
+ s = pd.Series([0, '100000000000000000000',
+ '100000000000000000001'])
+ result = s.replace(['100000000000000000000'], [1])
+ expected = pd.Series([0, 1, '100000000000000000001'])
+ tm.assert_series_equal(result, expected)
| - [x] closes #25616
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
See discussion in #25616.
When `.replace` saw a value that looks like an int, it would try to convert it even if it caused an `OverflowError`. This issue is only happening in newer versions of pandas due to the addition of `coerce_to_target_dtype` in `_replace_coerce`. `coerce_to_target_dtype` is required to fix a lot of other issues, so the fix here was to prevent a coercion to an int that would cause an OverflowError by catching that exception, allowing the values to remain as objects.
I tried to play around with `coerce_to_target_dtype` as well (moving it until after the replace, only doing it when covert is True, etc.) but this caused various other coercion and replace tests to fail, so I left that untouched.
Tests have been added for both cases where I found `OverflowError` could occur with `replace`. | https://api.github.com/repos/pandas-dev/pandas/pulls/25644 | 2019-03-10T22:35:40Z | 2019-03-12T20:45:24Z | 2019-03-12T20:45:24Z | 2019-03-12T20:48:28Z |
TST: xref #25630 | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 92ce6369a5109..1d5cbfec8de52 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -684,7 +684,7 @@ def test_constructor_ndarray(self):
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
- @pytest.mark.skipif(PY2 & _np_version_under1p13,
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
reason="old numpy & py2")
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
@@ -702,6 +702,8 @@ def test_constructor_maskedarray(self):
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
@@ -769,6 +771,8 @@ def test_constructor_maskedarray_nonfloat(self):
assert frame['A'][1] is True
assert frame['C'][2] is False
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask()
@@ -791,6 +795,8 @@ def test_constructor_maskedarray_hardened(self):
dtype=float)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedrecarray_dtype(self):
# Ensure constructor honors dtype
data = np.ma.array(
@@ -802,6 +808,8 @@ def test_constructor_maskedrecarray_dtype(self):
columns=['date', 'price'])
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2 and _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
| skip on PY2 & old numpy for masked arrays
| https://api.github.com/repos/pandas-dev/pandas/pulls/25643 | 2019-03-10T21:23:59Z | 2019-03-10T22:57:27Z | 2019-03-10T22:57:27Z | 2019-03-11T19:19:43Z |
Fixturize tests/frame/test_mutate_columns.py | diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 6bef7e3f65b21..211173371ac7e 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -8,14 +8,13 @@
from pandas.compat import PY36, lrange, range
from pandas import DataFrame, Index, MultiIndex, Series
-from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
# Column add, remove, delete.
-class TestDataFrameMutateColumns(TestData):
+class TestDataFrameMutateColumns():
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
@@ -193,9 +192,9 @@ def test_insert(self):
exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
assert_frame_equal(df, exp)
- def test_delitem(self):
- del self.frame['A']
- assert 'A' not in self.frame
+ def test_delitem(self, float_frame):
+ del float_frame['A']
+ assert 'A' not in float_frame
def test_delitem_multiindex(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
@@ -223,16 +222,16 @@ def test_delitem_multiindex(self):
with pytest.raises(KeyError):
del df['A']
- def test_pop(self):
- self.frame.columns.name = 'baz'
+ def test_pop(self, float_frame):
+ float_frame.columns.name = 'baz'
- self.frame.pop('A')
- assert 'A' not in self.frame
+ float_frame.pop('A')
+ assert 'A' not in float_frame
- self.frame['foo'] = 'bar'
- self.frame.pop('foo')
- assert 'foo' not in self.frame
- assert self.frame.columns.name == 'baz'
+ float_frame['foo'] = 'bar'
+ float_frame.pop('foo')
+ assert 'foo' not in float_frame
+ assert float_frame.columns.name == 'baz'
# gh-10912: inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
| One more steps towards #22471. | https://api.github.com/repos/pandas-dev/pandas/pulls/25642 | 2019-03-10T19:08:26Z | 2019-03-10T21:54:55Z | 2019-03-10T21:54:55Z | 2019-03-11T06:48:06Z |
Fixturize tests/frame/test_operators.py | diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index e9521fa1506af..9707ae80e6812 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -13,7 +13,7 @@
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, compat
import pandas.core.common as com
-from pandas.tests.frame.common import TestData, _check_mixed_float
+from pandas.tests.frame.common import _check_mixed_float
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_numpy_array_equal, assert_series_equal)
@@ -207,7 +207,7 @@ def test_logical_with_nas(self):
assert_series_equal(result, expected)
-class TestDataFrameOperators(TestData):
+class TestDataFrameOperators(object):
@pytest.mark.parametrize('op', [operator.add, operator.sub,
operator.mul, operator.truediv])
@@ -238,9 +238,9 @@ def test_operators_none_as_na(self, op):
('__ne__', True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
- def test_logical_typeerror_with_non_valid(self, op, res):
+ def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
- result = getattr(self.frame, op)('foo')
+ result = getattr(float_frame, op)('foo')
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
@@ -318,16 +318,17 @@ def test_dti_tz_convert_to_utc(self):
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
- def test_combineFrame(self):
- frame_copy = self.frame.reindex(self.frame.index[::2])
+ def test_combineFrame(self, float_frame, mixed_float_frame,
+ mixed_int_frame):
+ frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = np.nan
- added = self.frame + frame_copy
+ added = float_frame + frame_copy
indexer = added['A'].dropna().index
- exp = (self.frame['A'] * 2).copy()
+ exp = (float_frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer])
@@ -340,95 +341,94 @@ def test_combineFrame(self):
assert np.isnan(added['D']).all()
- self_added = self.frame + self.frame
- tm.assert_index_equal(self_added.index, self.frame.index)
+ self_added = float_frame + float_frame
+ tm.assert_index_equal(self_added.index, float_frame.index)
- added_rev = frame_copy + self.frame
+ added_rev = frame_copy + float_frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
- plus_empty = self.frame + self.empty
+ plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
- empty_plus = self.empty + self.frame
+ empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
- empty_empty = self.empty + self.empty
+ empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
- reverse = self.frame.reindex(columns=self.frame.columns[::-1])
+ reverse = float_frame.reindex(columns=float_frame.columns[::-1])
- assert_frame_equal(reverse + self.frame, self.frame * 2)
+ assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
- added = self.frame + self.mixed_float
+ added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype='float64')
- added = self.mixed_float + self.frame
+ added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
- added = self.mixed_float + self.mixed_float2
- _check_mixed_float(added, dtype=dict(C=None))
- added = self.mixed_float2 + self.mixed_float
+ added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
- added = self.frame + self.mixed_int
+ added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype='float64')
- def test_combineSeries(self):
+ def test_combineSeries(self, float_frame, mixed_float_frame,
+ mixed_int_frame, datetime_frame):
# Series
- series = self.frame.xs(self.frame.index[0])
+ series = float_frame.xs(float_frame.index[0])
- added = self.frame + series
+ added = float_frame + series
for key, s in compat.iteritems(added):
- assert_series_equal(s, self.frame[key] + series[key])
+ assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
- larger_added = self.frame + larger_series
+ larger_added = float_frame + larger_series
- for key, s in compat.iteritems(self.frame):
+ for key, s in compat.iteritems(float_frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# no upcast needed
- added = self.mixed_float + series
+ added = mixed_float_frame + series
_check_mixed_float(added)
# vs mix (upcast) as needed
- added = self.mixed_float + series.astype('float32')
+ added = mixed_float_frame + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
- added = self.mixed_float + series.astype('float16')
+ added = mixed_float_frame + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
- # added = self.mixed_int + (100*series).astype('int64')
+ # added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
- # added = self.mixed_int + (100*series).astype('int32')
+ # added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
- ts = self.tsframe['A']
+ ts = datetime_frame['A']
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
- added = self.tsframe.add(ts, axis='index')
+ added = datetime_frame.add(ts, axis='index')
- for key, col in compat.iteritems(self.tsframe):
+ for key, col in compat.iteritems(datetime_frame):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
@@ -437,52 +437,52 @@ def test_combineSeries(self):
else:
assert result.name is None
- smaller_frame = self.tsframe[:-5]
+ smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
- tm.assert_index_equal(smaller_added.index, self.tsframe.index)
+ tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
- smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
+ smaller_added2 = datetime_frame.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
- result = self.tsframe.add(ts[:0], axis='index')
- expected = DataFrame(np.nan, index=self.tsframe.index,
- columns=self.tsframe.columns)
+ result = datetime_frame.add(ts[:0], axis='index')
+ expected = DataFrame(np.nan, index=datetime_frame.index,
+ columns=datetime_frame.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
- result = self.tsframe[:0].add(ts, axis='index')
- expected = DataFrame(np.nan, index=self.tsframe.index,
- columns=self.tsframe.columns)
+ result = datetime_frame[:0].add(ts, axis='index')
+ expected = DataFrame(np.nan, index=datetime_frame.index,
+ columns=datetime_frame.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
- frame = self.tsframe[:1].reindex(columns=[])
+ frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
- def test_combineFunc(self):
- result = self.frame * 2
- tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
+ def test_combineFunc(self, float_frame, mixed_float_frame):
+ result = float_frame * 2
+ tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
- result = self.mixed_float * 2
+ result = mixed_float_frame * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
- s.values, self.mixed_float[c].values * 2)
+ s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
- result = self.empty * 2
- assert result.index is self.empty.index
+ result = DataFrame() * 2
+ assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
- def test_comparisons(self):
+ def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
- row = self.simple.xs('a')
+ row = simple_frame.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
@@ -493,17 +493,17 @@ def test_comp(func):
with pytest.raises(ValueError, match='dim must be <= 2'):
func(df1, ndim_5)
- result2 = func(self.simple, row)
+ result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(result2.values,
- func(self.simple.values, row.values))
+ func(simple_frame.values, row.values))
- result3 = func(self.frame, 0)
+ result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values,
- func(self.frame.values, 0))
+ func(float_frame.values, 0))
msg = 'Can only compare identically-labeled DataFrame'
with pytest.raises(ValueError, match=msg):
- func(self.simple, self.simple[:2])
+ func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
@@ -599,9 +599,9 @@ def test_boolean_comparison(self):
with pytest.raises(ValueError, match=msg1d):
result = df == tup
- def test_combine_generic(self):
- df1 = self.frame
- df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
+ def test_combine_generic(self, float_frame):
+ df1 = float_frame
+ df2 = float_frame.loc[float_frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
@@ -611,8 +611,8 @@ def test_combine_generic(self):
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
- exp = self.frame.loc[self.frame.index[:-5],
- ['A', 'B', 'C']].reindex_like(chunk) * 2
+ exp = float_frame.loc[float_frame.index[:-5],
+ ['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
| One more steps towards #22471. | https://api.github.com/repos/pandas-dev/pandas/pulls/25641 | 2019-03-10T19:07:47Z | 2019-03-14T15:52:01Z | 2019-03-14T15:52:01Z | 2019-03-14T19:32:28Z |
TST: Fixturize tests/frame/test_missing.py | diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 189531c7b4459..94be24710362a 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -14,7 +14,7 @@
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
-from pandas.tests.frame.common import TestData, _check_mixed_float
+from pandas.tests.frame.common import _check_mixed_float
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -34,15 +34,15 @@ def _skip_if_no_pchip():
pytest.skip('scipy.interpolate.pchip missing')
-class TestDataFrameMissingData(TestData):
+class TestDataFrameMissingData():
- def test_dropEmptyRows(self):
- N = len(self.frame.index)
+ def test_dropEmptyRows(self, float_frame):
+ N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
- frame = DataFrame({'foo': mat}, index=self.frame.index)
- original = Series(mat, index=self.frame.index, name='foo')
+ frame = DataFrame({'foo': mat}, index=float_frame.index)
+ original = Series(mat, index=float_frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
@@ -58,21 +58,21 @@ def test_dropEmptyRows(self):
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
- def test_dropIncompleteRows(self):
- N = len(self.frame.index)
+ def test_dropIncompleteRows(self, float_frame):
+ N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
- frame = DataFrame({'foo': mat}, index=self.frame.index)
+ frame = DataFrame({'foo': mat}, index=float_frame.index)
frame['bar'] = 5
- original = Series(mat, index=self.frame.index, name='foo')
+ original = Series(mat, index=float_frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
- exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
+ exp = Series(mat[5:], index=float_frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
@@ -80,8 +80,8 @@ def test_dropIncompleteRows(self):
assert_series_equal(frame['foo'], original)
assert (frame['bar'] == 5).all()
inp_frame2.dropna(subset=['bar'], inplace=True)
- tm.assert_index_equal(samesize_frame.index, self.frame.index)
- tm.assert_index_equal(inp_frame2.index, self.frame.index)
+ tm.assert_index_equal(samesize_frame.index, float_frame.index)
+ tm.assert_index_equal(inp_frame2.index, float_frame.index)
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_dropna(self):
@@ -160,17 +160,17 @@ def test_drop_and_dropna_caching(self):
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
- def test_dropna_corner(self):
+ def test_dropna_corner(self, float_frame):
# bad input
msg = "invalid how option: foo"
with pytest.raises(ValueError, match=msg):
- self.frame.dropna(how='foo')
+ float_frame.dropna(how='foo')
msg = "must specify how or thresh"
with pytest.raises(TypeError, match=msg):
- self.frame.dropna(how=None)
+ float_frame.dropna(how=None)
# non-existent column - 8303
with pytest.raises(KeyError, match=r"^\['X'\]$"):
- self.frame.dropna(subset=['A', 'X'])
+ float_frame.dropna(subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
@@ -215,35 +215,39 @@ def test_dropna_tz_aware_datetime(self):
index=[0, 3])
assert_frame_equal(result, expected)
- def test_fillna(self):
- tf = self.tsframe
+ def test_fillna_datetime(self, datetime_frame):
+ tf = datetime_frame
tf.loc[tf.index[:5], 'A'] = np.nan
tf.loc[tf.index[-5:], 'A'] = np.nan
- zero_filled = self.tsframe.fillna(0)
+ zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], 'A'] == 0).all()
- padded = self.tsframe.fillna(method='pad')
+ padded = datetime_frame.fillna(method='pad')
assert np.isnan(padded.loc[padded.index[:5], 'A']).all()
assert (padded.loc[padded.index[-5:], 'A'] ==
padded.loc[padded.index[-5], 'A']).all()
- # mixed type
- mf = self.mixed_frame
- mf.loc[mf.index[5:20], 'foo'] = np.nan
- mf.loc[mf.index[-10:], 'A'] = np.nan
- result = self.mixed_frame.fillna(value=0)
- result = self.mixed_frame.fillna(method='pad')
-
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
- self.tsframe.fillna()
+ datetime_frame.fillna()
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
- self.tsframe.fillna(5, method='ffill')
+ datetime_frame.fillna(5, method='ffill')
+
+ def test_fillna_mixed_type(self, float_string_frame):
+
+ mf = float_string_frame
+ mf.loc[mf.index[5:20], 'foo'] = np.nan
+ mf.loc[mf.index[-10:], 'A'] = np.nan
+ # TODO: make stronger assertion here, GH 25640
+ mf.fillna(value=0)
+ mf.fillna(method='pad')
+
+ def test_fillna_mixed_float(self, mixed_float_frame):
# mixed numeric (but no float16)
- mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
+ mf = mixed_float_frame.reindex(columns=['A', 'B', 'D'])
mf.loc[mf.index[-10:], 'A'] = np.nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
@@ -251,6 +255,7 @@ def test_fillna(self):
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
+ def test_fillna_other(self):
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
@@ -464,19 +469,19 @@ def test_fillna_datetime_columns(self):
index=pd.date_range('20130110', periods=3))
tm.assert_frame_equal(result, expected)
- def test_ffill(self):
- self.tsframe['A'][:5] = np.nan
- self.tsframe['A'][-5:] = np.nan
+ def test_ffill(self, datetime_frame):
+ datetime_frame['A'][:5] = np.nan
+ datetime_frame['A'][-5:] = np.nan
- assert_frame_equal(self.tsframe.ffill(),
- self.tsframe.fillna(method='ffill'))
+ assert_frame_equal(datetime_frame.ffill(),
+ datetime_frame.fillna(method='ffill'))
- def test_bfill(self):
- self.tsframe['A'][:5] = np.nan
- self.tsframe['A'][-5:] = np.nan
+ def test_bfill(self, datetime_frame):
+ datetime_frame['A'][:5] = np.nan
+ datetime_frame['A'][-5:] = np.nan
- assert_frame_equal(self.tsframe.bfill(),
- self.tsframe.fillna(method='bfill'))
+ assert_frame_equal(datetime_frame.bfill(),
+ datetime_frame.fillna(method='bfill'))
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
@@ -602,24 +607,24 @@ def test_fillna_columns(self):
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
- def test_fillna_invalid_method(self):
+ def test_fillna_invalid_method(self, float_frame):
with pytest.raises(ValueError, match='ffil'):
- self.frame.fillna(method='ffil')
+ float_frame.fillna(method='ffil')
- def test_fillna_invalid_value(self):
+ def test_fillna_invalid_value(self, float_frame):
# list
msg = ("\"value\" parameter must be a scalar or dict, but you passed"
" a \"{}\"")
with pytest.raises(TypeError, match=msg.format('list')):
- self.frame.fillna([1, 2])
+ float_frame.fillna([1, 2])
# tuple
with pytest.raises(TypeError, match=msg.format('tuple')):
- self.frame.fillna((1, 2))
+ float_frame.fillna((1, 2))
# frame with series
msg = ("\"value\" parameter must be a scalar, dict or Series, but you"
" passed a \"DataFrame\"")
with pytest.raises(TypeError, match=msg):
- self.frame.iloc[:, 0].fillna(self.frame)
+ float_frame.iloc[:, 0].fillna(float_frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
@@ -628,16 +633,16 @@ def test_fillna_col_reordering(self):
filled = df.fillna(method='ffill')
assert df.columns.tolist() == filled.columns.tolist()
- def test_fill_corner(self):
- mf = self.mixed_frame
+ def test_fill_corner(self, float_frame, float_string_frame):
+ mf = float_string_frame
mf.loc[mf.index[5:20], 'foo'] = np.nan
mf.loc[mf.index[-10:], 'A'] = np.nan
- filled = self.mixed_frame.fillna(value=0)
+ filled = float_string_frame.fillna(value=0)
assert (filled.loc[filled.index[5:20], 'foo'] == 0).all()
- del self.mixed_frame['foo']
+ del float_string_frame['foo']
- empty_float = self.frame.reindex(columns=[])
+ empty_float = float_frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
@@ -652,7 +657,7 @@ def test_fill_value_when_combine_const(self):
assert_frame_equal(res, exp)
-class TestDataFrameInterpolate(TestData):
+class TestDataFrameInterpolate():
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
| One more steps towards #22471. | https://api.github.com/repos/pandas-dev/pandas/pulls/25640 | 2019-03-10T18:39:48Z | 2019-03-19T04:21:55Z | 2019-03-19T04:21:55Z | 2019-03-19T06:34:25Z |
Fixturize tests/frame/test_join.py | diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py
index 0508658766cd3..2c9fde652493d 100644
--- a/pandas/tests/frame/test_join.py
+++ b/pandas/tests/frame/test_join.py
@@ -4,7 +4,6 @@
import pytest
from pandas import DataFrame, Index, period_range
-from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -16,11 +15,6 @@ def frame_with_period_index():
index=period_range(start='2000', freq='A', periods=4))
-@pytest.fixture
-def frame():
- return TestData().frame
-
-
@pytest.fixture
def left():
return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
@@ -63,11 +57,11 @@ def test_join(left, right, how, sort, expected):
tm.assert_frame_equal(result, expected)
-def test_join_index(frame):
+def test_join_index(float_frame):
# left / right
- f = frame.loc[frame.index[:10], ['A', 'B']]
- f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]
+ f = float_frame.loc[float_frame.index[:10], ['A', 'B']]
+ f2 = float_frame.loc[float_frame.index[5:], ['C', 'D']].iloc[::-1]
joined = f.join(f2)
tm.assert_index_equal(f.index, joined.index)
@@ -91,7 +85,7 @@ def test_join_index(frame):
# outer
joined = f.join(f2, how='outer')
- tm.assert_index_equal(joined.index, frame.index.sort_values())
+ tm.assert_index_equal(joined.index, float_frame.index.sort_values())
tm.assert_index_equal(joined.columns, expected_columns)
with pytest.raises(ValueError, match='join method'):
@@ -101,16 +95,16 @@ def test_join_index(frame):
msg = 'columns overlap but no suffix'
for how in ('outer', 'left', 'inner'):
with pytest.raises(ValueError, match=msg):
- frame.join(frame, how=how)
+ float_frame.join(float_frame, how=how)
-def test_join_index_more(frame):
- af = frame.loc[:, ['A', 'B']]
- bf = frame.loc[::2, ['C', 'D']]
+def test_join_index_more(float_frame):
+ af = float_frame.loc[:, ['A', 'B']]
+ bf = float_frame.loc[::2, ['C', 'D']]
expected = af.copy()
- expected['C'] = frame['C'][::2]
- expected['D'] = frame['D'][::2]
+ expected['C'] = float_frame['C'][::2]
+ expected['D'] = float_frame['D'][::2]
result = af.join(bf)
tm.assert_frame_equal(result, expected)
@@ -122,28 +116,28 @@ def test_join_index_more(frame):
tm.assert_frame_equal(result, expected.loc[:, result.columns])
-def test_join_index_series(frame):
- df = frame.copy()
- s = df.pop(frame.columns[-1])
+def test_join_index_series(float_frame):
+ df = float_frame.copy()
+ s = df.pop(float_frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
- tm.assert_frame_equal(joined, frame, check_names=False)
+ tm.assert_frame_equal(joined, float_frame, check_names=False)
s.name = None
with pytest.raises(ValueError, match='must have a name'):
df.join(s)
-def test_join_overlap(frame):
- df1 = frame.loc[:, ['A', 'B', 'C']]
- df2 = frame.loc[:, ['B', 'C', 'D']]
+def test_join_overlap(float_frame):
+ df1 = float_frame.loc[:, ['A', 'B', 'C']]
+ df2 = float_frame.loc[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
- no_overlap = frame.loc[:, ['A', 'D']]
+ no_overlap = float_frame.loc[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
| One more steps towards #22471. | https://api.github.com/repos/pandas-dev/pandas/pulls/25639 | 2019-03-10T18:38:44Z | 2019-03-10T21:49:39Z | 2019-03-10T21:49:39Z | 2019-03-11T06:35:23Z |
Backport PR #25631 on branch 0.24.x (TST: failing wheel building on PY2 and old numpy) | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b97f5e0b6edf9..e1394af0cc022 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -20,7 +20,7 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
- compat, date_range, isna)
+ _np_version_under1p13, compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -682,6 +682,8 @@ def test_constructor_ndarray(self):
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
+ @pytest.mark.skipif(PY2 & _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
| Backport PR #25631: TST: failing wheel building on PY2 and old numpy | https://api.github.com/repos/pandas-dev/pandas/pulls/25638 | 2019-03-10T17:26:59Z | 2019-03-10T22:59:32Z | null | 2019-03-10T22:59:32Z |
TST: tests for maybe_promote (precursor to #23982) | diff --git a/pandas/conftest.py b/pandas/conftest.py
index c4285e9db038a..4bcd0ea8442e6 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -399,6 +399,10 @@ def tz_aware_fixture(request):
return request.param
+# Generate cartesian product of tz_aware_fixture:
+tz_aware_fixture2 = tz_aware_fixture
+
+
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
@@ -438,6 +442,46 @@ def string_dtype(request):
return request.param
+@pytest.fixture(params=BYTES_DTYPES)
+def bytes_dtype(request):
+ """Parametrized fixture for bytes dtypes.
+
+ * bytes
+ * 'bytes'
+ """
+ return request.param
+
+
+@pytest.fixture(params=OBJECT_DTYPES)
+def object_dtype(request):
+ """Parametrized fixture for object dtypes.
+
+ * object
+ * 'object'
+ """
+ return request.param
+
+
+@pytest.fixture(params=DATETIME64_DTYPES)
+def datetime64_dtype(request):
+ """Parametrized fixture for datetime64 dtypes.
+
+ * 'datetime64[ns]'
+ * 'M8[ns]'
+ """
+ return request.param
+
+
+@pytest.fixture(params=TIMEDELTA64_DTYPES)
+def timedelta64_dtype(request):
+ """Parametrized fixture for timedelta64 dtypes.
+
+ * 'timedelta64[ns]'
+ * 'm8[ns]'
+ """
+ return request.param
+
+
@pytest.fixture(params=FLOAT_DTYPES)
def float_dtype(request):
"""
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
new file mode 100644
index 0000000000000..5a5b5d47b3ccc
--- /dev/null
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -0,0 +1,677 @@
+"""
+These test the method maybe_promote from core/dtypes/cast.py
+"""
+
+import datetime
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import NaT, iNaT
+from pandas.compat import is_platform_windows
+
+from pandas.core.dtypes.cast import maybe_promote
+from pandas.core.dtypes.common import (
+ is_complex_dtype, is_datetime64_dtype, is_datetime_or_timedelta_dtype,
+ is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar,
+ is_string_dtype, is_timedelta64_dtype)
+from pandas.core.dtypes.dtypes import DatetimeTZDtype, PandasExtensionDtype
+
+import pandas as pd
+
+
+@pytest.fixture(params=[bool, 'uint8', 'int32', 'uint64', 'float32', 'float64',
+ 'complex64', 'complex128', 'M8[ns]', 'm8[ns]', str,
+ bytes, object])
+def any_numpy_dtype_reduced(request):
+ """
+ Parameterized fixture for numpy dtypes, reduced from any_numpy_dtype.
+
+ * bool
+ * 'int32'
+ * 'uint64'
+ * 'float32'
+ * 'float64'
+ * 'complex64'
+ * 'complex128'
+ * 'M8[ns]'
+ * 'M8[ns]'
+ * str
+ * bytes
+ * object
+ """
+ return request.param
+
+
+@pytest.fixture(params=[(True, None), (True, object), (False, None)],
+ ids=['True-None', 'True-object', 'False-None'])
+def box(request):
+ """
+ Parametrized fixture determining whether/how to transform fill_value.
+
+ Since fill_value is defined on a per-test basis, the actual transformation
+ (based on this fixture) is executed in _check_promote.
+
+ Returns
+ -------
+ boxed : Boolean
+ Whether fill_value should be wrapped in an np.array.
+ box_dtype : dtype
+ The dtype to pass to np.array([fill_value], dtype=box_dtype). If None,
+ then this is passed on unmodified, and corresponds to the numpy default
+ dtype for the given fill_value.
+
+ * (True, None) # fill_value wrapped in array with default dtype
+ * (True, object) # fill_value wrapped in array with object dtype
+ * (False, None) # fill_value passed on as scalar
+ """
+ return request.param
+
+
+def _safe_dtype_assert(left_dtype, right_dtype):
+ """
+ Compare two dtypes without raising TypeError.
+ """
+ if isinstance(right_dtype, PandasExtensionDtype):
+ # switch order of equality check because numpy dtypes (e.g. if
+ # left_dtype is np.object_) do not know some expected dtypes (e.g.
+ # DatetimeTZDtype) and would raise a TypeError in their __eq__-method.
+ assert right_dtype == left_dtype
+ else:
+ assert left_dtype == right_dtype
+
+
+def _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar=None, exp_val_for_array=None):
+ """
+ Auxiliary function to unify testing of scalar/array promotion.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The value to pass on as the first argument to maybe_promote.
+ fill_value : scalar
+ The value to pass on as the second argument to maybe_promote, either as
+ a scalar, or boxed into an array (depending on the parameter `boxed`).
+ boxed : Boolean
+ Parameter whether fill_value should be passed to maybe_promote
+ directly, or wrapped in an array (of dtype box_dtype).
+ box_dtype : dtype
+ The dtype to enforce when wrapping fill_value into an np.array.
+ expected_dtype : dtype
+ The expected dtype returned by maybe_promote (by design this is the
+ same regardless of whether fill_value was passed as a scalar or in an
+ array!).
+ exp_val_for_scalar : scalar
+ The expected value for the (potentially upcast) fill_value returned by
+ maybe_promote.
+ exp_val_for_array : scalar
+ The expected missing value marker for the expected_dtype (which is
+ returned by maybe_promote when it receives an array).
+ """
+ assert is_scalar(fill_value)
+
+ if boxed:
+ # in this case, we pass on fill_value wrapped in an array of specified
+ # box_dtype; the expected value returned from maybe_promote is the
+ # missing value marker for the returned dtype.
+ fill_array = np.array([fill_value], dtype=box_dtype)
+ result_dtype, result_fill_value = maybe_promote(dtype, fill_array)
+ expected_fill_value = exp_val_for_array
+ else:
+ # here, we pass on fill_value as a scalar directly; the expected value
+ # returned from maybe_promote is fill_value, potentially upcast to the
+ # returned dtype.
+ result_dtype, result_fill_value = maybe_promote(dtype, fill_value)
+ expected_fill_value = exp_val_for_scalar
+
+ _safe_dtype_assert(result_dtype, expected_dtype)
+
+ # for equal values, also check type (relevant e.g. for int vs float, resp.
+ # for different datetimes and timedeltas)
+ match_value = (result_fill_value == expected_fill_value
+ # disabled type check due to too many xfails; GH 23982/25425
+ # and type(result_fill_value) == type(expected_fill_value)
+ )
+
+ # for missing values, None == None and iNaT == iNaT (which is checked
+ # through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT
+ match_missing = ((result_fill_value is np.nan
+ and expected_fill_value is np.nan)
+ or (result_fill_value is NaT
+ and expected_fill_value is NaT))
+
+ assert match_value or match_missing
+
+
+def test_maybe_promote_int_with_int():
+ # placeholder due to too many xfails; see GH 23982 / 25425
+ pass
+
+
+# override parametrization due to to many xfails; see GH 23982 / 25425
+@pytest.mark.parametrize('box', [(True, None), (False, None)])
+def test_maybe_promote_int_with_float(any_int_dtype, float_dtype, box):
+ dtype = np.dtype(any_int_dtype)
+ fill_dtype = np.dtype(float_dtype)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if float_dtype == 'float32' and not boxed:
+ pytest.xfail('falsely upcasts to float64')
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = np.array([1], dtype=fill_dtype)[0]
+
+ # filling int with float always upcasts to float64
+ expected_dtype = np.float64
+ # fill_value can be different float type
+ exp_val_for_scalar = np.float64(fill_value)
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+# override parametrization due to to many xfails; see GH 23982 / 25425
+@pytest.mark.parametrize('box', [(True, None), (False, None)])
+def test_maybe_promote_float_with_int(float_dtype, any_int_dtype, box):
+
+ dtype = np.dtype(float_dtype)
+ fill_dtype = np.dtype(any_int_dtype)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = np.array([1], dtype=fill_dtype)[0]
+
+ # filling float with int always keeps float dtype
+ # because: np.finfo('float32').max > np.iinfo('uint64').max
+ expected_dtype = dtype
+ # output is not a generic float, but corresponds to expected_dtype
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_float_with_float():
+ # placeholder due to too many xfails; see GH 23982 / 25425
+ pass
+
+
+def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced, box):
+ dtype = np.dtype(bool)
+ fill_dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if boxed and fill_dtype == bool:
+ pytest.xfail('falsely upcasts to object')
+ if (boxed and box_dtype is None
+ and is_datetime_or_timedelta_dtype(fill_dtype)):
+ pytest.xfail('wrongly casts fill_value')
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = np.array([1], dtype=fill_dtype)[0]
+
+ # filling bool with anything but bool casts to object
+ expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan if fill_dtype != bool else None
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ fill_value = True
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if boxed and dtype == bool:
+ pytest.xfail('falsely upcasts to object')
+ if boxed and dtype not in (str, object) and box_dtype is None:
+ pytest.xfail('falsely upcasts to object')
+ if not boxed and is_datetime_or_timedelta_dtype(dtype):
+ pytest.xfail('raises error')
+
+ # filling anything but bool with bool casts to object
+ expected_dtype = np.dtype(object) if dtype != bool else dtype
+ # output is not a generic bool, but corresponds to expected_dtype
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
+ exp_val_for_array = np.nan if dtype != bool else None
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_bytes_with_any():
+ # placeholder due to too many xfails; see GH 23982 / 25425
+ pass
+
+
+def test_maybe_promote_any_with_bytes():
+ # placeholder due to too many xfails; see GH 23982 / 25425
+ pass
+
+
+def test_maybe_promote_datetime64_with_any():
+ # placeholder due to too many xfails; see GH 23982 / 25425
+ pass
+
+
+# override parametrization of box to add special case for dt_dtype
+@pytest.mark.parametrize('box', [
+ (True, None), # fill_value wrapped in array with default dtype
+ # disabled due to too many xfails; see GH 23982 / 25425
+ # (True, 'dt_dtype'), # fill_value in array with explicit datetime dtype
+ # (True, object), # fill_value wrapped in array with object dtype
+ (False, None) # fill_value passed on as scalar
+])
+@pytest.mark.parametrize('fill_value', [
+ pd.Timestamp('now'), np.datetime64('now'),
+ datetime.datetime.now(), datetime.date.today()
+], ids=['pd.Timestamp', 'np.datetime64', 'datetime.datetime', 'datetime.date'])
+def test_maybe_promote_any_with_datetime64(any_numpy_dtype_reduced,
+ datetime64_dtype, fill_value, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if is_datetime64_dtype(dtype):
+ if (boxed and (box_dtype == object
+ or (box_dtype is None
+ and not is_datetime64_dtype(type(fill_value))))):
+ pytest.xfail('falsely upcasts to object')
+ else:
+ if (boxed and (box_dtype == 'dt_dtype'
+ or (box_dtype is None
+ and is_datetime64_dtype(type(fill_value))))):
+ pytest.xfail('mix of lack of upcasting, resp. wrong missing value')
+ if not boxed and is_timedelta64_dtype(dtype):
+ pytest.xfail('raises error')
+
+ # special case for box_dtype
+ box_dtype = (np.dtype(datetime64_dtype) if box_dtype == 'dt_dtype'
+ else box_dtype)
+
+ # filling datetime with anything but datetime casts to object
+ if is_datetime64_dtype(dtype):
+ expected_dtype = dtype
+ # for datetime dtypes, scalar values get cast to pd.Timestamp.value
+ exp_val_for_scalar = pd.Timestamp(fill_value).value
+ exp_val_for_array = iNaT
+ else:
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+# override parametrization due to to many xfails; see GH 23982 / 25425
+@pytest.mark.parametrize('box', [(True, object)])
+def test_maybe_promote_datetimetz_with_any_numpy_dtype(
+ tz_aware_fixture, any_numpy_dtype_reduced, box):
+ dtype = DatetimeTZDtype(tz=tz_aware_fixture)
+ fill_dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if box_dtype != object:
+ pytest.xfail('does not upcast correctly')
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = np.array([1], dtype=fill_dtype)[0]
+
+ # filling datetimetz with any numpy dtype casts to object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+# override parametrization due to to many xfails; see GH 23982 / 25425
+@pytest.mark.parametrize('box', [(True, None), (True, object)])
+def test_maybe_promote_datetimetz_with_datetimetz(tz_aware_fixture,
+ tz_aware_fixture2, box):
+ dtype = DatetimeTZDtype(tz=tz_aware_fixture)
+ fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture2)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ from dateutil.tz import tzlocal
+ if is_platform_windows() and tz_aware_fixture2 == tzlocal():
+ pytest.xfail('Cannot process fill_value with this dtype, see GH 24310')
+ if dtype.tz == fill_dtype.tz and boxed:
+ pytest.xfail('falsely upcasts')
+ if dtype.tz != fill_dtype.tz and not boxed:
+ pytest.xfail('falsely upcasts')
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = pd.Series([10 ** 9], dtype=fill_dtype)[0]
+
+ # filling datetimetz with datetimetz casts to object, unless tz matches
+ exp_val_for_scalar = fill_value
+ if dtype.tz == fill_dtype.tz:
+ expected_dtype = dtype
+ exp_val_for_array = NaT
+ else:
+ expected_dtype = np.dtype(object)
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+@pytest.mark.parametrize('fill_value', [None, np.nan, NaT, iNaT],
+ ids=['None', 'np.nan', 'pd.NaT', 'iNaT'])
+# override parametrization due to to many xfails; see GH 23982 / 25425
+@pytest.mark.parametrize('box', [(False, None)])
+def test_maybe_promote_datetimetz_with_na(tz_aware_fixture, fill_value, box):
+
+ dtype = DatetimeTZDtype(tz=tz_aware_fixture)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if (boxed and (box_dtype == object
+ or (box_dtype is None
+ and (fill_value is None or fill_value is NaT)))):
+ pytest.xfail('false upcasts to object')
+ # takes the opinion that DatetimeTZ should have single na-marker
+ # using iNaT would lead to errors elsewhere -> NaT
+ if not boxed and fill_value == iNaT:
+ pytest.xfail('wrong missing value marker')
+
+ expected_dtype = dtype
+ # DatetimeTZDtype does not use iNaT as missing value marker
+ exp_val_for_scalar = NaT
+ exp_val_for_array = NaT
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+@pytest.mark.parametrize('fill_value', [
+ pd.Timestamp('now'), np.datetime64('now'),
+ datetime.datetime.now(), datetime.date.today()
+], ids=['pd.Timestamp', 'np.datetime64', 'datetime.datetime', 'datetime.date'])
+def test_maybe_promote_any_numpy_dtype_with_datetimetz(
+ any_numpy_dtype_reduced, tz_aware_fixture, fill_value, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if is_datetime_or_timedelta_dtype(dtype) and not boxed:
+ pytest.xfail('raises error')
+
+ fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
+
+ # filling any numpy dtype with datetimetz casts to object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_timedelta64_with_any():
+ # placeholder due to too many xfails; see GH 23982 / 25425
+ pass
+
+
+@pytest.mark.parametrize('fill_value', [
+ pd.Timedelta(days=1), np.timedelta64(24, 'h'), datetime.timedelta(1)
+], ids=['pd.Timedelta', 'np.timedelta64', 'datetime.timedelta'])
+# override parametrization of box to add special case for td_dtype
+@pytest.mark.parametrize('box', [
+ (True, None), # fill_value wrapped in array with default dtype
+ # disabled due to too many xfails; see GH 23982 / 25425
+ # (True, 'td_dtype'), # fill_value in array with explicit timedelta dtype
+ (True, object), # fill_value wrapped in array with object dtype
+ (False, None) # fill_value passed on as scalar
+])
+def test_maybe_promote_any_with_timedelta64(
+ any_numpy_dtype_reduced, timedelta64_dtype, fill_value, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if is_timedelta64_dtype(dtype):
+ if (boxed and (box_dtype == object
+ or (box_dtype is None
+ and not is_timedelta64_dtype(type(fill_value))))):
+ pytest.xfail('falsely upcasts to object')
+ else:
+ if (boxed and box_dtype is None
+ and is_timedelta64_dtype(type(fill_value))):
+ pytest.xfail('does not upcast correctly')
+ if (not boxed and is_timedelta64_dtype(type(fill_value)) and (
+ is_integer_dtype(dtype) or is_float_dtype(dtype)
+ or is_complex_dtype(dtype)
+ or issubclass(dtype.type, np.bytes_))):
+ pytest.xfail('does not upcast correctly')
+ if box_dtype == 'td_dtype':
+ pytest.xfail('falsely upcasts')
+ if not boxed and is_datetime64_dtype(dtype):
+ pytest.xfail('raises error')
+
+ # special case for box_dtype
+ box_dtype = (np.dtype(timedelta64_dtype) if box_dtype == 'td_dtype'
+ else box_dtype)
+
+ # filling anything but timedelta with timedelta casts to object
+ if is_timedelta64_dtype(dtype):
+ expected_dtype = dtype
+ # for timedelta dtypes, scalar values get cast to pd.Timedelta.value
+ exp_val_for_scalar = pd.Timedelta(fill_value).value
+ exp_val_for_array = iNaT
+ else:
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_string_with_any(string_dtype,
+ any_numpy_dtype_reduced, box):
+ dtype = np.dtype(string_dtype)
+ fill_dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if (boxed and box_dtype is None
+ and is_datetime_or_timedelta_dtype(fill_dtype)):
+ pytest.xfail('wrong missing value marker')
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = np.array([1], dtype=fill_dtype)[0]
+
+ # filling string with anything casts to object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+# override parametrization of box to add special case for str
+@pytest.mark.parametrize('box', [
+ # disabled due to too many xfails; see GH 23982 / 25425
+ # (True, None), # fill_value wrapped in array with default dtype
+ # (True, 'str'), # fill_value wrapped in array with generic string-dtype
+ (True, object), # fill_value wrapped in array with object dtype
+ (False, None) # fill_value passed on as scalar
+])
+def test_maybe_promote_any_with_string(any_numpy_dtype_reduced,
+ string_dtype, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ fill_dtype = np.dtype(string_dtype)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if is_datetime_or_timedelta_dtype(dtype) and box_dtype != object:
+ pytest.xfail('does not upcast or raises')
+ if (boxed and box_dtype in (None, 'str') and (
+ is_integer_dtype(dtype) or is_float_dtype(dtype)
+ or is_complex_dtype(dtype)
+ or issubclass(dtype.type, np.bytes_))):
+ pytest.xfail('does not upcast correctly')
+
+ # create array of given dtype
+ fill_value = 'abc'
+
+ # special case for box_dtype (cannot use fixture in parametrization)
+ box_dtype = fill_dtype if box_dtype == 'str' else box_dtype
+
+ # filling anything with a string casts to object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_object_with_any(object_dtype,
+ any_numpy_dtype_reduced, box):
+ dtype = np.dtype(object_dtype)
+ fill_dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if (boxed and box_dtype is None
+ and is_datetime_or_timedelta_dtype(fill_dtype)):
+ pytest.xfail('wrong missing value marker')
+
+ # create array of given dtype; casts "1" to correct dtype
+ fill_value = np.array([1], dtype=fill_dtype)[0]
+
+ # filling object with anything stays object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+def test_maybe_promote_any_with_object(any_numpy_dtype_reduced,
+ object_dtype, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if not boxed and is_datetime_or_timedelta_dtype(dtype):
+ pytest.xfail('raises error')
+
+ # create array of object dtype from a scalar value (i.e. passing
+ # dtypes.common.is_scalar), which can however not be cast to int/float etc.
+ fill_value = pd.DateOffset(1)
+
+ # filling object with anything stays object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+@pytest.mark.parametrize('fill_value', [None, np.nan, NaT, iNaT],
+ ids=['None', 'np.nan', 'pd.NaT', 'iNaT'])
+# override parametrization due to to many xfails; see GH 23982 / 25425
+@pytest.mark.parametrize('box', [(False, None)])
+def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced,
+ fill_value, box):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+ boxed, box_dtype = box # read from parametrized fixture
+
+ if (dtype == bytes and not boxed
+ and fill_value is not None and fill_value is not NaT):
+ pytest.xfail('does not upcast to object')
+ elif dtype == 'uint64' and not boxed and fill_value == iNaT:
+ pytest.xfail('does not upcast correctly')
+ elif is_datetime_or_timedelta_dtype(dtype) and boxed:
+ pytest.xfail('falsely upcasts to object')
+ elif (boxed and (is_integer_dtype(dtype) or is_float_dtype(dtype)
+ or is_complex_dtype(dtype))
+ and fill_value is not NaT and dtype != 'uint64'):
+ pytest.xfail('falsely upcasts to object')
+ elif (boxed and dtype == 'uint64'
+ and (fill_value is np.nan or fill_value is None)):
+ pytest.xfail('falsely upcasts to object')
+ # below: opinionated that iNaT should be interpreted as missing value
+ elif (not boxed and (is_float_dtype(dtype) or is_complex_dtype(dtype))
+ and fill_value == iNaT):
+ pytest.xfail('does not cast to missing value marker correctly')
+ elif ((is_string_dtype(dtype) or dtype == bool)
+ and not boxed and fill_value == iNaT):
+ pytest.xfail('does not cast to missing value marker correctly')
+
+ if is_integer_dtype(dtype) and dtype == 'uint64' and fill_value == iNaT:
+ # uint64 + negative int casts to object; iNaT is considered as missing
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = np.nan
+ elif is_integer_dtype(dtype) and fill_value == iNaT:
+ # other integer + iNaT casts to int64
+ expected_dtype = np.int64
+ exp_val_for_scalar = iNaT
+ elif is_integer_dtype(dtype) and fill_value is not NaT:
+ # integer + other missing value (np.nan / None) casts to float
+ expected_dtype = np.float64
+ exp_val_for_scalar = np.nan
+ elif is_object_dtype(dtype) and (fill_value == iNaT or fill_value is NaT):
+ # inserting into object does not cast the value
+ # but *does* cast None to np.nan
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = fill_value
+ elif is_datetime_or_timedelta_dtype(dtype):
+ # datetime / timedelta cast all missing values to iNaT
+ expected_dtype = dtype
+ exp_val_for_scalar = iNaT
+ elif fill_value is NaT:
+ # NaT upcasts everything that's not datetime/timedelta to object
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = NaT
+ elif is_float_dtype(dtype) or is_complex_dtype(dtype):
+ # float / complex + missing value (!= NaT) stays the same
+ expected_dtype = dtype
+ exp_val_for_scalar = np.nan
+ else:
+ # all other cases cast to object, and use np.nan as missing value
+ expected_dtype = np.dtype(object)
+ exp_val_for_scalar = np.nan
+
+ # array case has same expected_dtype; but returns corresponding na-marker
+ if is_integer_dtype(expected_dtype):
+ # integers cannot hold NaNs; maybe_promote_with_array returns None
+ exp_val_for_array = None
+ elif is_datetime_or_timedelta_dtype(expected_dtype):
+ exp_val_for_array = iNaT
+ else: # expected_dtype = float / complex / object
+ exp_val_for_array = np.nan
+
+ _check_promote(dtype, fill_value, boxed, box_dtype, expected_dtype,
+ exp_val_for_scalar, exp_val_for_array)
+
+
+@pytest.mark.parametrize('dim', [0, 2, 3])
+def test_maybe_promote_dimensions(any_numpy_dtype_reduced, dim):
+ dtype = np.dtype(any_numpy_dtype_reduced)
+
+ # create 0-dim array of given dtype; casts "1" to correct dtype
+ fill_array = np.array(1, dtype=dtype)
+
+ # expand to desired dimension:
+ for _ in range(dim):
+ fill_array = np.expand_dims(fill_array, 0)
+
+ # test against 1-dimensional case
+ expected_dtype, expected_missing_value = maybe_promote(
+ dtype, np.array([1], dtype=dtype))
+
+ result_dtype, result_missing_value = maybe_promote(dtype, fill_array)
+
+ assert result_dtype == expected_dtype
+ # None == None, iNaT == iNaT, but np.nan != np.nan
+ assert ((result_missing_value == expected_missing_value)
+ or (result_missing_value is np.nan
+ and expected_missing_value is np.nan))
| First step towards #23833, resp. precursor to #23982.
TL;DR: `maybe_promote` is quite broken. #23982 tries to come up with tests that it should pass, and #25425 tries to fix the implementation. However, #23982 is quite big, so @jreback asked for a smaller version that (mostly) just tests existing behaviour. | https://api.github.com/repos/pandas-dev/pandas/pulls/25637 | 2019-03-10T17:26:32Z | 2019-06-21T15:52:25Z | 2019-06-21T15:52:25Z | 2019-06-23T17:30:49Z |
Fixturize tests/frame/test_dtypes.py | diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index 61a8ea0c384ba..3232c400bd8ce 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -231,6 +231,18 @@ def mixed_int_frame():
return df
+@pytest.fixture
+def mixed_type_frame():
+ """
+ Fixture for DataFrame of float/int/string columns with RangeIndex
+ Columns are ['a', 'b', 'c', 'float32', 'int32'].
+ """
+ return DataFrame({'a': 1., 'b': 2, 'c': 'foo',
+ 'float32': np.array([1.] * 10, dtype='float32'),
+ 'int32': np.array([1] * 10, dtype='int32')},
+ index=np.arange(10))
+
+
@pytest.fixture
def timezone_frame():
"""
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 7ed601e4f7046..f68770d796292 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -11,13 +11,19 @@
Categorical, DataFrame, Series, Timedelta, Timestamp,
_np_version_under1p14, concat, date_range, option_context)
from pandas.core.arrays import integer_array
-from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_series_equal, makeCustomDataframe as mkdf)
-class TestDataFrameDataTypes(TestData):
+def _check_cast(df, v):
+ """
+ Check if all dtypes of df are equal to v
+ """
+ assert all(s.dtype.name == v for _, s in df.items())
+
+
+class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
@@ -400,10 +406,10 @@ def test_select_dtypes_typecodes(self):
FLOAT_TYPES = list(np.typecodes['AllFloat'])
assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
- def test_dtypes_gh8722(self):
- self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
- result = self.mixed_frame.dtypes
- expected = Series({k: v.dtype for k, v in self.mixed_frame.items()},
+ def test_dtypes_gh8722(self, float_string_frame):
+ float_string_frame['bool'] = float_string_frame['A'] > 0
+ result = float_string_frame.dtypes
+ expected = Series({k: v.dtype for k, v in float_string_frame.items()},
index=result.index)
assert_series_equal(result, expected)
@@ -413,8 +419,8 @@ def test_dtypes_gh8722(self):
result = df.dtypes
assert_series_equal(result, Series({0: np.dtype('int64')}))
- def test_ftypes(self):
- frame = self.mixed_float
+ def test_ftypes(self, mixed_float_frame):
+ frame = mixed_float_frame
expected = Series(dict(A='float32:dense',
B='float32:dense',
C='float16:dense',
@@ -425,32 +431,39 @@ def test_ftypes(self):
result = frame.ftypes.sort_values()
assert_series_equal(result, expected)
- def test_astype(self):
- casted = self.frame.astype(int)
- expected = DataFrame(self.frame.values.astype(int),
- index=self.frame.index,
- columns=self.frame.columns)
+ def test_astype_float(self, float_frame):
+ casted = float_frame.astype(int)
+ expected = DataFrame(float_frame.values.astype(int),
+ index=float_frame.index,
+ columns=float_frame.columns)
assert_frame_equal(casted, expected)
- casted = self.frame.astype(np.int32)
- expected = DataFrame(self.frame.values.astype(np.int32),
- index=self.frame.index,
- columns=self.frame.columns)
+ casted = float_frame.astype(np.int32)
+ expected = DataFrame(float_frame.values.astype(np.int32),
+ index=float_frame.index,
+ columns=float_frame.columns)
assert_frame_equal(casted, expected)
- self.frame['foo'] = '5'
- casted = self.frame.astype(int)
- expected = DataFrame(self.frame.values.astype(int),
- index=self.frame.index,
- columns=self.frame.columns)
+ float_frame['foo'] = '5'
+ casted = float_frame.astype(int)
+ expected = DataFrame(float_frame.values.astype(int),
+ index=float_frame.index,
+ columns=float_frame.columns)
assert_frame_equal(casted, expected)
+ def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
- def _check_cast(df, v):
- assert (list({s.dtype.name for
- _, s in df.items()})[0] == v)
+ casted = mixed_float_frame.reindex(
+ columns=['A', 'B']).astype('float32')
+ _check_cast(casted, 'float32')
+
+ casted = mixed_float_frame.reindex(
+ columns=['A', 'B']).astype('float16')
+ _check_cast(casted, 'float16')
- mn = self.all_mixed._get_numeric_data().copy()
+ def test_astype_mixed_type(self, mixed_type_frame):
+ # mixed casting
+ mn = mixed_type_frame._get_numeric_data().copy()
mn['little_float'] = np.array(12345., dtype='float16')
mn['big_float'] = np.array(123456789101112., dtype='float64')
@@ -460,15 +473,9 @@ def _check_cast(df, v):
casted = mn.astype('int64')
_check_cast(casted, 'int64')
- casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float32')
- _check_cast(casted, 'float32')
-
casted = mn.reindex(columns=['little_float']).astype('float16')
_check_cast(casted, 'float16')
- casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float16')
- _check_cast(casted, 'float16')
-
casted = mn.astype('float32')
_check_cast(casted, 'float32')
@@ -479,39 +486,40 @@ def _check_cast(df, v):
casted = mn.astype('O')
_check_cast(casted, 'object')
- def test_astype_with_exclude_string(self):
- df = self.frame.copy()
- expected = self.frame.astype(int)
+ def test_astype_with_exclude_string(self, float_frame):
+ df = float_frame.copy()
+ expected = float_frame.astype(int)
df['string'] = 'foo'
casted = df.astype(int, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
- df = self.frame.copy()
- expected = self.frame.astype(np.int32)
+ df = float_frame.copy()
+ expected = float_frame.astype(np.int32)
df['string'] = 'foo'
casted = df.astype(np.int32, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
- def test_astype_with_view(self):
-
- tf = self.mixed_float.reindex(columns=['A', 'B', 'C'])
-
- casted = tf.astype(np.int64)
-
- casted = tf.astype(np.float32)
+ def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
- tf = np.round(self.frame).astype(np.int32)
+ tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
- tf = self.frame.astype(np.float64)
+ tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
+ def test_astype_with_view_mixed_float(self, mixed_float_frame):
+
+ tf = mixed_float_frame.reindex(columns=['A', 'B', 'C'])
+
+ casted = tf.astype(np.int64)
+ casted = tf.astype(np.float32) # noqa
+
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
@@ -927,12 +935,12 @@ def test_asarray_homogenous(self):
tm.assert_numpy_array_equal(result, expected)
-class TestDataFrameDatetimeWithTZ(TestData):
+class TestDataFrameDatetimeWithTZ:
- def test_interleave(self):
+ def test_interleave(self, timezone_frame):
# interleave with object
- result = self.tzframe.assign(D='foo').values
+ result = timezone_frame.assign(D='foo').values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
@@ -948,7 +956,7 @@ def test_interleave(self):
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
- result = self.tzframe.values
+ result = timezone_frame.values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
@@ -963,7 +971,7 @@ def test_interleave(self):
tz='CET')]], dtype=object).T
tm.assert_numpy_array_equal(result, expected)
- def test_astype(self):
+ def test_astype(self, timezone_frame):
# astype
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
@@ -979,12 +987,12 @@ def test_astype(self):
tz='CET')]],
dtype=object).T
expected = DataFrame(expected,
- index=self.tzframe.index,
- columns=self.tzframe.columns, dtype=object)
- result = self.tzframe.astype(object)
+ index=timezone_frame.index,
+ columns=timezone_frame.columns, dtype=object)
+ result = timezone_frame.astype(object)
assert_frame_equal(result, expected)
- result = self.tzframe.astype('datetime64[ns]')
+ result = timezone_frame.astype('datetime64[ns]')
expected = DataFrame({'A': date_range('20130101', periods=3),
'B': (date_range('20130101', periods=3,
tz='US/Eastern')
@@ -998,19 +1006,19 @@ def test_astype(self):
expected.iloc[1, 2] = pd.NaT
assert_frame_equal(result, expected)
- def test_astype_str(self):
+ def test_astype_str(self, timezone_frame):
# str formatting
- result = self.tzframe.astype(str)
+ result = timezone_frame.astype(str)
expected = DataFrame([['2013-01-01', '2013-01-01 00:00:00-05:00',
'2013-01-01 00:00:00+01:00'],
['2013-01-02', 'NaT', 'NaT'],
['2013-01-03', '2013-01-03 00:00:00-05:00',
'2013-01-03 00:00:00+01:00']],
- columns=self.tzframe.columns)
+ columns=timezone_frame.columns)
tm.assert_frame_equal(result, expected)
with option_context('display.max_columns', 20):
- result = str(self.tzframe)
+ result = str(timezone_frame)
assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 '
'2013-01-01 00:00:00+01:00') in result
assert ('1 2013-01-02 '
| One more steps towards #22471. Again, needing to re-add some fixtures that were removed in #24885. | https://api.github.com/repos/pandas-dev/pandas/pulls/25636 | 2019-03-10T17:13:51Z | 2019-06-28T15:15:03Z | 2019-06-28T15:15:03Z | 2019-06-28T17:03:11Z |
Fixturize tests/frame/test_constructors.py | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 434ee2f8bf0af..981dc8b32b8cc 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -17,7 +17,6 @@
from pandas import (
Categorical, DataFrame, Index, MultiIndex, RangeIndex, Series, Timedelta,
Timestamp, date_range, isna)
-from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
@@ -25,7 +24,7 @@
'int32', 'int64']
-class TestDataFrameConstructors(TestData):
+class TestDataFrameConstructors:
@pytest.mark.parametrize('constructor', [
lambda: DataFrame(),
@@ -60,14 +59,14 @@ def test_emptylike_constructor(
result = DataFrame(emptylike)
tm.assert_frame_equal(result, expected)
- def test_constructor_mixed(self):
+ def test_constructor_mixed(self, float_string_frame):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
- assert self.mixed_frame['foo'].dtype == np.object_
+ assert float_string_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
@@ -181,11 +180,11 @@ def test_constructor_dtype_str_na_values(self, string_dtype):
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
- def test_constructor_rec(self):
- rec = self.frame.to_records(index=False)
+ def test_constructor_rec(self, float_frame):
+ rec = float_frame.to_records(index=False)
rec.dtype.names = list(rec.dtype.names)[::-1]
- index = self.frame.index
+ index = float_frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
@@ -244,24 +243,29 @@ def test_constructor_ordereddict(self):
assert expected == list(df.columns)
def test_constructor_dict(self):
- frame = DataFrame({'col1': self.ts1,
- 'col2': self.ts2})
+ datetime_series = tm.makeTimeSeries(nper=30)
+ # test expects index shifted by 5
+ datetime_series_short = tm.makeTimeSeries(nper=30)[5:]
+
+ frame = DataFrame({'col1': datetime_series,
+ 'col2': datetime_series_short})
# col2 is padded with NaN
- assert len(self.ts1) == 30
- assert len(self.ts2) == 25
+ assert len(datetime_series) == 30
+ assert len(datetime_series_short) == 25
- tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
+ tm.assert_series_equal(frame['col1'], datetime_series.rename('col1'))
- exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
- index=self.ts1.index, name='col2')
+ exp = pd.Series(np.concatenate([[np.nan] * 5,
+ datetime_series_short.values]),
+ index=datetime_series.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
- frame = DataFrame({'col1': self.ts1,
- 'col2': self.ts2},
+ frame = DataFrame({'col1': datetime_series,
+ 'col2': datetime_series_short},
columns=['col2', 'col3', 'col4'])
- assert len(frame) == len(self.ts2)
+ assert len(frame) == len(datetime_series_short)
assert 'col1' not in frame
assert isna(frame['col3']).all()
@@ -361,18 +365,24 @@ def test_constructor_dict_nan_tuple_key(self, value):
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
+ datetime_series = tm.makeTimeSeries(nper=30)
+ datetime_series_short = tm.makeTimeSeries(nper=25)
+
# GH19018
# initialization ordering: by insertion order if python>= 3.6
- d = {'b': self.ts2, 'a': self.ts1}
+ d = {'b': datetime_series_short, 'a': datetime_series}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
+ datetime_series = tm.makeTimeSeries(nper=30)
+ datetime_series_short = tm.makeTimeSeries(nper=25)
+
# GH19018
# initialization ordering: by value if python<3.6
- d = {'b': self.ts2, 'a': self.ts1}
+ d = {'b': datetime_series_short, 'a': datetime_series}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
@@ -462,7 +472,7 @@ def test_constructor_with_embedded_frames(self):
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
- def test_constructor_subclass_dict(self):
+ def test_constructor_subclass_dict(self, float_frame):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
@@ -478,13 +488,13 @@ def test_constructor_subclass_dict(self):
# try with defaultdict
from collections import defaultdict
data = {}
- self.frame['B'][:10] = np.nan
- for k, v in self.frame.items():
+ float_frame['B'][:10] = np.nan
+ for k, v in float_frame.items():
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
- tm.assert_frame_equal(self.frame.sort_index(), frame)
+ tm.assert_frame_equal(float_frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
@@ -923,14 +933,14 @@ def test_constructor_arrays_and_scalars(self):
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
- def test_constructor_DataFrame(self):
- df = DataFrame(self.frame)
- tm.assert_frame_equal(df, self.frame)
+ def test_constructor_DataFrame(self, float_frame):
+ df = DataFrame(float_frame)
+ tm.assert_frame_equal(df, float_frame)
- df_casted = DataFrame(self.frame, dtype=np.int64)
+ df_casted = DataFrame(float_frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
- def test_constructor_more(self):
+ def test_constructor_more(self, float_frame):
# used to be in test_matrix.py
arr = np.random.randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
@@ -956,8 +966,8 @@ def test_constructor_more(self):
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
- dm = DataFrame(DataFrame(self.frame._series))
- tm.assert_frame_equal(dm, self.frame)
+ dm = DataFrame(DataFrame(float_frame._series))
+ tm.assert_frame_equal(dm, float_frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
@@ -1223,8 +1233,9 @@ def test_constructor_scalar(self):
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
- def test_constructor_Series_copy_bug(self):
- df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
+ def test_constructor_Series_copy_bug(self, float_frame):
+ df = DataFrame(float_frame['A'], index=float_frame.index,
+ columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
@@ -1286,10 +1297,10 @@ def test_constructor_list_of_namedtuples(self):
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
- def test_constructor_orient(self):
- data_dict = self.mixed_frame.T._series
+ def test_constructor_orient(self, float_string_frame):
+ data_dict = float_string_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
- expected = self.mixed_frame.sort_index()
+ expected = float_string_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
@@ -1393,38 +1404,38 @@ def test_constructor_Series_differently_indexed(self):
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
- def test_constructor_manager_resize(self):
- index = list(self.frame.index[:5])
- columns = list(self.frame.columns[:3])
+ def test_constructor_manager_resize(self, float_frame):
+ index = list(float_frame.index[:5])
+ columns = list(float_frame.columns[:3])
- result = DataFrame(self.frame._data, index=index,
+ result = DataFrame(float_frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
- def test_constructor_from_items(self):
- items = [(c, self.frame[c]) for c in self.frame.columns]
+ def test_constructor_from_items(self, float_frame, float_string_frame):
+ items = [(c, float_frame[c]) for c in float_frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
- tm.assert_frame_equal(recons, self.frame)
+ tm.assert_frame_equal(recons, float_frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
- tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
+ tm.assert_frame_equal(recons, float_frame.loc[:, ['C', 'B', 'A']])
# orient='index'
- row_items = [(idx, self.mixed_frame.xs(idx))
- for idx in self.mixed_frame.index]
+ row_items = [(idx, float_string_frame.xs(idx))
+ for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
- columns=self.mixed_frame.columns,
+ columns=float_string_frame.columns,
orient='index')
- tm.assert_frame_equal(recons, self.mixed_frame)
+ tm.assert_frame_equal(recons, float_string_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
@@ -1435,16 +1446,16 @@ def test_constructor_from_items(self):
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
- [('bar', 'baz')] * len(self.mixed_frame))
- self.mixed_frame['foo'] = arr
- row_items = [(idx, list(self.mixed_frame.xs(idx)))
- for idx in self.mixed_frame.index]
+ [('bar', 'baz')] * len(float_string_frame))
+ float_string_frame['foo'] = arr
+ row_items = [(idx, list(float_string_frame.xs(idx)))
+ for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
- columns=self.mixed_frame.columns,
+ columns=float_string_frame.columns,
orient='index')
- tm.assert_frame_equal(recons, self.mixed_frame)
+ tm.assert_frame_equal(recons, float_string_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
@@ -1485,14 +1496,15 @@ def test_from_items_deprecation(self):
columns=['col1', 'col2', 'col3'],
orient='index')
- def test_constructor_mix_series_nonseries(self):
- df = DataFrame({'A': self.frame['A'],
- 'B': list(self.frame['B'])}, columns=['A', 'B'])
- tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
+ def test_constructor_mix_series_nonseries(self, float_frame):
+ df = DataFrame({'A': float_frame['A'],
+ 'B': list(float_frame['B'])}, columns=['A', 'B'])
+ tm.assert_frame_equal(df, float_frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
- DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
+ DataFrame({'A': float_frame['A'],
+ 'B': list(float_frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
@@ -1752,24 +1764,24 @@ def test_constructor_for_list_with_dtypes(self):
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
- def test_constructor_frame_copy(self):
- cop = DataFrame(self.frame, copy=True)
+ def test_constructor_frame_copy(self, float_frame):
+ cop = DataFrame(float_frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
- assert not (self.frame['A'] == 5).all()
+ assert not (float_frame['A'] == 5).all()
- def test_constructor_ndarray_copy(self):
- df = DataFrame(self.frame.values)
+ def test_constructor_ndarray_copy(self, float_frame):
+ df = DataFrame(float_frame.values)
- self.frame.values[5] = 5
+ float_frame.values[5] = 5
assert (df.values[5] == 5).all()
- df = DataFrame(self.frame.values, copy=True)
- self.frame.values[6] = 6
+ df = DataFrame(float_frame.values, copy=True)
+ float_frame.values[6] = 6
assert not (df.values[6] == 6).all()
- def test_constructor_series_copy(self):
- series = self.frame._series
+ def test_constructor_series_copy(self, float_frame):
+ series = float_frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
@@ -2318,7 +2330,7 @@ class List(list):
tm.assert_frame_equal(result, expected)
-class TestDataFrameConstructorWithDatetimeTZ(TestData):
+class TestDataFrameConstructorWithDatetimeTZ:
def test_from_dict(self):
| One more steps towards #22471. Again, needing to re-add some fixtures that were removed in #24885. | https://api.github.com/repos/pandas-dev/pandas/pulls/25635 | 2019-03-10T17:11:33Z | 2019-06-28T15:15:59Z | 2019-06-28T15:15:59Z | 2019-06-28T17:03:04Z |
Fixturize tests/frame/test_combine_concat.py | diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index c2364dc135a9a..c803d15a690c4 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -11,12 +11,11 @@
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
-from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
-class TestDataFrameConcatCommon(TestData):
+class TestDataFrameConcatCommon():
def test_concat_multiple_frames_dtypes(self):
@@ -515,7 +514,7 @@ def test_concat_astype_dup_col(self):
tm.assert_frame_equal(result, expected)
-class TestDataFrameCombineFirst(TestData):
+class TestDataFrameCombineFirst():
def test_combine_first_mixed(self):
a = Series(['a', 'b'], index=lrange(2))
@@ -531,22 +530,22 @@ def test_combine_first_mixed(self):
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
- def test_combine_first(self):
+ def test_combine_first(self, float_frame):
# disjoint
- head, tail = self.frame[:5], self.frame[5:]
+ head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
- reordered_frame = self.frame.reindex(combined.index)
+ reordered_frame = float_frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
- assert tm.equalContents(combined.columns, self.frame.columns)
+ assert tm.equalContents(combined.columns, float_frame.columns)
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
- fcopy = self.frame.copy()
+ fcopy = float_frame.copy()
fcopy['A'] = 1
del fcopy['C']
- fcopy2 = self.frame.copy()
+ fcopy2 = float_frame.copy()
fcopy2['B'] = 0
del fcopy2['D']
@@ -570,20 +569,20 @@ def test_combine_first(self):
assert (combined['A'][:10] == 0).all()
# no overlap
- f = self.frame[:10]
- g = self.frame[10:]
+ f = float_frame[:10]
+ g = float_frame[10:]
combined = f.combine_first(g)
assert_series_equal(combined['A'].reindex(f.index), f['A'])
assert_series_equal(combined['A'].reindex(g.index), g['A'])
# corner cases
- comb = self.frame.combine_first(self.empty)
- assert_frame_equal(comb, self.frame)
+ comb = float_frame.combine_first(DataFrame({}))
+ assert_frame_equal(comb, float_frame)
- comb = self.empty.combine_first(self.frame)
- assert_frame_equal(comb, self.frame)
+ comb = DataFrame({}).combine_first(float_frame)
+ assert_frame_equal(comb, float_frame)
- comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
+ comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
@@ -850,7 +849,7 @@ def test_concat_datetime_datetime64_frame(self):
pd.concat([df1, df2_obj])
-class TestDataFrameUpdate(TestData):
+class TestDataFrameUpdate():
def test_update_nan(self):
# #15593 #15617
| One more steps towards #22471 | https://api.github.com/repos/pandas-dev/pandas/pulls/25634 | 2019-03-10T17:10:19Z | 2019-03-10T21:45:02Z | 2019-03-10T21:45:01Z | 2019-03-11T07:22:41Z |
Fixturize tests/frame/test_indexing.py | diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index d8a590bc492a4..61a8ea0c384ba 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -253,6 +253,17 @@ def timezone_frame():
return df
+@pytest.fixture
+def uint64_frame():
+ """
+ Fixture for DataFrame with uint64 values
+
+ Columns are ['A', 'B']
+ """
+ return DataFrame({'A': np.arange(3), 'B': [2**63, 2**63 + 5, 2**63 + 10]},
+ dtype=np.uint64)
+
+
@pytest.fixture
def simple_frame():
"""
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 3c9558d5cbd10..3b8daa28227f8 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -25,9 +25,9 @@
class TestDataFrameIndexing(TestData):
- def test_getitem(self):
+ def test_getitem(self, float_frame):
# Slicing
- sl = self.frame[:20]
+ sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
@@ -35,14 +35,14 @@ def test_getitem(self):
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
- for key, _ in self.frame._series.items():
- assert self.frame[key] is not None
+ for key, _ in float_frame._series.items():
+ assert float_frame[key] is not None
- assert 'random' not in self.frame
+ assert 'random' not in float_frame
with pytest.raises(KeyError, match='random'):
- self.frame['random']
+ float_frame['random']
- df = self.frame.copy()
+ df = float_frame.copy()
df['$10'] = np.random.randn(len(df))
ad = np.random.randn(len(df))
@@ -59,13 +59,13 @@ def test_getitem_dupe_cols(self):
with pytest.raises(KeyError):
df[['baf']]
- def test_get(self):
- b = self.frame.get('B')
- assert_series_equal(b, self.frame['B'])
+ def test_get(self, float_frame):
+ b = float_frame.get('B')
+ assert_series_equal(b, float_frame['B'])
- assert self.frame.get('foo') is None
- assert_series_equal(self.frame.get('foo', self.frame['B']),
- self.frame['B'])
+ assert float_frame.get('foo') is None
+ assert_series_equal(float_frame.get('foo', float_frame['B']),
+ float_frame['B'])
@pytest.mark.parametrize("df", [
DataFrame(),
@@ -76,10 +76,10 @@ def test_get_none(self, df):
# see gh-5652
assert df.get(None) is None
- def test_loc_iterable(self):
+ def test_loc_iterable(self, float_frame):
idx = iter(['A', 'B', 'C'])
- result = self.frame.loc[:, idx]
- expected = self.frame.loc[:, ['A', 'B', 'C']]
+ result = float_frame.loc[:, idx]
+ expected = float_frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
@@ -89,11 +89,11 @@ def test_loc_iterable(self):
lambda l: dict(zip(l, range(len(l)))).keys()],
ids=["list", "iter", "Index", "set", "dict", "dict_keys"])
@pytest.mark.parametrize("levels", [1, 2])
- def test_getitem_listlike(self, idx_type, levels):
+ def test_getitem_listlike(self, idx_type, levels, float_frame):
# GH 21294
if levels == 1:
- frame, missing = self.frame, 'food'
+ frame, missing = float_frame, 'food'
else:
# MultiIndex columns
frame = DataFrame(np.random.randn(8, 3),
@@ -129,30 +129,30 @@ def test_loc_uint64(self, val, expected):
expected.name = val
tm.assert_series_equal(result, expected)
- def test_getitem_callable(self):
+ def test_getitem_callable(self, float_frame):
# GH 12533
- result = self.frame[lambda x: 'A']
- tm.assert_series_equal(result, self.frame.loc[:, 'A'])
+ result = float_frame[lambda x: 'A']
+ tm.assert_series_equal(result, float_frame.loc[:, 'A'])
- result = self.frame[lambda x: ['A', 'B']]
- tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])
+ result = float_frame[lambda x: ['A', 'B']]
+ tm.assert_frame_equal(result, float_frame.loc[:, ['A', 'B']])
- df = self.frame[:3]
+ df = float_frame[:3]
result = df[lambda x: [True, False, True]]
- tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
+ tm.assert_frame_equal(result, float_frame.iloc[[0, 2], :])
- def test_setitem_list(self):
+ def test_setitem_list(self, float_frame):
- self.frame['E'] = 'foo'
- data = self.frame[['A', 'B']]
- self.frame[['B', 'A']] = data
+ float_frame['E'] = 'foo'
+ data = float_frame[['A', 'B']]
+ float_frame[['B', 'A']] = data
- assert_series_equal(self.frame['B'], data['A'], check_names=False)
- assert_series_equal(self.frame['A'], data['B'], check_names=False)
+ assert_series_equal(float_frame['B'], data['A'], check_names=False)
+ assert_series_equal(float_frame['A'], data['B'], check_names=False)
msg = 'Columns must be same length as key'
with pytest.raises(ValueError, match=msg):
- data[['A']] = self.frame[['A', 'B']]
+ data[['A']] = float_frame[['A', 'B']]
msg = 'Length of values does not match length of index'
with pytest.raises(ValueError, match=msg):
@@ -172,17 +172,17 @@ def test_setitem_list(self):
expected = Series(['1', '2'], df.columns, name=1)
assert_series_equal(result, expected)
- def test_setitem_list_not_dataframe(self):
- data = np.random.randn(len(self.frame), 2)
- self.frame[['A', 'B']] = data
- assert_almost_equal(self.frame[['A', 'B']].values, data)
+ def test_setitem_list_not_dataframe(self, float_frame):
+ data = np.random.randn(len(float_frame), 2)
+ float_frame[['A', 'B']] = data
+ assert_almost_equal(float_frame[['A', 'B']].values, data)
- def test_setitem_list_of_tuples(self):
- tuples = list(zip(self.frame['A'], self.frame['B']))
- self.frame['tuples'] = tuples
+ def test_setitem_list_of_tuples(self, float_frame):
+ tuples = list(zip(float_frame['A'], float_frame['B']))
+ float_frame['tuples'] = tuples
- result = self.frame['tuples']
- expected = Series(tuples, index=self.frame.index, name='tuples')
+ result = float_frame['tuples']
+ expected = Series(tuples, index=float_frame.index, name='tuples')
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
@@ -229,29 +229,30 @@ def inc(x):
expected = pd.DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
- def test_getitem_boolean(self):
+ def test_getitem_boolean(self, float_string_frame, mixed_float_frame,
+ mixed_int_frame, datetime_frame):
# boolean indexing
- d = self.tsframe.index[10]
- indexer = self.tsframe.index > d
+ d = datetime_frame.index[10]
+ indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
- subindex = self.tsframe.index[indexer]
- subframe = self.tsframe[indexer]
+ subindex = datetime_frame.index[indexer]
+ subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match='Item wrong length'):
- self.tsframe[indexer[:-1]]
+ datetime_frame[indexer[:-1]]
- subframe_obj = self.tsframe[indexer_obj]
+ subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match='boolean values only'):
- self.tsframe[self.tsframe]
+ datetime_frame[datetime_frame]
# test that Series work
- indexer_obj = Series(indexer_obj, self.tsframe.index)
+ indexer_obj = Series(indexer_obj, datetime_frame.index)
- subframe_obj = self.tsframe[indexer_obj]
+ subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
@@ -259,14 +260,14 @@ def test_getitem_boolean(self):
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
- indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
- subframe_obj = self.tsframe[indexer_obj]
+ indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
+ subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
- for df in [self.tsframe, self.mixed_frame,
- self.mixed_float, self.mixed_int]:
- if df is self.mixed_frame:
+ for df in [datetime_frame, float_string_frame,
+ mixed_float_frame, mixed_int_frame]:
+ if df is float_string_frame:
continue
data = df._get_numeric_data()
@@ -286,10 +287,10 @@ def test_getitem_boolean(self):
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
- def test_getitem_boolean_casting(self):
+ def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
- df = self.tsframe.copy()
+ df = datetime_frame.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
@@ -379,26 +380,26 @@ def test_getitem_ix_mixed_integer(self):
expected = df.iloc[:, [1]]
assert_frame_equal(result, expected)
- def test_getitem_setitem_ix_negative_integers(self):
+ def test_getitem_setitem_ix_negative_integers(self, float_frame):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- result = self.frame.ix[:, -1]
- assert_series_equal(result, self.frame['D'])
+ result = float_frame.ix[:, -1]
+ assert_series_equal(result, float_frame['D'])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- result = self.frame.ix[:, [-1]]
- assert_frame_equal(result, self.frame[['D']])
+ result = float_frame.ix[:, [-1]]
+ assert_frame_equal(result, float_frame[['D']])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- result = self.frame.ix[:, [-1, -2]]
- assert_frame_equal(result, self.frame[['D', 'C']])
+ result = float_frame.ix[:, [-1, -2]]
+ assert_frame_equal(result, float_frame[['D', 'C']])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- self.frame.ix[:, [-1]] = 0
- assert (self.frame['D'] == 0).all()
+ float_frame.ix[:, [-1]] = 0
+ assert (float_frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
# ix does label-based indexing when having an integer index
@@ -425,11 +426,11 @@ def test_getitem_setitem_ix_negative_integers(self):
assert a.ix[-1].name == 'T'
assert a.ix[-2].name == 'S'
- def test_getattr(self):
- assert_series_equal(self.frame.A, self.frame['A'])
+ def test_getattr(self, float_frame):
+ assert_series_equal(float_frame.A, float_frame['A'])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
- self.frame.NONEXISTENT_NAME
+ float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=range(10))
@@ -437,43 +438,43 @@ def test_setattr_column(self):
df.foobar = 5
assert (df.foobar == 5).all()
- def test_setitem(self):
+ def test_setitem(self, float_frame):
# not sure what else to do here
- series = self.frame['A'][::2]
- self.frame['col5'] = series
- assert 'col5' in self.frame
+ series = float_frame['A'][::2]
+ float_frame['col5'] = series
+ assert 'col5' in float_frame
assert len(series) == 15
- assert len(self.frame) == 30
+ assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
- exp = Series(exp, index=self.frame.index, name='col5')
- tm.assert_series_equal(self.frame['col5'], exp)
+ exp = Series(exp, index=float_frame.index, name='col5')
+ tm.assert_series_equal(float_frame['col5'], exp)
- series = self.frame['A']
- self.frame['col6'] = series
- tm.assert_series_equal(series, self.frame['col6'], check_names=False)
+ series = float_frame['A']
+ float_frame['col6'] = series
+ tm.assert_series_equal(series, float_frame['col6'], check_names=False)
with pytest.raises(KeyError):
- self.frame[np.random.randn(len(self.frame) + 1)] = 1
+ float_frame[np.random.randn(len(float_frame) + 1)] = 1
# set ndarray
- arr = np.random.randn(len(self.frame))
- self.frame['col9'] = arr
- assert (self.frame['col9'] == arr).all()
+ arr = np.random.randn(len(float_frame))
+ float_frame['col9'] = arr
+ assert (float_frame['col9'] == arr).all()
- self.frame['col7'] = 5
- assert((self.frame['col7'] == 5).all())
+ float_frame['col7'] = 5
+ assert((float_frame['col7'] == 5).all())
- self.frame['col0'] = 3.14
- assert((self.frame['col0'] == 3.14).all())
+ float_frame['col0'] = 3.14
+ assert((float_frame['col0'] == 3.14).all())
- self.frame['col8'] = 'foo'
- assert((self.frame['col8'] == 'foo').all())
+ float_frame['col8'] = 'foo'
+ assert((float_frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
- smaller = self.frame[:2]
+ smaller = float_frame[:2]
with pytest.raises(com.SettingWithCopyError):
smaller['col10'] = ['1', '2']
@@ -492,27 +493,27 @@ def test_setitem(self):
assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
- def test_setitem_dtype(self, dtype):
- arr = np.random.randn(len(self.frame))
+ def test_setitem_dtype(self, dtype, float_frame):
+ arr = np.random.randn(len(float_frame))
- self.frame[dtype] = np.array(arr, dtype=dtype)
- assert self.frame[dtype].dtype.name == dtype
+ float_frame[dtype] = np.array(arr, dtype=dtype)
+ assert float_frame[dtype].dtype.name == dtype
- def test_setitem_tuple(self):
- self.frame['A', 'B'] = self.frame['A']
- assert_series_equal(self.frame['A', 'B'], self.frame[
+ def test_setitem_tuple(self, float_frame):
+ float_frame['A', 'B'] = float_frame['A']
+ assert_series_equal(float_frame['A', 'B'], float_frame[
'A'], check_names=False)
- def test_setitem_always_copy(self):
- s = self.frame['A'].copy()
- self.frame['E'] = s
+ def test_setitem_always_copy(self, float_frame):
+ s = float_frame['A'].copy()
+ float_frame['E'] = s
- self.frame['E'][5:10] = np.nan
+ float_frame['E'][5:10] = np.nan
assert notna(s[5:10]).all()
- def test_setitem_boolean(self):
- df = self.frame.copy()
- values = self.frame.values
+ def test_setitem_boolean(self, float_frame):
+ df = float_frame.copy()
+ values = float_frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
@@ -565,10 +566,10 @@ def test_setitem_boolean(self):
[lambda df: df > np.abs(df) / 2,
lambda df: (df > np.abs(df) / 2).values],
ids=['dataframe', 'array'])
- def test_setitem_boolean_mask(self, mask_type):
+ def test_setitem_boolean_mask(self, mask_type, float_frame):
# Test for issue #18582
- df = self.frame.copy()
+ df = float_frame.copy()
mask = mask_type(df)
# index with boolean mask
@@ -579,34 +580,34 @@ def test_setitem_boolean_mask(self, mask_type):
expected.values[np.array(mask)] = np.nan
assert_frame_equal(result, expected)
- def test_setitem_cast(self):
- self.frame['D'] = self.frame['D'].astype('i8')
- assert self.frame['D'].dtype == np.int64
+ def test_setitem_cast(self, float_frame):
+ float_frame['D'] = float_frame['D'].astype('i8')
+ assert float_frame['D'].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
- self.frame['B'] = 0
- assert self.frame['B'].dtype == np.int64
+ float_frame['B'] = 0
+ assert float_frame['B'].dtype == np.int64
# cast if pass array of course
- self.frame['B'] = np.arange(len(self.frame))
- assert issubclass(self.frame['B'].dtype.type, np.integer)
+ float_frame['B'] = np.arange(len(float_frame))
+ assert issubclass(float_frame['B'].dtype.type, np.integer)
- self.frame['foo'] = 'bar'
- self.frame['foo'] = 0
- assert self.frame['foo'].dtype == np.int64
+ float_frame['foo'] = 'bar'
+ float_frame['foo'] = 0
+ assert float_frame['foo'].dtype == np.int64
- self.frame['foo'] = 'bar'
- self.frame['foo'] = 2.5
- assert self.frame['foo'].dtype == np.float64
+ float_frame['foo'] = 'bar'
+ float_frame['foo'] = 2.5
+ assert float_frame['foo'].dtype == np.float64
- self.frame['something'] = 0
- assert self.frame['something'].dtype == np.int64
- self.frame['something'] = 2
- assert self.frame['something'].dtype == np.int64
- self.frame['something'] = 2.5
- assert self.frame['something'].dtype == np.float64
+ float_frame['something'] = 0
+ assert float_frame['something'].dtype == np.int64
+ float_frame['something'] = 2
+ assert float_frame['something'].dtype == np.int64
+ float_frame['something'] = 2.5
+ assert float_frame['something'].dtype == np.float64
# GH 7704
# dtype conversion on setting
@@ -624,14 +625,14 @@ def test_setitem_cast(self):
df.one = np.int8(7)
assert df.dtypes.one == np.dtype(np.int8)
- def test_setitem_boolean_column(self):
- expected = self.frame.copy()
- mask = self.frame['A'] > 0
+ def test_setitem_boolean_column(self, float_frame):
+ expected = float_frame.copy()
+ mask = float_frame['A'] > 0
- self.frame.loc[mask, 'B'] = 0
+ float_frame.loc[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
- assert_frame_equal(self.frame, expected)
+ assert_frame_equal(float_frame, expected)
def test_frame_setitem_timestamp(self):
# GH#2155
@@ -642,7 +643,7 @@ def test_frame_setitem_timestamp(self):
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
- def test_setitem_corner(self):
+ def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
@@ -659,7 +660,7 @@ def test_setitem_corner(self):
df[datetime.now()] = 5.
# what to do when empty frame with index
- dm = DataFrame(index=self.frame.index)
+ dm = DataFrame(index=float_frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
assert len(dm.columns) == 2
@@ -735,16 +736,16 @@ def test_setitem_clear_caches(self):
assert df['z'] is not foo
tm.assert_series_equal(df['z'], expected)
- def test_setitem_None(self):
+ def test_setitem_None(self, float_frame):
# GH #766
- self.frame[None] = self.frame['A']
+ float_frame[None] = float_frame['A']
assert_series_equal(
- self.frame.iloc[:, -1], self.frame['A'], check_names=False)
- assert_series_equal(self.frame.loc[:, None], self.frame[
+ float_frame.iloc[:, -1], float_frame['A'], check_names=False)
+ assert_series_equal(float_frame.loc[:, None], float_frame[
'A'], check_names=False)
- assert_series_equal(self.frame[None], self.frame[
+ assert_series_equal(float_frame[None], float_frame[
'A'], check_names=False)
- repr(self.frame)
+ repr(float_frame)
def test_setitem_empty(self):
# GH 9596
@@ -785,8 +786,8 @@ def test_getitem_empty_frame_with_boolean(self):
df2 = df[df > 0]
assert_frame_equal(df, df2)
- def test_delitem_corner(self):
- f = self.frame.copy()
+ def test_delitem_corner(self, float_frame):
+ f = float_frame.copy()
del f['D']
assert len(f.columns) == 3
with pytest.raises(KeyError, match=r"^'D'$"):
@@ -794,15 +795,15 @@ def test_delitem_corner(self):
del f['B']
assert len(f.columns) == 2
- def test_getitem_fancy_2d(self):
- f = self.frame
+ def test_getitem_fancy_2d(self, float_frame):
+ f = float_frame
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
- subidx = self.frame.index[[5, 4, 1]]
+ subidx = float_frame.index[[5, 4, 1]]
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
assert_frame_equal(f.ix[subidx, ['B', 'A']],
@@ -891,10 +892,10 @@ def test_getitem_setitem_integer_slice_keyerrors(self):
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
- def test_setitem_fancy_2d(self):
+ def test_setitem_fancy_2d(self, float_frame):
# case 1
- frame = self.frame.copy()
+ frame = float_frame.copy()
expected = frame.copy()
with catch_warnings(record=True):
@@ -905,12 +906,12 @@ def test_setitem_fancy_2d(self):
assert_frame_equal(frame, expected)
# case 2
- frame = self.frame.copy()
- frame2 = self.frame.copy()
+ frame = float_frame.copy()
+ frame2 = float_frame.copy()
expected = frame.copy()
- subidx = self.frame.index[[5, 4, 1]]
+ subidx = float_frame.index[[5, 4, 1]]
values = np.random.randn(3, 2)
with catch_warnings(record=True):
@@ -925,18 +926,18 @@ def test_setitem_fancy_2d(self):
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
- frame = self.frame.copy()
+ frame = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- expected1 = self.frame.copy()
+ expected1 = float_frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- expected2 = self.frame.copy()
+ expected2 = float_frame.copy()
arr = np.random.randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
@@ -945,7 +946,7 @@ def test_setitem_fancy_2d(self):
# case 4
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- frame = self.frame.copy()
+ frame = float_frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
@@ -954,10 +955,10 @@ def test_setitem_fancy_2d(self):
# case 5
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- frame = self.frame.copy()
- frame2 = self.frame.copy()
+ frame = float_frame.copy()
+ frame2 = float_frame.copy()
- expected = self.frame.copy()
+ expected = float_frame.copy()
values = np.random.randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
@@ -973,8 +974,8 @@ def test_setitem_fancy_2d(self):
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
@@ -983,9 +984,9 @@ def test_setitem_fancy_2d(self):
# case 7: slice columns
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- frame = self.frame.copy()
- frame2 = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ frame2 = float_frame.copy()
+ expected = float_frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
@@ -1006,18 +1007,18 @@ def test_setitem_fancy_2d(self):
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
- def test_fancy_getitem_slice_mixed(self):
- sliced = self.mixed_frame.iloc[:, -3:]
+ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
+ sliced = float_string_frame.iloc[:, -3:]
assert sliced['D'].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
- sliced = self.frame.iloc[:, -3:]
+ sliced = float_frame.iloc[:, -3:]
with pytest.raises(com.SettingWithCopyError):
sliced['C'] = 4.
- assert (self.frame['C'] == 4).all()
+ assert (float_frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
@@ -1078,7 +1079,7 @@ def test_fancy_getitem_int_labels(self):
expected = df[3]
assert_series_equal(result, expected)
- def test_fancy_index_int_labels_exceptions(self):
+ def test_fancy_index_int_labels_exceptions(self, float_frame):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
@@ -1092,17 +1093,17 @@ def test_fancy_index_int_labels_exceptions(self):
msg = (r"None of \[Index\(\['foo', 'bar', 'baz'\],"
r" dtype='object'\)\] are in the \[index\]")
with pytest.raises(KeyError, match=msg):
- self.frame.ix[['foo', 'bar', 'baz']] = 1
+ float_frame.ix[['foo', 'bar', 'baz']] = 1
msg = (r"None of \[Index\(\['E'\], dtype='object'\)\] are in the"
r" \[columns\]")
with pytest.raises(KeyError, match=msg):
- self.frame.ix[:, ['E']] = 1
+ float_frame.ix[:, ['E']] = 1
# partial setting now allows this GH2578
- # pytest.raises(KeyError, self.frame.ix.__setitem__,
+ # pytest.raises(KeyError, float_frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
- def test_setitem_fancy_mixed_2d(self):
+ def test_setitem_fancy_mixed_2d(self, float_string_frame):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
@@ -1110,11 +1111,12 @@ def test_setitem_fancy_mixed_2d(self):
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
assert (result.values == 5).all()
- self.mixed_frame.ix[5] = np.nan
- assert isna(self.mixed_frame.ix[5]).all()
+ float_string_frame.ix[5] = np.nan
+ assert isna(float_string_frame.ix[5]).all()
- self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
- assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
+ float_string_frame.ix[5] = float_string_frame.ix[6]
+ assert_series_equal(float_string_frame.ix[5],
+ float_string_frame.ix[6],
check_names=False)
# #1432
@@ -1273,8 +1275,8 @@ def test_ix_dup(self):
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
- def test_getitem_fancy_1d(self):
- f = self.frame
+ def test_getitem_fancy_1d(self, float_frame, float_string_frame):
+ f = float_frame
# return self if no slicing...for now
with catch_warnings(record=True):
@@ -1329,15 +1331,15 @@ def test_getitem_fancy_1d(self):
# slice of mixed-frame
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- xs = self.mixed_frame.ix[5]
- exp = self.mixed_frame.xs(self.mixed_frame.index[5])
+ xs = float_string_frame.ix[5]
+ exp = float_string_frame.xs(float_string_frame.index[5])
tm.assert_series_equal(xs, exp)
- def test_setitem_fancy_1d(self):
+ def test_setitem_fancy_1d(self, float_frame):
# case 1: set cross-section for indices
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
@@ -1349,13 +1351,13 @@ def test_setitem_fancy_1d(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- frame2 = self.frame.copy()
+ frame2 = float_frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
@@ -1366,13 +1368,13 @@ def test_setitem_fancy_1d(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- frame2 = self.frame.copy()
+ frame2 = float_frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
@@ -1387,8 +1389,8 @@ def test_setitem_fancy_1d(self):
assert_frame_equal(frame, expected)
# single column
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
@@ -1396,8 +1398,8 @@ def test_setitem_fancy_1d(self):
expected['A'] = 7.
assert_frame_equal(frame, expected)
- def test_getitem_fancy_scalar(self):
- f = self.frame
+ def test_getitem_fancy_scalar(self, float_frame):
+ f = float_frame
ix = f.loc
# individual value
@@ -1406,9 +1408,9 @@ def test_getitem_fancy_scalar(self):
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
- def test_setitem_fancy_scalar(self):
- f = self.frame
- expected = self.frame.copy()
+ def test_setitem_fancy_scalar(self, float_frame):
+ f = float_frame
+ expected = float_frame.copy()
ix = f.loc
# individual value
@@ -1422,8 +1424,8 @@ def test_setitem_fancy_scalar(self):
ix[idx, col] = val
assert_frame_equal(f, expected)
- def test_getitem_fancy_boolean(self):
- f = self.frame
+ def test_getitem_fancy_boolean(self, float_frame):
+ f = float_frame
ix = f.loc
expected = f.reindex(columns=['B', 'D'])
@@ -1446,49 +1448,49 @@ def test_getitem_fancy_boolean(self):
columns=['C', 'D'])
assert_frame_equal(result, expected)
- def test_setitem_fancy_boolean(self):
+ def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
mask = frame['A'] > 0
frame.loc[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
frame.loc[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
- def test_getitem_fancy_ints(self):
- result = self.frame.iloc[[1, 4, 7]]
- expected = self.frame.loc[self.frame.index[[1, 4, 7]]]
+ def test_getitem_fancy_ints(self, float_frame):
+ result = float_frame.iloc[[1, 4, 7]]
+ expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
- result = self.frame.iloc[:, [2, 0, 1]]
- expected = self.frame.loc[:, self.frame.columns[[2, 0, 1]]]
+ result = float_frame.iloc[:, [2, 0, 1]]
+ expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
- def test_getitem_setitem_fancy_exceptions(self):
- ix = self.frame.iloc
+ def test_getitem_setitem_fancy_exceptions(self, float_frame):
+ ix = float_frame.iloc
with pytest.raises(IndexingError, match='Too many indexers'):
ix[:, :, :]
with pytest.raises(IndexingError):
ix[:, :, :] = 1
- def test_getitem_setitem_boolean_misaligned(self):
+ def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
- mask = self.frame['A'][::-1] > 1
+ mask = float_frame['A'][::-1] > 1
- result = self.frame.loc[mask]
- expected = self.frame.loc[mask[::-1]]
+ result = float_frame.loc[mask]
+ expected = float_frame.loc[mask[::-1]]
assert_frame_equal(result, expected)
- cp = self.frame.copy()
- expected = self.frame.copy()
+ cp = float_frame.copy()
+ expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
assert_frame_equal(cp, expected)
@@ -1651,17 +1653,18 @@ def test_setitem_mixed_datetime(self):
df.loc[[4, 5], ['a', 'b']] = A
assert_frame_equal(df, expected)
- def test_setitem_frame(self):
- piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
- self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
- result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
+ def test_setitem_frame_float(self, float_frame):
+ piece = float_frame.loc[float_frame.index[:2], ['A', 'B']]
+ float_frame.loc[float_frame.index[-2]:, ['A', 'B']] = piece.values
+ result = float_frame.loc[float_frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
+ def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
- f = self.mixed_frame.copy()
+ f = float_string_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.]],
index=f.index[0:2], columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
@@ -1670,7 +1673,7 @@ def test_setitem_frame(self):
piece.values)
# rows unaligned
- f = self.mixed_frame.copy()
+ f = float_string_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
index=list(f.index[0:2]) + ['foo', 'bar'],
columns=['A', 'B'])
@@ -1680,7 +1683,7 @@ def test_setitem_frame(self):
piece.values[0:2])
# key is unaligned with values
- f = self.mixed_frame.copy()
+ f = float_string_frame.copy()
piece = f.loc[f.index[:2], ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
@@ -1690,13 +1693,14 @@ def test_setitem_frame(self):
piece.values)
# ndarray
- f = self.mixed_frame.copy()
- piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
+ f = float_string_frame.copy()
+ piece = float_string_frame.loc[f.index[:2], ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece.values
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
+ def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, 'foo'], [3, 4, 'bar']], columns=['A', 'B', 'C'])
df2 = df.copy()
@@ -1706,12 +1710,12 @@ def test_setitem_frame(self):
expected['C'] = df['C']
assert_frame_equal(df2, expected)
- def test_setitem_frame_align(self):
- piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
- piece.index = self.frame.index[-2:]
+ def test_setitem_frame_align(self, float_frame):
+ piece = float_frame.loc[float_frame.index[:2], ['A', 'B']]
+ piece.index = float_frame.index[-2:]
piece.columns = ['A', 'B']
- self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
- result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
+ float_frame.loc[float_frame.index[-2:], ['A', 'B']] = piece
+ result = float_frame.loc[float_frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
@@ -1775,87 +1779,94 @@ def test_getitem_list_duplicates(self):
expected = df.iloc[:, 2:]
assert_frame_equal(result, expected)
- def test_get_value(self):
- for idx in self.frame.index:
- for col in self.frame.columns:
+ def test_get_value(self, float_frame):
+ for idx in float_frame.index:
+ for col in float_frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- result = self.frame.get_value(idx, col)
- expected = self.frame[col][idx]
+ result = float_frame.get_value(idx, col)
+ expected = float_frame[col][idx]
assert result == expected
- def test_lookup(self):
- def alt(df, rows, cols, dtype):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = [df.get_value(r, c) for r, c in zip(rows, cols)]
- return np.array(result, dtype=dtype)
+ def test_lookup_float(self, float_frame):
+ df = float_frame
+ rows = list(df.index) * len(df.columns)
+ cols = list(df.columns) * len(df.index)
+ result = df.lookup(rows, cols)
- def testit(df):
- rows = list(df.index) * len(df.columns)
- cols = list(df.columns) * len(df.index)
- result = df.lookup(rows, cols)
- expected = alt(df, rows, cols, dtype=np.object_)
- tm.assert_almost_equal(result, expected, check_dtype=False)
+ expected = np.array([df.loc[r, c] for r, c in zip(rows, cols)])
+ tm.assert_numpy_array_equal(result, expected)
- testit(self.mixed_frame)
- testit(self.frame)
+ def test_lookup_mixed(self, float_string_frame):
+ df = float_string_frame
+ rows = list(df.index) * len(df.columns)
+ cols = list(df.columns) * len(df.index)
+ result = df.lookup(rows, cols)
+ expected = np.array([df.loc[r, c] for r, c in zip(rows, cols)],
+ dtype=np.object_)
+ tm.assert_almost_equal(result, expected)
+
+ def test_lookup_bool(self):
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
'mask_b': [True, False, False, False],
'mask_c': [False, True, False, True]})
df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
- exp_mask = alt(df, df.index, 'mask_' + df['label'], dtype=np.bool_)
+
+ exp_mask = np.array([
+ df.loc[r, c] for r, c in zip(df.index, 'mask_' + df['label'])])
+
tm.assert_series_equal(df['mask'], pd.Series(exp_mask, name='mask'))
assert df['mask'].dtype == np.bool_
+ def test_lookup_raises(self, float_frame):
with pytest.raises(KeyError):
- self.frame.lookup(['xyz'], ['A'])
+ float_frame.lookup(['xyz'], ['A'])
with pytest.raises(KeyError):
- self.frame.lookup([self.frame.index[0]], ['xyz'])
+ float_frame.lookup([float_frame.index[0]], ['xyz'])
with pytest.raises(ValueError, match='same size'):
- self.frame.lookup(['a', 'b', 'c'], ['a'])
+ float_frame.lookup(['a', 'b', 'c'], ['a'])
- def test_set_value(self):
- for idx in self.frame.index:
- for col in self.frame.columns:
+ def test_set_value(self, float_frame):
+ for idx in float_frame.index:
+ for col in float_frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- self.frame.set_value(idx, col, 1)
- assert self.frame[col][idx] == 1
+ float_frame.set_value(idx, col, 1)
+ assert float_frame[col][idx] == 1
- def test_set_value_resize(self):
+ def test_set_value_resize(self, float_frame):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- res = self.frame.set_value('foobar', 'B', 0)
- assert res is self.frame
+ res = float_frame.set_value('foobar', 'B', 0)
+ assert res is float_frame
assert res.index[-1] == 'foobar'
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert res.get_value('foobar', 'B') == 0
- self.frame.loc['foobar', 'qux'] = 0
+ float_frame.loc['foobar', 'qux'] = 0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- assert self.frame.get_value('foobar', 'qux') == 0
+ assert float_frame.get_value('foobar', 'qux') == 0
- res = self.frame.copy()
+ res = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 'sam')
assert res3['baz'].dtype == np.object_
- res = self.frame.copy()
+ res = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', True)
assert res3['baz'].dtype == np.object_
- res = self.frame.copy()
+ res = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 5)
@@ -1907,16 +1918,16 @@ def test_get_set_value_no_partial_indexing(self):
with pytest.raises(KeyError, match=r"^0$"):
df.get_value(0, 1)
- def test_single_element_ix_dont_upcast(self):
- self.frame['E'] = 1
- assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
+ def test_single_element_ix_dont_upcast(self, float_frame):
+ float_frame['E'] = 1
+ assert issubclass(float_frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
- result = self.frame.ix[self.frame.index[5], 'E']
+ result = float_frame.ix[float_frame.index[5], 'E']
assert is_integer(result)
- result = self.frame.loc[self.frame.index[5], 'E']
+ result = float_frame.loc[float_frame.index[5], 'E']
assert is_integer(result)
# GH 11617
@@ -2079,12 +2090,12 @@ def test_iloc_sparse_propegate_fill_value(self):
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
- def test_iat(self):
+ def test_iat(self, float_frame):
- for i, row in enumerate(self.frame.index):
- for j, col in enumerate(self.frame.columns):
- result = self.frame.iat[i, j]
- expected = self.frame.at[row, col]
+ for i, row in enumerate(float_frame.index):
+ for j, col in enumerate(float_frame.columns):
+ result = float_frame.iat[i, j]
+ expected = float_frame.at[row, col]
assert result == expected
def test_nested_exception(self):
@@ -2433,14 +2444,14 @@ def test_at_time_between_time_datetimeindex(self):
result.loc[bkey] = df.iloc[binds]
assert_frame_equal(result, df)
- def test_xs(self):
- idx = self.frame.index[5]
- xs = self.frame.xs(idx)
+ def test_xs(self, float_frame, datetime_frame):
+ idx = float_frame.index[5]
+ xs = float_frame.xs(idx)
for item, value in xs.items():
if np.isnan(value):
- assert np.isnan(self.frame[item][idx])
+ assert np.isnan(float_frame[item][idx])
else:
- assert value == self.frame[item][idx]
+ assert value == float_frame[item][idx]
# mixed-type xs
test_data = {
@@ -2454,15 +2465,15 @@ def test_xs(self):
assert xs['B'] == '1'
with pytest.raises(KeyError):
- self.tsframe.xs(self.tsframe.index[0] - BDay())
+ datetime_frame.xs(datetime_frame.index[0] - BDay())
# xs get column
- series = self.frame.xs('A', axis=1)
- expected = self.frame['A']
+ series = float_frame.xs('A', axis=1)
+ expected = float_frame['A']
assert_series_equal(series, expected)
# view is returned if possible
- series = self.frame.xs('A', axis=1)
+ series = float_frame.xs('A', axis=1)
series[:] = 5
assert (expected == 5).all()
@@ -2582,7 +2593,8 @@ def test_boolean_indexing_mixed(self):
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
- def test_where(self):
+ def test_where(self, float_string_frame, mixed_float_frame,
+ mixed_int_frame):
default_frame = DataFrame(np.random.randn(5, 3),
columns=['A', 'B', 'C'])
@@ -2610,9 +2622,9 @@ def _check_get(df, cond, check_dtypes=True):
assert (rs.dtypes == df.dtypes).all()
# check getting
- for df in [default_frame, self.mixed_frame,
- self.mixed_float, self.mixed_int]:
- if df is self.mixed_frame:
+ for df in [default_frame, float_string_frame,
+ mixed_float_frame, mixed_int_frame]:
+ if df is float_string_frame:
with pytest.raises(TypeError):
df > 0
continue
@@ -2662,8 +2674,8 @@ def _check_align(df, cond, other, check_dtypes=True):
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
- for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
- if df is self.mixed_frame:
+ for df in [float_string_frame, mixed_float_frame, mixed_int_frame]:
+ if df is float_string_frame:
with pytest.raises(TypeError):
df > 0
continue
@@ -2716,9 +2728,9 @@ def _check_set(df, cond, check_dtypes=True):
v = np.dtype('float64')
assert dfi[k].dtype == v
- for df in [default_frame, self.mixed_frame, self.mixed_float,
- self.mixed_int]:
- if df is self.mixed_frame:
+ for df in [default_frame, float_string_frame, mixed_float_frame,
+ mixed_int_frame]:
+ if df is float_string_frame:
with pytest.raises(TypeError):
df > 0
continue
@@ -3166,20 +3178,20 @@ def test_mask_callable(self):
tm.assert_frame_equal(result,
(df + 2).mask((df + 2) > 8, (df + 2) + 10))
- def test_head_tail(self):
- assert_frame_equal(self.frame.head(), self.frame[:5])
- assert_frame_equal(self.frame.tail(), self.frame[-5:])
+ def test_head_tail(self, float_frame):
+ assert_frame_equal(float_frame.head(), float_frame[:5])
+ assert_frame_equal(float_frame.tail(), float_frame[-5:])
- assert_frame_equal(self.frame.head(0), self.frame[0:0])
- assert_frame_equal(self.frame.tail(0), self.frame[0:0])
+ assert_frame_equal(float_frame.head(0), float_frame[0:0])
+ assert_frame_equal(float_frame.tail(0), float_frame[0:0])
- assert_frame_equal(self.frame.head(-1), self.frame[:-1])
- assert_frame_equal(self.frame.tail(-1), self.frame[1:])
- assert_frame_equal(self.frame.head(1), self.frame[:1])
- assert_frame_equal(self.frame.tail(1), self.frame[-1:])
+ assert_frame_equal(float_frame.head(-1), float_frame[:-1])
+ assert_frame_equal(float_frame.tail(-1), float_frame[1:])
+ assert_frame_equal(float_frame.head(1), float_frame[:1])
+ assert_frame_equal(float_frame.tail(1), float_frame[-1:])
# with a float index
- df = self.frame.copy()
- df.index = np.arange(len(self.frame)) + 0.1
+ df = float_frame.copy()
+ df.index = np.arange(len(float_frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df[0:0])
@@ -3243,16 +3255,10 @@ def test_interval_index(self):
class TestDataFrameIndexingDatetimeWithTZ(TestData):
- def setup_method(self, method):
- self.idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
- name='foo')
- self.dr = date_range('20130110', periods=3)
- self.df = DataFrame({'A': self.idx, 'B': self.dr})
-
- def test_setitem(self):
+ def test_setitem(self, timezone_frame):
- df = self.df
- idx = self.idx
+ df = timezone_frame
+ idx = df['B'].rename('foo')
# setitem
df['C'] = idx
@@ -3281,7 +3287,8 @@ def test_setitem(self):
def test_set_reset(self):
- idx = self.idx
+ idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
+ name='foo')
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
@@ -3291,11 +3298,11 @@ def test_set_reset(self):
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
- def test_transpose(self):
+ def test_transpose(self, timezone_frame):
- result = self.df.T
- expected = DataFrame(self.df.values.T)
- expected.index = ['A', 'B']
+ result = timezone_frame.T
+ expected = DataFrame(timezone_frame.values.T)
+ expected.index = ['A', 'B', 'C']
assert_frame_equal(result, expected)
def test_scalar_assignment(self):
@@ -3309,16 +3316,10 @@ def test_scalar_assignment(self):
class TestDataFrameIndexingUInt64(TestData):
- def setup_method(self, method):
- self.ir = Index(np.arange(3), dtype=np.uint64)
- self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
-
- self.df = DataFrame({'A': self.idx, 'B': self.ir})
-
- def test_setitem(self):
+ def test_setitem(self, uint64_frame):
- df = self.df
- idx = self.idx
+ df = uint64_frame
+ idx = df['A'].rename('foo')
# setitem
df['C'] = idx
@@ -3343,7 +3344,7 @@ def test_setitem(self):
def test_set_reset(self):
- idx = self.idx
+ idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
@@ -3353,10 +3354,10 @@ def test_set_reset(self):
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
- def test_transpose(self):
+ def test_transpose(self, uint64_frame):
- result = self.df.T
- expected = DataFrame(self.df.values.T)
+ result = uint64_frame.T
+ expected = DataFrame(uint64_frame.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
| One more step towards #22471. Happy to move the fixture to the `test_indexing` module if that's now the preferred form. | https://api.github.com/repos/pandas-dev/pandas/pulls/25633 | 2019-03-10T17:09:20Z | 2019-06-28T12:13:43Z | 2019-06-28T12:13:43Z | 2019-06-28T12:27:44Z |
TST: failing wheel building on PY2 and old numpy | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index fc642d211b30c..92ce6369a5109 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -21,7 +21,7 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
- compat, date_range, isna)
+ _np_version_under1p13, compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -684,6 +684,8 @@ def test_constructor_ndarray(self):
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
+ @pytest.mark.skipif(PY2 & _np_version_under1p13,
+ reason="old numpy & py2")
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
| closes #25630
| https://api.github.com/repos/pandas-dev/pandas/pulls/25631 | 2019-03-10T16:17:09Z | 2019-03-10T17:26:34Z | 2019-03-10T17:26:34Z | 2019-03-10T17:26:34Z |
Suppress incorrect warning in nargsort for timezone-aware DatetimeIndex | diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ef69939d6e978..0b5b017bec9ac 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -1,4 +1,5 @@
""" miscellaneous sorting / groupby utilities """
+import warnings
import numpy as np
@@ -254,7 +255,13 @@ def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
sorted_idx = np.roll(sorted_idx, cnt_null)
return sorted_idx
- items = np.asanyarray(items)
+ with warnings.catch_warnings():
+ # https://github.com/pandas-dev/pandas/issues/25439
+ # can be removed once ExtensionArrays are properly handled by nargsort
+ warnings.filterwarnings(
+ "ignore", category=FutureWarning,
+ message="Converting timezone-aware DatetimeArray to")
+ items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isna(items)
non_nans = items[~mask]
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 7528566e8326e..fa8fbddd59118 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -9,7 +9,8 @@
from pandas.compat import PY2
-from pandas import DataFrame, MultiIndex, Series, compat, concat, merge
+from pandas import (
+ DataFrame, MultiIndex, Series, compat, concat, merge, to_datetime)
from pandas.core import common as com
from pandas.core.sorting import (
decons_group_index, get_group_index, is_int64_overflow_possible,
@@ -183,6 +184,13 @@ def test_nargsort(self):
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
+ def test_nargsort_datetimearray_warning(self):
+ # https://github.com/pandas-dev/pandas/issues/25439
+ # can be removed once the FutureWarning for np.array(DTA) is removed
+ data = to_datetime([0, 2, 0, 1]).tz_localize('Europe/Brussels')
+ with tm.assert_produces_warning(None):
+ nargsort(data)
+
class TestMerge(object):
| Closes #25439, alternative to https://github.com/pandas-dev/pandas/pull/25595 for 0.24.2 | https://api.github.com/repos/pandas-dev/pandas/pulls/25629 | 2019-03-10T14:36:26Z | 2019-03-11T12:41:37Z | 2019-03-11T12:41:37Z | 2019-03-11T19:18:47Z |
Fixturize tests/frame/test_asof.py | diff --git a/pandas/tests/frame/test_asof.py b/pandas/tests/frame/test_asof.py
index 0947e6f252dab..4ba3431d102df 100644
--- a/pandas/tests/frame/test_asof.py
+++ b/pandas/tests/frame/test_asof.py
@@ -6,21 +6,26 @@
from pandas import DataFrame, Series, Timestamp, date_range, to_datetime
import pandas.util.testing as tm
-from .common import TestData
+@pytest.fixture
+def date_range_frame():
+ """
+ Fixture for DataFrame of ints with date_range index
-class TestFrameAsof(TestData):
- def setup_method(self, method):
- self.N = N = 50
- self.rng = date_range('1/1/1990', periods=N, freq='53s')
- self.df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
- index=self.rng)
+ Columns are ['A', 'B'].
+ """
+ N = 50
+ rng = date_range('1/1/1990', periods=N, freq='53s')
+ return DataFrame({'A': np.arange(N), 'B': np.arange(N)}, index=rng)
- def test_basic(self):
- df = self.df.copy()
+
+class TestFrameAsof():
+
+ def test_basic(self, date_range_frame):
+ df = date_range_frame
+ N = 50
df.loc[15:30, 'A'] = np.nan
- dates = date_range('1/1/1990', periods=self.N * 3,
- freq='25s')
+ dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = df.asof(dates)
assert result.notna().all(1).all()
@@ -35,11 +40,9 @@ def test_basic(self):
rs = result[mask]
assert (rs == 14).all(1).all()
- def test_subset(self):
+ def test_subset(self, date_range_frame):
N = 10
- rng = date_range('1/1/1990', periods=N, freq='53s')
- df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
- index=rng)
+ df = date_range_frame.iloc[:N].copy()
df.loc[4:8, 'A'] = np.nan
dates = date_range('1/1/1990', periods=N * 3,
freq='25s')
@@ -54,20 +57,18 @@ def test_subset(self):
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
- # B gives self.df.asof
+ # B gives df.asof
result = df.asof(dates, subset='B')
expected = df.resample('25s', closed='right').ffill().reindex(dates)
expected.iloc[20:] = 9
tm.assert_frame_equal(result, expected)
- def test_missing(self):
+ def test_missing(self, date_range_frame):
# GH 15118
# no match found - `where` value before earliest date in index
N = 10
- rng = date_range('1/1/1990', periods=N, freq='53s')
- df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
- index=rng)
+ df = date_range_frame.iloc[:N].copy()
result = df.asof('1989-12-31')
expected = Series(index=['A', 'B'], name=Timestamp('1989-12-31'))
@@ -78,7 +79,7 @@ def test_missing(self):
columns=['A', 'B'], dtype='float64')
tm.assert_frame_equal(result, expected)
- def test_all_nans(self):
+ def test_all_nans(self, date_range_frame):
# GH 15713
# DataFrame is all nans
result = DataFrame([np.nan]).asof([0])
@@ -86,14 +87,16 @@ def test_all_nans(self):
tm.assert_frame_equal(result, expected)
# testing non-default indexes, multiple inputs
- dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
- result = DataFrame(np.nan, index=self.rng, columns=['A']).asof(dates)
+ N = 150
+ rng = date_range_frame.index
+ dates = date_range('1/1/1990', periods=N, freq='25s')
+ result = DataFrame(np.nan, index=rng, columns=['A']).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=['A'])
tm.assert_frame_equal(result, expected)
# testing multiple columns
- dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
- result = DataFrame(np.nan, index=self.rng,
+ dates = date_range('1/1/1990', periods=N, freq='25s')
+ result = DataFrame(np.nan, index=rng,
columns=['A', 'B', 'C']).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
| Picking up #22471 again. Added one new fixture. | https://api.github.com/repos/pandas-dev/pandas/pulls/25628 | 2019-03-10T14:33:55Z | 2019-03-10T21:41:27Z | 2019-03-10T21:41:27Z | 2019-03-10T21:44:48Z |
Fixturize tests/frame/test_axis_select_reindex.py | diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 42f98d5c96aa5..b4fde43ff3055 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -8,12 +8,11 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna)
-from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
-class TestDataFrameSelectReindex(TestData):
+class TestDataFrameSelectReindex:
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
@@ -204,34 +203,36 @@ def test_merge_join_different_levels(self):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
- def test_reindex(self):
- newFrame = self.frame.reindex(self.ts1.index)
+ def test_reindex(self, float_frame):
+ datetime_series = tm.makeTimeSeries(nper=30)
+
+ newFrame = float_frame.reindex(datetime_series.index)
for col in newFrame.columns:
for idx, val in newFrame[col].items():
- if idx in self.frame.index:
+ if idx in float_frame.index:
if np.isnan(val):
- assert np.isnan(self.frame[col][idx])
+ assert np.isnan(float_frame[col][idx])
else:
- assert val == self.frame[col][idx]
+ assert val == float_frame[col][idx]
else:
assert np.isnan(val)
for col, series in newFrame.items():
assert tm.equalContents(series.index, newFrame.index)
- emptyFrame = self.frame.reindex(Index([]))
+ emptyFrame = float_frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
- nonContigFrame = self.frame.reindex(self.ts1.index[::2])
+ nonContigFrame = float_frame.reindex(datetime_series.index[::2])
for col in nonContigFrame.columns:
for idx, val in nonContigFrame[col].items():
- if idx in self.frame.index:
+ if idx in float_frame.index:
if np.isnan(val):
- assert np.isnan(self.frame[col][idx])
+ assert np.isnan(float_frame[col][idx])
else:
- assert val == self.frame[col][idx]
+ assert val == float_frame[col][idx]
else:
assert np.isnan(val)
@@ -241,28 +242,28 @@ def test_reindex(self):
# corner cases
# Same index, copies values but not index if copy=False
- newFrame = self.frame.reindex(self.frame.index, copy=False)
- assert newFrame.index is self.frame.index
+ newFrame = float_frame.reindex(float_frame.index, copy=False)
+ assert newFrame.index is float_frame.index
# length zero
- newFrame = self.frame.reindex([])
+ newFrame = float_frame.reindex([])
assert newFrame.empty
- assert len(newFrame.columns) == len(self.frame.columns)
+ assert len(newFrame.columns) == len(float_frame.columns)
# length zero with columns reindexed with non-empty index
- newFrame = self.frame.reindex([])
- newFrame = newFrame.reindex(self.frame.index)
- assert len(newFrame.index) == len(self.frame.index)
- assert len(newFrame.columns) == len(self.frame.columns)
+ newFrame = float_frame.reindex([])
+ newFrame = newFrame.reindex(float_frame.index)
+ assert len(newFrame.index) == len(float_frame.index)
+ assert len(newFrame.columns) == len(float_frame.columns)
# pass non-Index
- newFrame = self.frame.reindex(list(self.ts1.index))
- tm.assert_index_equal(newFrame.index, self.ts1.index)
+ newFrame = float_frame.reindex(list(datetime_series.index))
+ tm.assert_index_equal(newFrame.index, datetime_series.index)
# copy with no axes
- result = self.frame.reindex()
- assert_frame_equal(result, self.frame)
- assert result is not self.frame
+ result = float_frame.reindex()
+ assert_frame_equal(result, float_frame)
+ assert result is not float_frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
@@ -305,32 +306,32 @@ def test_reindex_name_remains(self):
df = df.reindex(columns=i)
assert df.columns.name == 'iname'
- def test_reindex_int(self):
- smaller = self.intframe.reindex(self.intframe.index[::2])
+ def test_reindex_int(self, int_frame):
+ smaller = int_frame.reindex(int_frame.index[::2])
assert smaller['A'].dtype == np.int64
- bigger = smaller.reindex(self.intframe.index)
+ bigger = smaller.reindex(int_frame.index)
assert bigger['A'].dtype == np.float64
- smaller = self.intframe.reindex(columns=['A', 'B'])
+ smaller = int_frame.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
- def test_reindex_like(self):
- other = self.frame.reindex(index=self.frame.index[:10],
- columns=['C', 'B'])
+ def test_reindex_like(self, float_frame):
+ other = float_frame.reindex(index=float_frame.index[:10],
+ columns=['C', 'B'])
- assert_frame_equal(other, self.frame.reindex_like(other))
+ assert_frame_equal(other, float_frame.reindex_like(other))
- def test_reindex_columns(self):
- new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
+ def test_reindex_columns(self, float_frame):
+ new_frame = float_frame.reindex(columns=['A', 'B', 'E'])
- tm.assert_series_equal(new_frame['B'], self.frame['B'])
+ tm.assert_series_equal(new_frame['B'], float_frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
- new_frame = self.frame.reindex(columns=[])
+ new_frame = float_frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
@@ -545,41 +546,41 @@ def test_reindex_api_equivalence(self):
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
- def test_align(self):
- af, bf = self.frame.align(self.frame)
- assert af._data is not self.frame._data
+ def test_align_float(self, float_frame):
+ af, bf = float_frame.align(float_frame)
+ assert af._data is not float_frame._data
- af, bf = self.frame.align(self.frame, copy=False)
- assert af._data is self.frame._data
+ af, bf = float_frame.align(float_frame, copy=False)
+ assert af._data is float_frame._data
# axis = 0
- other = self.frame.iloc[:-5, :3]
- af, bf = self.frame.align(other, axis=0, fill_value=-1)
+ other = float_frame.iloc[:-5, :3]
+ af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
- join_idx = self.frame.index.join(other.index)
- diff_a = self.frame.index.difference(join_idx)
+ join_idx = float_frame.index.join(other.index)
+ diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
- af, bf = self.frame.align(other, join='right', axis=0)
+ af, bf = float_frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
- other = self.frame.iloc[:-5, :3].copy()
- af, bf = self.frame.align(other, axis=1)
- tm.assert_index_equal(bf.columns, self.frame.columns)
+ other = float_frame.iloc[:-5, :3].copy()
+ af, bf = float_frame.align(other, axis=1)
+ tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
- join_idx = self.frame.index.join(other.index)
- diff_a = self.frame.index.difference(join_idx)
+ join_idx = float_frame.index.join(other.index)
+ diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
@@ -588,55 +589,38 @@ def test_align(self):
assert (diff_a_vals == -1).all()
- af, bf = self.frame.align(other, join='inner', axis=1)
- tm.assert_index_equal(bf.columns, other.columns)
-
- af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
+ af, bf = float_frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
- # test other non-float types
- af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
+ af, bf = float_frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
- af, bf = self.mixed_frame.align(self.mixed_frame,
- join='inner', axis=1, method='pad')
- tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
-
- af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
- method=None, fill_value=None)
- tm.assert_index_equal(bf.index, Index([]))
-
- af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
- method=None, fill_value=0)
- tm.assert_index_equal(bf.index, Index([]))
-
- # mixed floats/ints
- af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
- method=None, fill_value=0)
+ af, bf = float_frame.align(other.iloc[:, 0], join='inner', axis=1,
+ method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
- af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
- method=None, fill_value=0)
+ af, bf = float_frame.align(other.iloc[:, 0], join='inner', axis=1,
+ method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
- self.frame.align(af.iloc[0, :3], join='inner', axis=2)
+ float_frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
- idx = self.frame.index
+ idx = float_frame.index
s = Series(range(len(idx)), index=idx)
- left, right = self.frame.align(s, axis=0)
- tm.assert_index_equal(left.index, self.frame.index)
- tm.assert_index_equal(right.index, self.frame.index)
+ left, right = float_frame.align(s, axis=0)
+ tm.assert_index_equal(left.index, float_frame.index)
+ tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
- left, right = self.frame.align(s, broadcast_axis=1)
- tm.assert_index_equal(left.index, self.frame.index)
- expected = {c: s for c in self.frame.columns}
- expected = DataFrame(expected, index=self.frame.index,
- columns=self.frame.columns)
+ left, right = float_frame.align(s, broadcast_axis=1)
+ tm.assert_index_equal(left.index, float_frame.index)
+ expected = {c: s for c in float_frame.columns}
+ expected = DataFrame(expected, index=float_frame.index,
+ columns=float_frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
@@ -649,6 +633,34 @@ def test_align(self):
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
tm.assert_frame_equal(result, expected)
+ def test_align_int(self, int_frame):
+ # test other non-float types
+ other = DataFrame(index=range(5), columns=['A', 'B', 'C'])
+
+ af, bf = int_frame.align(other, join='inner', axis=1, method='pad')
+ tm.assert_index_equal(bf.columns, other.columns)
+
+ def test_align_mixed_type(self, float_string_frame):
+
+ af, bf = float_string_frame.align(float_string_frame,
+ join='inner', axis=1, method='pad')
+ tm.assert_index_equal(bf.columns, float_string_frame.columns)
+
+ def test_align_mixed_float(self, mixed_float_frame):
+ # mixed floats/ints
+ other = DataFrame(index=range(5), columns=['A', 'B', 'C'])
+
+ af, bf = mixed_float_frame.align(other.iloc[:, 0], join='inner',
+ axis=1, method=None, fill_value=0)
+ tm.assert_index_equal(bf.index, Index([]))
+
+ def test_align_mixed_int(self, mixed_int_frame):
+ other = DataFrame(index=range(5), columns=['A', 'B', 'C'])
+
+ af, bf = mixed_int_frame.align(other.iloc[:, 0], join='inner', axis=1,
+ method=None, fill_value=0)
+ tm.assert_index_equal(bf.index, Index([]))
+
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
@@ -676,13 +688,14 @@ def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
@pytest.mark.parametrize('ax', [0, 1, None])
@pytest.mark.parametrize('fax', [0, 1])
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
- def test_align_fill_method(self, how, meth, ax, fax):
- self._check_align_fill(how, meth, ax, fax)
+ def test_align_fill_method(self, how, meth, ax, fax, float_frame):
+ df = float_frame
+ self._check_align_fill(df, how, meth, ax, fax)
- def _check_align_fill(self, kind, meth, ax, fax):
- left = self.frame.iloc[0:4, :10]
- right = self.frame.iloc[2:, 6:]
- empty = self.frame.iloc[:0, :0]
+ def _check_align_fill(self, frame, kind, meth, ax, fax):
+ left = frame.iloc[0:4, :10]
+ right = frame.iloc[2:, 6:]
+ empty = frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
@@ -775,24 +788,24 @@ def test_align_series_combinations(self):
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
- def test_filter(self):
+ def test_filter(self, float_frame, float_string_frame):
# Items
- filtered = self.frame.filter(['A', 'B', 'E'])
+ filtered = float_frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
- filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
+ filtered = float_frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
- idx = self.frame.index[0:4]
- filtered = self.frame.filter(idx, axis='index')
- expected = self.frame.reindex(index=idx)
+ idx = float_frame.index[0:4]
+ filtered = float_frame.filter(idx, axis='index')
+ expected = float_frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
- fcopy = self.frame.copy()
+ fcopy = float_frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
@@ -819,35 +832,35 @@ def test_filter(self):
# pass in None
with pytest.raises(TypeError, match='Must pass'):
- self.frame.filter()
+ float_frame.filter()
with pytest.raises(TypeError, match='Must pass'):
- self.frame.filter(items=None)
+ float_frame.filter(items=None)
with pytest.raises(TypeError, match='Must pass'):
- self.frame.filter(axis=1)
+ float_frame.filter(axis=1)
# test mutually exclusive arguments
with pytest.raises(TypeError, match='mutually exclusive'):
- self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
+ float_frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with pytest.raises(TypeError, match='mutually exclusive'):
- self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
+ float_frame.filter(items=['one', 'three'], regex='e$', axis=1)
with pytest.raises(TypeError, match='mutually exclusive'):
- self.frame.filter(items=['one', 'three'], regex='e$')
+ float_frame.filter(items=['one', 'three'], regex='e$')
with pytest.raises(TypeError, match='mutually exclusive'):
- self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
+ float_frame.filter(items=['one', 'three'], like='bbi', axis=0)
with pytest.raises(TypeError, match='mutually exclusive'):
- self.frame.filter(items=['one', 'three'], like='bbi')
+ float_frame.filter(items=['one', 'three'], like='bbi')
# objects
- filtered = self.mixed_frame.filter(like='foo')
+ filtered = float_string_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
- df = self.frame.rename(columns={'B': '\u2202'})
+ df = float_frame.rename(columns={'B': '\u2202'})
filtered = df.filter(like='C')
assert 'C' in filtered
- def test_filter_regex_search(self):
- fcopy = self.frame.copy()
+ def test_filter_regex_search(self, float_frame):
+ fcopy = float_frame.copy()
fcopy['AA'] = 1
# regex
@@ -895,10 +908,10 @@ def test_filter_corner(self):
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
- def test_take(self):
+ def test_take(self, float_frame):
# homogeneous
order = [3, 1, 2, 0]
- for df in [self.frame]:
+ for df in [float_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -911,7 +924,7 @@ def test_take(self):
# negative indices
order = [2, 1, -1]
- for df in [self.frame]:
+ for df in [float_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -941,9 +954,11 @@ def test_take(self):
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -5], axis=1)
+ def test_take_mixed_type(self, float_string_frame):
+
# mixed-dtype
order = [4, 1, 2, 0, 3]
- for df in [self.mixed_frame]:
+ for df in [float_string_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -956,7 +971,7 @@ def test_take(self):
# negative indices
order = [4, 1, -2]
- for df in [self.mixed_frame]:
+ for df in [float_string_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -967,9 +982,10 @@ def test_take(self):
expected = df.loc[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
+ def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame):
# by dtype
order = [1, 2, 0, 3]
- for df in [self.mixed_float, self.mixed_int]:
+ for df in [mixed_float_frame, mixed_int_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -993,49 +1009,49 @@ def test_reindex_boolean(self):
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
- def test_reindex_objects(self):
- reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
+ def test_reindex_objects(self, float_string_frame):
+ reindexed = float_string_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
- reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
+ reindexed = float_string_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
- def test_reindex_corner(self):
+ def test_reindex_corner(self, int_frame):
index = Index(['a', 'b', 'c'])
- dm = self.empty.reindex(index=[1, 2, 3])
+ dm = DataFrame({}).reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
- smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
+ smaller = int_frame.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
- def test_reindex_axis(self):
+ def test_reindex_axis(self, float_frame, int_frame):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
- reindexed1 = self.intframe.reindex_axis(cols, axis=1)
+ reindexed1 = int_frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
- reindexed2 = self.intframe.reindex(columns=cols)
+ reindexed2 = int_frame.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
- rows = self.intframe.index[0:5]
+ rows = int_frame.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
- reindexed1 = self.intframe.reindex_axis(rows, axis=0)
+ reindexed1 = int_frame.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
- reindexed2 = self.intframe.reindex(index=rows)
+ reindexed2 = int_frame.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
- self.intframe.reindex_axis(rows, axis=2)
+ int_frame.reindex_axis(rows, axis=2)
# no-op case
- cols = self.frame.columns.copy()
+ cols = float_frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
- newFrame = self.frame.reindex_axis(cols, axis=1)
+ newFrame = float_frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
- assert_frame_equal(newFrame, self.frame)
+ assert_frame_equal(newFrame, float_frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
| Picking up #22471 again. Needed to readd some fixtures that were removed by #24885.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25627 | 2019-03-10T14:33:05Z | 2019-06-28T12:13:22Z | 2019-06-28T12:13:22Z | 2019-06-28T12:28:09Z |
BUG: to_csv line endings with compression | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 2c6d1e01ed89b..0f603515c61cc 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -32,6 +32,7 @@ Fixed Regressions
- Fixed regression in creating a period-dtype array from a read-only NumPy array of period objects. (:issue:`25403`)
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
- Fixed pip installing from source into an environment without NumPy (:issue:`25193`)
+- Fixed regression in :meth:`DataFrame.to_csv` writing duplicate line endings with gzip compress (:issue:`25311`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/io/common.py b/pandas/io/common.py
index ad054d77b3bc8..c1cacf39c5b08 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -434,7 +434,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None,
if (compat.PY3 and is_text and
(compression or isinstance(f, need_text_wrapping))):
from io import TextIOWrapper
- f = TextIOWrapper(f, encoding=encoding)
+ f = TextIOWrapper(f, encoding=encoding, newline='')
handles.append(f)
if memory_map and hasattr(f, 'fileno'):
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 54a8712a9c645..59bf3d00f979c 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -1221,3 +1221,15 @@ def test_multi_index_header(self):
'1,5,6,7,8']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
+
+ def test_gz_lineend(self):
+ # GH 25311
+ df = pd.DataFrame({'a': [1, 2]})
+ expected_rows = ['a', '1', '2']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ with ensure_clean('__test_gz_lineend.csv.gz') as path:
+ df.to_csv(path, index=False)
+ with tm.decompress_file(path, compression='gzip') as f:
+ result = f.read().decode('utf-8')
+
+ assert result == expected
| - [x] closes #25311
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25625 | 2019-03-10T02:24:06Z | 2019-03-11T15:26:55Z | 2019-03-11T15:26:54Z | 2019-03-11T15:28:59Z |
DOC: file obj for to_csv must be newline='' | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index b23a0f10e9e2b..1b5d96fa9c146 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -1689,7 +1689,7 @@ The ``Series`` and ``DataFrame`` objects have an instance method ``to_csv`` whic
allows storing the contents of the object as a comma-separated-values file. The
function takes a number of arguments. Only the first is required.
-* ``path_or_buf``: A string path to the file to write or a StringIO
+* ``path_or_buf``: A string path to the file to write or a file object. If a file object it must be opened with `newline=''`
* ``sep`` : Field delimiter for the output file (default ",")
* ``na_rep``: A string representation of a missing value (default '')
* ``float_format``: Format string for floating point numbers
@@ -1702,7 +1702,7 @@ function takes a number of arguments. Only the first is required.
* ``mode`` : Python write mode, default 'w'
* ``encoding``: a string representing the encoding to use if the contents are
non-ASCII, for Python versions prior to 3
-* ``line_terminator``: Character sequence denoting line end (default '\\n')
+* ``line_terminator``: Character sequence denoting line end (default `os.linesep`)
* ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL). Note that if you have set a `float_format` then floats are converted to strings and csv.QUOTE_NONNUMERIC will treat them as non-numeric
* ``quotechar``: Character used to quote fields (default '"')
* ``doublequote``: Control quoting of ``quotechar`` in fields (default True)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0b81576404e2f..a37b745d3082a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2920,7 +2920,8 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
- a string.
+ a string. If a file object is passed it should be opened with
+ `newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
| - [x] closes #25048
| https://api.github.com/repos/pandas-dev/pandas/pulls/25624 | 2019-03-10T01:51:41Z | 2019-03-11T13:58:13Z | 2019-03-11T13:58:12Z | 2019-03-11T13:58:13Z |
WIP for MyPy CI Integration | diff --git a/.gitignore b/.gitignore
index 816aff376fc83..91e9d9be54b81 100644
--- a/.gitignore
+++ b/.gitignore
@@ -61,6 +61,7 @@ dist
.coverage
coverage.xml
coverage_html_report
+*.mypy_cache
*.pytest_cache
# hypothesis test database
.hypothesis/
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000000000..f8b37ee5b8663
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,6 @@
+[mypy]
+ignore_missing_imports=True
+follow_imports=silent
+
+[mypy-pandas.conftest,pandas.tests.*]
+ignore_errors=True
\ No newline at end of file
diff --git a/mypy_whitelist.txt b/mypy_whitelist.txt
new file mode 100644
index 0000000000000..89af127af1be7
--- /dev/null
+++ b/mypy_whitelist.txt
@@ -0,0 +1,9 @@
+pandas/core/dtypes/base.py
+pandas/core/groupby/groupby.py
+pandas/core/internals/managers.py
+pandas/core/common.py
+pandas/core/arrays/timedeltas.py
+pandas/core/arrays/datetimes.py
+pandas/core/arrays/base.py
+pandas/core/frame.py
+pandas/core/indexes/base.py
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py
index 41d623c7efd9c..db3e9805b531d 100644
--- a/pandas/core/arrays/array_.py
+++ b/pandas/core/arrays/array_.py
@@ -1,8 +1,13 @@
+from typing import Optional, Sequence, Union
+
+import numpy as np
+
from pandas._libs import lib, tslibs
+from pandas.core.arrays.base import ExtensionArray
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype, is_extension_array_dtype, is_timedelta64_ns_dtype)
-from pandas.core.dtypes.dtypes import registry
+from pandas.core.dtypes.dtypes import ExtensionDtype, registry
from pandas import compat
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index e770281596134..f7d427ce26e6a 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -6,6 +6,7 @@
without warning.
"""
import operator
+from typing import Any, Callable, Optional, Sequence, Tuple, Union
import numpy as np
@@ -15,6 +16,7 @@
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_list_like
+from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
@@ -365,7 +367,7 @@ def isna(self):
raise AbstractMethodError(self)
def _values_for_argsort(self):
- # type: () -> ndarray
+ # type: () -> np.ndarray
"""
Return values for sorting.
@@ -597,7 +599,7 @@ def searchsorted(self, value, side="left", sorter=None):
return arr.searchsorted(value, side=side, sorter=sorter)
def _values_for_factorize(self):
- # type: () -> Tuple[ndarray, Any]
+ # type: () -> Tuple[np.ndarray, Any]
"""
Return an array and missing value suitable for factorization.
@@ -622,7 +624,7 @@ def _values_for_factorize(self):
return self.astype(object), np.nan
def factorize(self, na_sentinel=-1):
- # type: (int) -> Tuple[ndarray, ExtensionArray]
+ # type: (int) -> Tuple[np.ndarray, ExtensionArray]
"""
Encode the extension array as an enumerated type.
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 94668c74c1693..aded02d2dbde0 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2,15 +2,17 @@
from datetime import datetime, timedelta
import operator
import warnings
+from typing import Union, Sequence, Tuple
import numpy as np
from pandas._libs import NaT, algos, iNaT, lib
+from pandas._libs.tslibs.nattype import NaTType
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ, IncompatibleFrequency, Period)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import (
- RoundTo, maybe_integer_op_deprecated, round_nsint64)
+ RoundTo, maybe_integer_op_deprecated, round_nsint64, Timestamp)
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import (
@@ -350,7 +352,7 @@ def __iter__(self):
@property
def asi8(self):
- # type: () -> ndarray
+ # type: () -> np.ndarray
"""
Integer representation of the values.
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 75cf658423210..c2d6a0228e154 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2,6 +2,7 @@
from datetime import datetime, time, timedelta
import textwrap
import warnings
+from typing import Union
import numpy as np
from pytz import utc
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index fd90aec3b5e8c..0144d04c6e197 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -510,7 +510,7 @@ def value_counts(self, dropna=True):
return Series(array, index=index)
def _values_for_argsort(self):
- # type: () -> ndarray
+ # type: () -> np.ndarray
"""Return values for sorting.
Returns
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 0ec1bc7a84231..fff4734479792 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1,17 +1,21 @@
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
+from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
from pandas._libs.tslibs import (
NaT, frequencies as libfrequencies, iNaT, period as libperiod)
+from pandas._libs.tslibs.nattype import NaTType
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr,
period_asfreq_arr)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
import pandas.compat as compat
+from pandas.core.arrays.base import ExtensionArray
+from pandas.core.indexes.base import Index
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
@@ -132,7 +136,7 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
_scalar_type = Period
# Names others delegate to us
- _other_ops = []
+ _other_ops = [] # type: List[str]
_bool_ops = ['is_leap_year']
_object_ops = ['start_time', 'end_time', 'freq']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 9be2c9af169e8..3cce41cde4865 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -7,12 +7,13 @@
import operator
import re
import warnings
+from typing import Any, Callable, Type, Union
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
-from pandas._libs.sparse import BlockIndex, IntIndex
+from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
@@ -79,7 +80,7 @@ class SparseDtype(ExtensionDtype):
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
- # type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
+ # type: (Union[str, np.dtype, 'ExtensionDtype', Type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
@@ -372,7 +373,7 @@ def _subtype_with_str(self):
def _get_fill(arr):
- # type: (SparseArray) -> ndarray
+ # type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 74fe8072e6924..d57fa0cfc2cbb 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -4,6 +4,7 @@
from datetime import timedelta
import textwrap
import warnings
+from typing import List
import numpy as np
@@ -134,8 +135,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
_scalar_type = Timedelta
__array_priority__ = 1000
# define my properties & methods for delegation
- _other_ops = []
- _bool_ops = []
+ _other_ops = [] # type: List[str]
+ _bool_ops = [] # type: List[str]
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
diff --git a/pandas/core/base.py b/pandas/core/base.py
index f896596dd5216..b357a47563f49 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -15,6 +15,7 @@
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._validators import validate_bool_kwarg
+from pandas.core.arrays.base import ExtensionArray
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype, is_datetime64tz_dtype, is_datetimelike,
is_extension_array_dtype, is_extension_type, is_list_like, is_object_dtype,
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 5b83cb344b1e7..77b7b94e7a1f7 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -9,6 +9,7 @@
from datetime import datetime, timedelta
from functools import partial
import inspect
+from typing import Any
import numpy as np
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 88bbdcf342d66..8269f8c88ffd3 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -1,4 +1,6 @@
"""Extend pandas with custom array types"""
+from typing import List, Optional, Type
+
import numpy as np
from pandas.errors import AbstractMethodError
@@ -211,7 +213,7 @@ def __str__(self):
@property
def type(self):
- # type: () -> type
+ # type: () -> Type
"""
The scalar type for the array, e.g. ``int``
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index eadffb779734f..ccf6d32a335cb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -19,6 +19,7 @@
import sys
import warnings
from textwrap import dedent
+from typing import List, Set, Union
import numpy as np
import numpy.ma as ma
@@ -365,7 +366,7 @@ def _constructor(self):
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['get_value', 'set_value', 'from_csv', 'from_items'])
- _accessors = set()
+ _accessors = set() # type: Set[str]
@property
def _constructor_expanddim(self):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 36dcb692bb079..8b91a7b244bf5 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -13,10 +13,11 @@ class providing the base-class of operations.
from functools import partial, wraps
import types
import warnings
+from typing import FrozenSet, Optional, Type
import numpy as np
-from pandas._libs import Timestamp, groupby as libgroupby
+from pandas._libs import Timestamp, groupby as libgroupby # type: ignore
import pandas.compat as compat
from pandas.compat import range, set_function_name, zip
from pandas.compat.numpy import function as nv
@@ -325,7 +326,7 @@ def _group_selection_context(groupby):
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
- _apply_whitelist = frozenset()
+ _apply_whitelist = frozenset() # type: FrozenSet[str]
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
@@ -1041,7 +1042,7 @@ def _bool_agg(self, val_test, skipna):
"""
def objs_to_bool(vals):
- # type: (np.ndarray) -> (np.ndarray, typing.Type)
+ # type: (np.ndarray) -> (np.ndarray, Type)
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
@@ -1050,7 +1051,7 @@ def objs_to_bool(vals):
return vals.view(np.uint8), np.bool
def result_to_bool(result, inference):
- # type: (np.ndarray, typing.Type) -> np.ndarray
+ # type: (np.ndarray, Type) -> np.ndarray
return result.astype(inference, copy=False)
return self._get_cythonized_result('group_any_all', self.grouper,
@@ -1743,7 +1744,7 @@ def quantile(self, q=0.5, interpolation='linear'):
"""
def pre_processor(vals):
- # type: (np.ndarray) -> (np.ndarray, Optional[typing.Type])
+ # type: (np.ndarray) -> (np.ndarray, Optional[Type])
if is_object_dtype(vals):
raise TypeError("'quantile' cannot be performed against "
"'object' dtypes!")
@@ -1758,7 +1759,7 @@ def pre_processor(vals):
return vals, inference
def post_processor(vals, inference):
- # type: (np.ndarray, Optional[typing.Type]) -> np.ndarray
+ # type: (np.ndarray, Optional[Type]) -> np.ndarray
if inference:
# Check for edge case
if not (is_integer_dtype(inference) and
@@ -2021,7 +2022,7 @@ def _get_cythonized_result(self, how, grouper, aggregate=False,
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
- (ndarray, typing.Type).
+ (ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index dee181fc1c569..653b4d3966804 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2,10 +2,11 @@
import operator
from textwrap import dedent
import warnings
+from typing import Union
import numpy as np
-from pandas._libs import (
+from pandas._libs import ( # type: ignore
algos as libalgos, index as libindex, join as libjoin, lib)
from pandas._libs.lib import is_datetime_array
from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index aa7332472fc07..4e7d1b8180b1b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -4,6 +4,7 @@
"""
import operator
import warnings
+from typing import Set
import numpy as np
@@ -698,9 +699,9 @@ class DatetimelikeDelegateMixin(PandasDelegate):
boxed in an index, after being returned from the array
"""
# raw_methods : dispatch methods that shouldn't be boxed in an Index
- _raw_methods = set()
+ _raw_methods = set() # type: Set[str]
# raw_properties : dispatch properties that shouldn't be boxed in an Index
- _raw_properties = set()
+ _raw_properties = set() # type: Set[str]
name = None
_data = None
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index ada663556899b..39b246eb63ddd 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -4,10 +4,12 @@
import inspect
import re
import warnings
+from typing import Any, List, Optional
import numpy as np
-from pandas._libs import internals as libinternals, lib, tslib, tslibs
+from pandas._libs import (internals as libinternals, # type: ignore
+ lib, tslib, tslibs)
from pandas._libs.tslibs import Timedelta, conversion, is_null_datetimelike
import pandas.compat as compat
from pandas.compat import range, zip
@@ -1826,8 +1828,12 @@ def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
limit=limit),
placement=self.mgr_locs)
- def shift(self, periods, axis=0, fill_value=None):
- # type: (int, Optional[BlockPlacement], Any) -> List[ExtensionBlock]
+ def shift(self,
+ periods, # type: int
+ axis=0, # type: Optional[libinternals.BlockPlacement]
+ fill_value=None # type: Any
+ ):
+ # type: (...) -> List[ExtensionBlock]
"""
Shift the block by `periods`.
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 407db772d73e8..92339a76358b5 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -4,10 +4,12 @@
import itertools
import operator
import re
+from typing import List, Optional, Union
import numpy as np
-from pandas._libs import internals as libinternals, lib
+from pandas._libs import internals as libinternals, lib # type: ignore
+from pandas.api.extensions import ExtensionDtype
from pandas.compat import map, range, zip
from pandas.util._validators import validate_bool_kwarg
| progress towards #25601
Not complete just pushing for now. Main goal here is to get a CI run that works for any modules existing with comments. | https://api.github.com/repos/pandas-dev/pandas/pulls/25622 | 2019-03-10T00:37:14Z | 2019-03-20T01:25:22Z | null | 2020-01-16T00:34:33Z |
BUG: Fix user-facing AssertionError with to_html (#25608) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index ea08a0a6fe07b..f5fa7a71e117c 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -214,7 +214,7 @@ I/O
- Bug in :func:`read_json` for ``orient='table'`` when it tries to infer dtypes by default, which is not applicable as dtypes are already defined in the JSON schema (:issue:`21345`)
- Bug in :func:`read_json` for ``orient='table'`` and float index, as it infers index dtype by default, which is not applicable because index dtype is already defined in the JSON schema (:issue:`25433`)
- Bug in :func:`read_json` for ``orient='table'`` and string of float column names, as it makes a column name type conversion to Timestamp, which is not applicable because column names are already defined in the JSON schema (:issue:`25435`)
--
+- :meth:`DataFrame.to_html` now raises ``TypeError`` when using an invalid type for the ``classes`` parameter instead of ``AsseertionError`` (:issue:`25608`)
-
-
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index 66d13bf2668f9..a543b21f287ec 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -163,8 +163,8 @@ def _write_table(self, indent=0):
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
- raise AssertionError('classes must be list or tuple, not {typ}'
- .format(typ=type(self.classes)))
+ raise TypeError('classes must be a string, list, or tuple, '
+ 'not {typ}'.format(typ=type(self.classes)))
_classes.extend(self.classes)
if self.table_id is None:
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 428f1411a10a6..9cb2704f65587 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -623,3 +623,13 @@ def test_ignore_display_max_colwidth(method, expected, max_colwidth):
result = getattr(df, method)()
expected = expected(max_colwidth)
assert expected in result
+
+
+@pytest.mark.parametrize("classes", [True, 0])
+def test_to_html_invalid_classes_type(classes):
+ # GH 25608
+ df = DataFrame()
+ msg = "classes must be a string, list, or tuple"
+
+ with pytest.raises(TypeError, match=msg):
+ df.to_html(classes=classes)
| - [x] closes #25608
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I did not use an oxford comma in the error description, which is how it was written in the issue's expected result. | https://api.github.com/repos/pandas-dev/pandas/pulls/25620 | 2019-03-09T16:58:22Z | 2019-03-10T21:36:39Z | 2019-03-10T21:36:39Z | 2019-03-10T21:36:42Z |
Json normalize nan support | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 1840c47b4054f..2cabbc2e400b3 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -214,10 +214,10 @@ I/O
- Bug in :func:`read_json` for ``orient='table'`` when it tries to infer dtypes by default, which is not applicable as dtypes are already defined in the JSON schema (:issue:`21345`)
- Bug in :func:`read_json` for ``orient='table'`` and float index, as it infers index dtype by default, which is not applicable because index dtype is already defined in the JSON schema (:issue:`25433`)
- Bug in :func:`read_json` for ``orient='table'`` and string of float column names, as it makes a column name type conversion to Timestamp, which is not applicable because column names are already defined in the JSON schema (:issue:`25435`)
+- Bug in :func:`json_normalize` for ``errors='ignore'`` where missing values in the input data, were filled in resulting ``DataFrame`` with the string "nan" instead of ``numpy.nan`` (:issue:`25468`)
- :meth:`DataFrame.to_html` now raises ``TypeError`` when using an invalid type for the ``classes`` parameter instead of ``AsseertionError`` (:issue:`25608`)
- Bug in :meth:`DataFrame.to_string` and :meth:`DataFrame.to_latex` that would lead to incorrect output when the ``header`` keyword is used (:issue:`16718`)
-
--
Plotting
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 279630ccd107c..7a8188dd07b6b 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -281,6 +281,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
- result[k] = np.array(v).repeat(lengths)
+ # forcing dtype to object to avoid the metadata being casted to string
+ result[k] = np.array(v, dtype=object).repeat(lengths)
return result
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index 3bf699cc8a1f0..5362274274d72 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -66,6 +66,25 @@ def author_missing_data():
}]
+@pytest.fixture
+def missing_metadata():
+ return [
+ {'name': 'Alice',
+ 'addresses': [{'number': 9562,
+ 'street': 'Morris St.',
+ 'city': 'Massillon',
+ 'state': 'OH',
+ 'zip': 44646}]
+ },
+ {'addresses': [{'number': 8449,
+ 'street': 'Spring St.',
+ 'city': 'Elizabethton',
+ 'state': 'TN',
+ 'zip': 37643}]
+ }
+ ]
+
+
class TestJSONNormalize(object):
def test_simple_records(self):
@@ -318,66 +337,51 @@ def test_nested_flattens(self):
assert result == expected
- def test_json_normalize_errors(self):
- # GH14583: If meta keys are not always present
- # a new option to set errors='ignore' has been implemented
- i = {
- "Trades": [{
- "general": {
- "tradeid": 100,
- "trade_version": 1,
- "stocks": [{
-
- "symbol": "AAPL",
- "name": "Apple",
- "price": "0"
- }, {
- "symbol": "GOOG",
- "name": "Google",
- "price": "0"
- }
- ]
- }
- }, {
- "general": {
- "tradeid": 100,
- "stocks": [{
- "symbol": "AAPL",
- "name": "Apple",
- "price": "0"
- }, {
- "symbol": "GOOG",
- "name": "Google",
- "price": "0"
- }
- ]
- }
- }
- ]
- }
- j = json_normalize(data=i['Trades'],
- record_path=[['general', 'stocks']],
- meta=[['general', 'tradeid'],
- ['general', 'trade_version']],
- errors='ignore')
- expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
- 'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
- 'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
- 'price': {0: '0', 1: '0', 2: '0', 3: '0'},
- 'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
-
- assert j.fillna('').to_dict() == expected
-
- msg = ("Try running with errors='ignore' as key 'trade_version'"
+ def test_json_normalize_errors(self, missing_metadata):
+ # GH14583:
+ # If meta keys are not always present a new option to set
+ # errors='ignore' has been implemented
+
+ msg = ("Try running with errors='ignore' as key 'name'"
" is not always present")
with pytest.raises(KeyError, match=msg):
json_normalize(
- data=i['Trades'],
- record_path=[['general', 'stocks']],
- meta=[['general', 'tradeid'],
- ['general', 'trade_version']],
+ data=missing_metadata,
+ record_path='addresses',
+ meta='name',
errors='raise')
+ def test_missing_meta(self, missing_metadata):
+ # GH25468
+ # If metadata is nullable with errors set to ignore, the null values
+ # should be numpy.nan values
+ result = json_normalize(
+ data=missing_metadata,
+ record_path='addresses',
+ meta='name',
+ errors='ignore')
+ ex_data = [
+ {'city': 'Massillon',
+ 'number': 9562,
+ 'state': 'OH',
+ 'street': 'Morris St.',
+ 'zip': 44646,
+ 'name': 'Alice'},
+ {'city': 'Elizabethton',
+ 'number': 8449,
+ 'state': 'TN',
+ 'street': 'Spring St.',
+ 'zip': 37643,
+ 'name': np.nan}
+ ]
+ ex_data = [
+ ['Massillon', 9562, 'OH', 'Morris St.', 44646, 'Alice'],
+ ['Elizabethton', 8449, 'TN', 'Spring St.', 37643, np.nan]
+ ]
+ columns = ['city', 'number', 'state', 'street', 'zip', 'name']
+ expected = DataFrame(ex_data, columns=columns)
+ tm.assert_frame_equal(result, expected)
+
def test_donot_drop_nonevalues(self):
# GH21356
data = [
| - [x] closes #25468
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25619 | 2019-03-09T16:58:10Z | 2019-03-13T17:10:37Z | 2019-03-13T17:10:37Z | 2019-03-13T18:37:43Z |
DOC: Cleanup docstring pandas.core.filter | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0b81576404e2f..0128f01bddd10 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4534,11 +4534,11 @@ def filter(self, items=None, like=None, regex=None, axis=None):
Parameters
----------
items : list-like
- List of axis to restrict to (must not all be present).
+ Keep labels from axis which are in items.
like : string
- Keep axis where "arg in col == True".
+ Keep labels from axis for which "like in label == True".
regex : string (regular expression)
- Keep axis with re.search(regex, col) == True.
+ Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
@@ -4561,7 +4561,7 @@ def filter(self, items=None, like=None, regex=None, axis=None):
Examples
--------
- >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])),
+ >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
| - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Made parameter description of the `pandas.core.filter` docstring clearer. PEP8 fix in example, see also comments in #20148.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25618 | 2019-03-09T16:35:19Z | 2019-03-09T18:52:21Z | 2019-03-09T18:52:21Z | 2019-03-09T19:19:45Z |
BLD: validate_docstrings.py add ignore_known_fail argument | diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 1c45c79ba7fba..43a1e5b3b4478 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -26,6 +26,8 @@
import importlib
import doctest
import tempfile
+import configparser
+import itertools
import ast
import textwrap
@@ -156,6 +158,19 @@ def error(code, **kwargs):
return (code, ERROR_MSGS[code].format(**kwargs))
+def get_setupcfg():
+ """
+ Returns a ConfigParser which reads the setup.cfg file in the root
+ directory.
+ """
+ setup_cfg = os.path.join(BASE_PATH, 'setup.cfg')
+ config = configparser.ConfigParser(inline_comment_prefixes='#')
+ config.optionxform = str
+ with open(setup_cfg, 'r') as file:
+ config.read_file(file)
+ return config
+
+
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
@@ -811,7 +826,7 @@ def validate_one(func_name):
'examples_errors': examples_errs}
-def validate_all(prefix, ignore_deprecated=False):
+def validate_all(prefix, ignore_deprecated=False, ignore_known_fail=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
@@ -832,6 +847,9 @@ def validate_all(prefix, ignore_deprecated=False):
"""
result = {}
seen = {}
+ if ignore_known_fail:
+ config = get_setupcfg()
+ known_fails = dict(config.items('known_fail'))
# functions from the API docs
api_doc_fnames = os.path.join(
@@ -846,6 +864,12 @@ def validate_all(prefix, ignore_deprecated=False):
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info['deprecated']:
continue
+ if ignore_known_fail and func_name in known_fails:
+ doc_info['errors'] = list(itertools.filterfalse(
+ lambda x: x[0] in known_fails[func_name].split(','),
+ doc_info['errors']))
+ if not doc_info['errors']:
+ continue
result[func_name] = doc_info
shared_code_key = doc_info['file'], doc_info['file_line']
@@ -869,13 +893,20 @@ def validate_all(prefix, ignore_deprecated=False):
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info['deprecated']:
continue
+ if ignore_known_fail and func_name in known_fails:
+ doc_info['errors'] = list(itertools.filterfalse(
+ lambda x: x[0] in known_fails[func_name].split(','),
+ doc_info['errors']))
+ if not doc_info['errors']:
+ continue
result[func_name] = doc_info
result[func_name]['in_api'] = False
return result
-def main(func_name, prefix, errors, output_format, ignore_deprecated):
+def main(func_name, prefix, errors, output_format, ignore_deprecated,
+ ignore_known_fail):
def header(title, width=80, char='#'):
full_line = char * width
side_len = (width - len(title) - 2) // 2
@@ -889,7 +920,7 @@ def header(title, width=80, char='#'):
exit_status = 0
if func_name is None:
- result = validate_all(prefix, ignore_deprecated)
+ result = validate_all(prefix, ignore_deprecated, ignore_known_fail)
if output_format == 'json':
output = json.dumps(result)
@@ -984,9 +1015,14 @@ def header(title, width=80, char='#'):
action='store_true', help='if this flag is set, '
'deprecated objects are ignored when validating '
'all docstrings')
+ argparser.add_argument('--ignore_known_fail', default=False,
+ action='store_true', help='if this flag is set, '
+ 'objects listed in setup.cfg as known_fail are '
+ 'ignored when validating all docstrings')
args = argparser.parse_args()
sys.exit(main(args.function, args.prefix,
args.errors.split(',') if args.errors else None,
args.format,
- args.ignore_deprecated))
+ args.ignore_deprecated,
+ args.ignore_known_fail))
diff --git a/setup.cfg b/setup.cfg
index 84b8f69a83f16..9ac5405c1ae3e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -173,3 +173,26 @@ skip=
pandas/_libs/tslibs/__init__.py
pandas/util/__init__.py
pandas/arrays/__init__.py
+
+[scripts:validate_docstrings]
+ [known_fail]
+ # function_name = comma separated error_codes commented reason for ignoring
+ pandas.Timestamp.ctime = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.date = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.dst = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.isocalendar = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.isoweekday = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.strftime = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.strptime = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.time = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.timestamp = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.timetuple = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.timetz = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.toordinal = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.tzname = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.utcoffset = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.utctimetuple = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timestamp.weekday = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timedelta.days = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timedelta.microseconds = GL01,GL02 # inherits from cpython datetime library
+ pandas.Timedelta.seconds = GL01,GL02 # inherits from cpython datetime library
| - [ ] closes #25120
a solution for the problem of inherited docstrings not passing docstring tests preventing errors to be added to the CI `code_checks.sh`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25617 | 2019-03-09T12:25:29Z | 2019-04-29T15:04:07Z | null | 2019-04-29T15:04:07Z |
Fix HTML syntax errors in README.md | diff --git a/README.md b/README.md
index ce22818705865..633673d5cd04f 100644
--- a/README.md
+++ b/README.md
@@ -26,8 +26,9 @@
<td>Package Status</td>
<td>
<a href="https://pypi.org/project/pandas/">
- <img src="https://img.shields.io/pypi/status/pandas.svg" alt="status" /></td>
+ <img src="https://img.shields.io/pypi/status/pandas.svg" alt="status" />
</a>
+ </td>
</tr>
<tr>
<td>License</td>
@@ -73,8 +74,8 @@
<td>Gitter</td>
<td>
<a href="https://gitter.im/pydata/pandas">
- <img src="https://badges.gitter.im/Join%20Chat.svg"
- </a>
+ <img src="https://badges.gitter.im/Join%20Chat.svg" />
+ </a>
</td>
</tr>
</table>
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25615 | 2019-03-09T10:58:49Z | 2019-03-09T16:32:44Z | 2019-03-09T16:32:44Z | 2019-03-09T16:32:46Z |
Use Sphinx RTD Theme | diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml
index 6f33bc58a8b21..45ba1671b2507 100644
--- a/ci/deps/travis-36-doc.yaml
+++ b/ci/deps/travis-36-doc.yaml
@@ -34,6 +34,7 @@ dependencies:
- scipy
- seaborn
- sphinx
+ - sphinx_rtd_theme
- sqlalchemy
- statsmodels
- xarray
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c59d28a6dc3ea..feb924ac41099 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -193,7 +193,7 @@
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'nature_with_gtoc'
+html_theme = 'sphinx_rtd_theme'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
@@ -203,10 +203,13 @@
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-# html_theme_options = {}
+html_theme_options = {
+ 'collapse_navigation': False,
+ 'navigation_depth': 2,
+}
# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ['themes']
+# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
diff --git a/doc/source/themes/nature_with_gtoc/layout.html b/doc/source/themes/nature_with_gtoc/layout.html
deleted file mode 100644
index a2106605c5562..0000000000000
--- a/doc/source/themes/nature_with_gtoc/layout.html
+++ /dev/null
@@ -1,108 +0,0 @@
-{#
-
-Subset of agogo theme
-agogo/layout.html
-
-Sphinx layout template for the agogo theme, originally written
-by Andi Albrecht.
-
-:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
-:license: BSD, see LICENSE for details.
-#}
-{% extends "basic/layout.html" %}
-
-{%- block content %}
-<div class="content-wrapper">
- <div class="content">
- <div class="document">
- <div class="sphinxsidebar">
- {%- block sidebar1 %}
- {%- block sidebartoc %}
- <h3>{{ _('Table Of Contents') }}</h3>
- {{ toctree() }}
- {%- endblock %}
- {%- block sidebarsearch %}
- <h3 style="margin-top: 1.5em;">{{ _('Search') }}</h3>
-
- <form class="search" action="{{ pathto('search') }}" method="get">
- <input type="text" name="q" size="18"/>
- <input type="submit" value="{{ _('Go') }}"/>
- <input type="hidden" name="check_keywords" value="yes"/>
- <input type="hidden" name="area" value="default"/>
- </form>
- <p class="searchtip" style="font-size: 90%">
- {{ _('Enter search terms or a module, class or function name.') }}
- </p>
-
- </div>
- {%- endblock %}
- {# possible location for sidebar #} {% endblock %}
-
-
- {%- block document %}
- <div class="documentwrapper">
- {%- if render_sidebar %}
- <div class="bodywrapper">
- {%- endif %}
- <div class="body">
- {% block body %} {% endblock %}
- </div>
- {%- if render_sidebar %}
- </div>
- {%- endif %}
- </div>
- {%- endblock %}
-
- {%- block sidebar2 %}
-
- {% endblock %}
- <div class="clearer"></div>
- </div>
- </div>
-</div>
-{%- endblock %}
-
-{%- block footer %}
-<style type="text/css">
- .scrollToTop {
- text-align: center;
- font-weight: bold;
- position: fixed;
- bottom: 60px;
- right: 40px;
- display: none;
- }
-</style>
-<a href="#" class="scrollToTop">Scroll To Top</a>
-<script type="text/javascript">
-$(document).ready(function() {
- //Check to see if the window is top if not then display button
- $(window).scroll(function() {
- if ($(this).scrollTop() > 200) {
- $('.scrollToTop').fadeIn();
- } else {
- $('.scrollToTop').fadeOut();
- }
- });
-
- //Click event to scroll to top
- $('.scrollToTop').click(function() {
- $('html, body').animate({
- scrollTop: 0
- }, 500);
- return false;
- });
-});
-</script>
-<script type="text/javascript">
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', 'UA-27880019-2']);
- _gaq.push(['_trackPageview']);
-
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
-</script>
-{% endblock %}
\ No newline at end of file
diff --git a/doc/source/themes/nature_with_gtoc/static/nature.css_t b/doc/source/themes/nature_with_gtoc/static/nature.css_t
deleted file mode 100644
index 4571d97ec50ba..0000000000000
--- a/doc/source/themes/nature_with_gtoc/static/nature.css_t
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * nature.css_t
- * ~~~~~~~~~~~~
- *
- * Sphinx stylesheet -- nature theme.
- *
- * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-@import url("basic.css");
-
-/* -- page layout ----------------------------------------------------------- */
-
-body {
- font-family: Arial, sans-serif;
- font-size: 100%;
- background-color: #111;
- color: #555;
- margin: 0;
- padding: 0;
-}
-
-
-div.documentwrapper {
- width: 100%;
-}
-
-div.bodywrapper {
-/* ugly hack, probably not attractive with other font size for re*/
- margin: 0 0 0 {{ theme_sidebarwidth|toint}}px;
- min-width: 540px;
- max-width: 800px;
-}
-
-
-hr {
- border: 1px solid #B1B4B6;
-}
-
-div.document {
- background-color: #eee;
-}
-
-div.body {
- background-color: #ffffff;
- color: #3E4349;
- padding: 0 30px 30px 30px;
- font-size: 0.9em;
-}
-
-div.footer {
- color: #555;
- width: 100%;
- padding: 13px 0;
- text-align: center;
- font-size: 75%;
-}
-
-div.footer a {
- color: #444;
- text-decoration: underline;
-}
-
-div.related {
- background-color: #6BA81E;
- line-height: 32px;
- color: #fff;
- text-shadow: 0px 1px 0 #444;
- font-size: 0.9em;
-}
-
-div.related a {
- color: #E2F3CC;
-}
-
-div.sphinxsidebar {
- font-size: 0.75em;
- line-height: 1.5em;
- width: {{ theme_sidebarwidth|toint }}px;
- margin: 0 ;
- float: left;
-
- background-color: #eee;
-}
-/*
-div.sphinxsidebarwrapper{
- padding: 20px 0;
-}
-*/
-div.sphinxsidebar h3,
-div.sphinxsidebar h4 {
- font-family: Arial, sans-serif;
- color: #222;
- font-size: 1.2em;
- font-weight: normal;
- margin: 20px 0 0 0;
- padding: 5px 10px;
- background-color: #ddd;
- text-shadow: 1px 1px 0 white
-}
-
-div.sphinxsidebar h4{
- font-size: 1.1em;
-}
-
-div.sphinxsidebar h3 a {
- color: #444;
-}
-
-
-div.sphinxsidebar p {
- color: #888;
-/* padding: 5px 20px;*/
-}
-
-div.sphinxsidebar p.searchtip {
- color: #888;
- padding: 5px 20px;
-}
-
-
-div.sphinxsidebar p.topless {
-}
-
-div.sphinxsidebar ul {
- margin: 10px 20px;
- padding: 0;
- color: #000;
-}
-
-div.sphinxsidebar a {
- color: #444;
-}
-
-div.sphinxsidebar input {
- border: 1px solid #ccc;
- font-family: sans-serif;
- font-size: 1em;
-}
-
-div.sphinxsidebar input[type=text]{
- margin-left: 20px;
-}
-
-/* -- body styles ----------------------------------------------------------- */
-
-a {
- color: #005B81;
- text-decoration: none;
-}
-
-a:hover {
- color: #E32E00;
- text-decoration: underline;
-}
-
-div.body h1,
-div.body h2,
-div.body h3,
-div.body h4,
-div.body h5,
-div.body h6 {
- font-family: Arial, sans-serif;
- background-color: #BED4EB;
- font-weight: normal;
- color: #212224;
- margin: 30px 0px 10px 0px;
- padding: 5px 0 5px 10px;
- text-shadow: 0px 1px 0 white
-}
-
-div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; }
-div.body h2 { font-size: 150%; background-color: #C8D5E3; }
-div.body h3 { font-size: 120%; background-color: #D8DEE3; }
-div.body h4 { font-size: 110%; background-color: #D8DEE3; }
-div.body h5 { font-size: 100%; background-color: #D8DEE3; }
-div.body h6 { font-size: 100%; background-color: #D8DEE3; }
-
-p.rubric {
- border-bottom: 1px solid rgb(201, 201, 201);
-}
-
-a.headerlink {
- color: #c60f0f;
- font-size: 0.8em;
- padding: 0 4px 0 4px;
- text-decoration: none;
-}
-
-a.headerlink:hover {
- background-color: #c60f0f;
- color: white;
-}
-
-div.body p, div.body dd, div.body li {
- line-height: 1.5em;
-}
-
-div.admonition p.admonition-title + p, div.deprecated p {
- display: inline;
-}
-
-div.deprecated {
- margin-bottom: 10px;
- margin-top: 10px;
- padding: 7px;
- background-color: #ffe4e4;
- border: 1px solid #f66;
-}
-
-div.highlight{
- background-color: white;
-}
-
-div.note {
- background-color: #eee;
- border: 1px solid #ccc;
-}
-
-div.seealso {
- background-color: #ffc;
- border: 1px solid #ff6;
-}
-
-div.topic {
- background-color: #eee;
-}
-
-div.warning {
- background-color: #ffe4e4;
- border: 1px solid #f66;
-}
-
-p.admonition-title {
- display: inline;
-}
-
-p.admonition-title:after {
- content: ":";
-}
-
-pre {
- padding: 10px;
- background-color: rgb(250,250,250);
- color: #222;
- line-height: 1.2em;
- border: 1px solid rgb(201,201,201);
- font-size: 1.1em;
- margin: 1.5em 0 1.5em 0;
- -webkit-box-shadow: 1px 1px 1px #d8d8d8;
- -moz-box-shadow: 1px 1px 1px #d8d8d8;
-}
-
-tt {
- background-color: #ecf0f3;
- color: #222;
- /* padding: 1px 2px; */
- font-size: 1.1em;
- font-family: monospace;
-}
-
-.viewcode-back {
- font-family: Arial, sans-serif;
-}
-
-div.viewcode-block:target {
- background-color: #f4debf;
- border-top: 1px solid #ac9;
- border-bottom: 1px solid #ac9;
-}
-
-
-/**
- * Styling for field lists
- */
-
- /* grey highlighting of 'parameter' and 'returns' field */
-table.field-list {
- border-collapse: separate;
- border-spacing: 10px;
- margin-left: 1px;
- /* border-left: 5px solid rgb(238, 238, 238) !important; */
-}
-
-table.field-list th.field-name {
- /* display: inline-block; */
- padding: 1px 8px 1px 5px;
- white-space: nowrap;
- background-color: rgb(238, 238, 238);
-}
-
-/* italic font for parameter types */
-table.field-list td.field-body > p {
- font-style: italic;
-}
-
-table.field-list td.field-body > p > strong {
- font-style: normal;
-}
-
-/* reduced space around parameter description */
-td.field-body blockquote {
- border-left: none;
- margin: 0em 0em 0.3em;
- padding-left: 30px;
-}
-
-// Adapted from the new Jupyter notebook style
-// https://github.com/jupyter/notebook/blob/c8841b68c4c0739bbee1291e0214771f24194079/notebook/static/notebook/less/renderedhtml.less#L59
-table {
- margin-left: auto;
- margin-right: auto;
- border: none;
- border-collapse: collapse;
- border-spacing: 0;
- color: @rendered_html_border_color;
- table-layout: fixed;
-}
-thead {
- border-bottom: 1px solid @rendered_html_border_color;
- vertical-align: bottom;
-}
-tr, th, td {
- vertical-align: middle;
- padding: 0.5em 0.5em;
- line-height: normal;
- white-space: normal;
- max-width: none;
- border: none;
-}
-th {
- font-weight: bold;
-}
-th.col_heading {
- text-align: right;
-}
-tbody tr:nth-child(odd) {
- background: #f5f5f5;
-}
-
-table td.data, table th.row_heading table th.col_heading {
- font-family: monospace;
- text-align: right;
-}
-
-
-/**
- * See also
- */
-
-div.seealso dd {
- margin-top: 0;
- margin-bottom: 0;
-}
diff --git a/doc/source/themes/nature_with_gtoc/theme.conf b/doc/source/themes/nature_with_gtoc/theme.conf
deleted file mode 100644
index 290a07bde8806..0000000000000
--- a/doc/source/themes/nature_with_gtoc/theme.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[theme]
-inherit = basic
-stylesheet = nature.css
-pygments_style = tango
-
-[options]
-sidebarwidth = 270
diff --git a/environment.yml b/environment.yml
index c1669c9f49017..c20a3c0b4af6a 100644
--- a/environment.yml
+++ b/environment.yml
@@ -22,6 +22,7 @@ dependencies:
- pytest>=4.0.2
- pytest-mock
- sphinx
+ - sphinx_rtd_theme
- numpydoc
# optional
diff --git a/requirements-dev.txt b/requirements-dev.txt
index be84c6f29fdeb..18b9fbb6b2ed0 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -13,6 +13,7 @@ moto
pytest>=4.0.2
pytest-mock
sphinx
+sphinx_rtd_theme
numpydoc
beautifulsoup4>=4.2.1
blosc
| - [X] closes #15556
TOC on the left hand side is pinned during navigation and also expandable/collapsible
<img width="1089" alt="image" src="https://user-images.githubusercontent.com/609873/54065608-3cc81e00-41d8-11e9-830d-124d759bf791.png">
<img width="1099" alt="image" src="https://user-images.githubusercontent.com/609873/54065614-62552780-41d8-11e9-8edc-3bd7db452ca1.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/25614 | 2019-03-09T03:29:55Z | 2019-03-14T21:28:55Z | null | 2020-01-16T00:34:32Z |
make core.config self-contained | diff --git a/pandas/core/config.py b/pandas/core/config.py
index b6264a5257dcb..306311dc8ad5c 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -53,9 +53,6 @@
import re
import warnings
-import pandas.compat as compat
-from pandas.compat import lmap, map, u
-
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple('RegisteredOption',
'key defval doc validator cb')
@@ -140,7 +137,7 @@ def _describe_option(pat='', _print_desc=True):
if len(keys) == 0:
raise OptionError('No such keys(s)')
- s = u('')
+ s = ''
for k in keys: # filter by pat
s += _build_option_description(k)
@@ -634,7 +631,7 @@ def _build_option_description(k):
o = _get_registered_option(k)
d = _get_deprecated_option(k)
- s = u('{k} ').format(k=k)
+ s = '{k} '.format(k=k)
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
@@ -642,14 +639,14 @@ def _build_option_description(k):
s += 'No description available.'
if o:
- s += (u('\n [default: {default}] [currently: {current}]')
+ s += ('\n [default: {default}] [currently: {current}]'
.format(default=o.defval, current=_get_option(k, True)))
if d:
- s += u('\n (Deprecated')
- s += (u(', use `{rkey}` instead.')
+ s += '\n (Deprecated'
+ s += (', use `{rkey}` instead.'
.format(rkey=d.rkey if d.rkey else ''))
- s += u(')')
+ s += ')'
return s
@@ -776,8 +773,7 @@ def is_instance_factory(_type):
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
- from pandas.io.formats.printing import pprint_thing
- type_repr = "|".join(map(pprint_thing, _type))
+ type_repr = "|".join(map(str, _type))
else:
type_repr = "'{typ}'".format(typ=_type)
@@ -795,11 +791,11 @@ def is_one_of_factory(legal_values):
legal_values = [c for c in legal_values if not callable(c)]
def inner(x):
- from pandas.io.formats.printing import pprint_thing as pp
if x not in legal_values:
if not any(c(x) for c in callables):
- pp_values = pp("|".join(lmap(pp, legal_values)))
+ uvals = [str(lval) for lval in legal_values]
+ pp_values = "|".join(uvals)
msg = "Value must be one of {pp_values}"
if len(callables):
msg += " or a callable"
@@ -814,7 +810,6 @@ def inner(x):
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
-is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
| Broken off from #25176 | https://api.github.com/repos/pandas-dev/pandas/pulls/25613 | 2019-03-09T03:17:00Z | 2019-03-19T23:28:30Z | 2019-03-19T23:28:29Z | 2019-03-19T23:32:20Z |
DOC: Remove makePanel from docs (#25609) | diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/getting_started/dsintro.rst
index c8a2399739cd5..373cffd30ff14 100644
--- a/doc/source/getting_started/dsintro.rst
+++ b/doc/source/getting_started/dsintro.rst
@@ -1030,47 +1030,3 @@ method:
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['a', 'b', 'c', 'd'])
panel.to_frame()
-
-
-.. _dsintro.deprecate_panel:
-
-Deprecate Panel
----------------
-
-Over the last few years, pandas has increased in both breadth and depth, with new features,
-datatype support, and manipulation routines. As a result, supporting efficient indexing and functional
-routines for ``Series``, ``DataFrame`` and ``Panel`` has contributed to an increasingly fragmented and
-difficult-to-understand code base.
-
-The 3-D structure of a ``Panel`` is much less common for many types of data analysis,
-than the 1-D of the ``Series`` or the 2-D of the ``DataFrame``. Going forward it makes sense for
-pandas to focus on these areas exclusively.
-
-Oftentimes, one can simply use a MultiIndex ``DataFrame`` for easily working with higher dimensional data.
-
-In addition, the ``xarray`` package was built from the ground up, specifically in order to
-support the multi-dimensional analysis that is one of ``Panel`` s main use cases.
-`Here is a link to the xarray panel-transition documentation <https://xarray.pydata.org/en/stable/pandas.html#panel-transition>`__.
-
-.. ipython:: python
- :okwarning:
-
- import pandas.util.testing as tm
- p = tm.makePanel()
- p
-
-Convert to a MultiIndex DataFrame.
-
-.. ipython:: python
- :okwarning:
-
- p.to_frame()
-
-Alternatively, one can convert to an xarray ``DataArray``.
-
-.. ipython:: python
- :okwarning:
-
- p.to_xarray()
-
-You can see the full-documentation for the `xarray package <https://xarray.pydata.org/en/stable/>`__.
diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst
index 8a89450be2f48..161b0ef395f05 100644
--- a/doc/source/whatsnew/v0.13.1.rst
+++ b/doc/source/whatsnew/v0.13.1.rst
@@ -222,60 +222,155 @@ Enhancements
- Panel :meth:`~pandas.Panel.apply` will work on non-ufuncs. See :ref:`the docs<basics.apply>`.
- .. ipython:: python
+ .. code-block:: ipython
+
+ In [28]: import pandas.util.testing as tm
+
+ In [29]: panel = tm.makePanel(5)
- import pandas.util.testing as tm
- panel = tm.makePanel(5)
- panel
- panel['ItemA']
+ In [30]: panel
+ Out[30]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
+ Items axis: ItemA to ItemC
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
+ Minor_axis axis: A to D
+
+ In [31]: panel['ItemA']
+ Out[31]:
+ A B C D
+ 2000-01-03 -0.673690 0.577046 -1.344312 -1.469388
+ 2000-01-04 0.113648 -1.715002 0.844885 0.357021
+ 2000-01-05 -1.478427 -1.039268 1.075770 -0.674600
+ 2000-01-06 0.524988 -0.370647 -0.109050 -1.776904
+ 2000-01-07 0.404705 -1.157892 1.643563 -0.968914
+
+ [5 rows x 4 columns]
Specifying an ``apply`` that operates on a Series (to return a single element)
- .. ipython:: python
+ .. code-block:: ipython
+
+ In [32]: panel.apply(lambda x: x.dtype, axis='items')
+ Out[32]:
+ A B C D
+ 2000-01-03 float64 float64 float64 float64
+ 2000-01-04 float64 float64 float64 float64
+ 2000-01-05 float64 float64 float64 float64
+ 2000-01-06 float64 float64 float64 float64
+ 2000-01-07 float64 float64 float64 float64
- panel.apply(lambda x: x.dtype, axis='items')
+ [5 rows x 4 columns]
A similar reduction type operation
- .. ipython:: python
+ .. code-block:: ipython
+
+ In [33]: panel.apply(lambda x: x.sum(), axis='major_axis')
+ Out[33]:
+ ItemA ItemB ItemC
+ A -1.108775 -1.090118 -2.984435
+ B -3.705764 0.409204 1.866240
+ C 2.110856 2.960500 -0.974967
+ D -4.532785 0.303202 -3.685193
- panel.apply(lambda x: x.sum(), axis='major_axis')
+ [4 rows x 3 columns]
This is equivalent to
- .. ipython:: python
+ .. code-block:: ipython
+
+ In [34]: panel.sum('major_axis')
+ Out[34]:
+ ItemA ItemB ItemC
+ A -1.108775 -1.090118 -2.984435
+ B -3.705764 0.409204 1.866240
+ C 2.110856 2.960500 -0.974967
+ D -4.532785 0.303202 -3.685193
- panel.sum('major_axis')
+ [4 rows x 3 columns]
A transformation operation that returns a Panel, but is computing
the z-score across the major_axis
- .. ipython:: python
+ .. code-block:: ipython
- result = panel.apply(lambda x: (x - x.mean()) / x.std(),
- axis='major_axis')
- result
- result['ItemA']
+ In [35]: result = panel.apply(lambda x: (x - x.mean()) / x.std(),
+ ....: axis='major_axis')
+ ....:
+
+ In [36]: result
+ Out[36]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
+ Items axis: ItemA to ItemC
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
+ Minor_axis axis: A to D
+
+ In [37]: result['ItemA'] # noqa E999
+ Out[37]:
+ A B C D
+ 2000-01-03 -0.535778 1.500802 -1.506416 -0.681456
+ 2000-01-04 0.397628 -1.108752 0.360481 1.529895
+ 2000-01-05 -1.489811 -0.339412 0.557374 0.280845
+ 2000-01-06 0.885279 0.421830 -0.453013 -1.053785
+ 2000-01-07 0.742682 -0.474468 1.041575 -0.075499
+
+ [5 rows x 4 columns]
- Panel :meth:`~pandas.Panel.apply` operating on cross-sectional slabs. (:issue:`1148`)
- .. ipython:: python
+ .. code-block:: ipython
- def f(x):
- return ((x.T - x.mean(1)) / x.std(1)).T
+ In [38]: def f(x):
+ ....: return ((x.T - x.mean(1)) / x.std(1)).T
+ ....:
- result = panel.apply(f, axis=['items', 'major_axis'])
- result
- result.loc[:, :, 'ItemA']
+ In [39]: result = panel.apply(f, axis=['items', 'major_axis'])
- This is equivalent to the following
+ In [40]: result
+ Out[40]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 4 (items) x 5 (major_axis) x 3 (minor_axis)
+ Items axis: A to D
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
+ Minor_axis axis: ItemA to ItemC
- .. ipython:: python
+ In [41]: result.loc[:, :, 'ItemA']
+ Out[41]:
+ A B C D
+ 2000-01-03 0.012922 -0.030874 -0.629546 -0.757034
+ 2000-01-04 0.392053 -1.071665 0.163228 0.548188
+ 2000-01-05 -1.093650 -0.640898 0.385734 -1.154310
+ 2000-01-06 1.005446 -1.154593 -0.595615 -0.809185
+ 2000-01-07 0.783051 -0.198053 0.919339 -1.052721
+
+ [5 rows x 4 columns]
- result = pd.Panel({ax: f(panel.loc[:, :, ax]) for ax in panel.minor_axis})
+ This is equivalent to the following
+
+ .. code-block:: ipython
- result
- result.loc[:, :, 'ItemA']
+ In [42]: result = pd.Panel({ax: f(panel.loc[:, :, ax]) for ax in panel.minor_axis})
+
+ In [43]: result
+ Out[43]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 4 (items) x 5 (major_axis) x 3 (minor_axis)
+ Items axis: A to D
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
+ Minor_axis axis: ItemA to ItemC
+
+ In [44]: result.loc[:, :, 'ItemA']
+ Out[44]:
+ A B C D
+ 2000-01-03 0.012922 -0.030874 -0.629546 -0.757034
+ 2000-01-04 0.392053 -1.071665 0.163228 0.548188
+ 2000-01-05 -1.093650 -0.640898 0.385734 -1.154310
+ 2000-01-06 1.005446 -1.154593 -0.595615 -0.809185
+ 2000-01-07 0.783051 -0.198053 0.919339 -1.052721
+
+ [5 rows x 4 columns]
Performance
~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index c720e075012eb..26fdee4685c4b 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -45,11 +45,6 @@ Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations
New features
~~~~~~~~~~~~
-.. ipython:: python
- :suppress:
-
- import pandas.util.testing as tm
-
.. _whatsnew_0200.enhancements.agg:
``agg`` API for DataFrame/Series
@@ -1363,24 +1358,65 @@ Deprecate Panel
with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`).
-.. ipython:: python
- :okwarning:
+.. code-block:: ipython
- p = tm.makePanel()
- p
+ In [133]: import pandas.util.testing as tm
+
+ In [134]: p = tm.makePanel()
+
+ In [135]: p
+ Out[135]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 3 (items) x 3 (major_axis) x 4 (minor_axis)
+ Items axis: ItemA to ItemC
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-05 00:00:00
+ Minor_axis axis: A to D
Convert to a MultiIndex DataFrame
-.. ipython:: python
+.. code-block:: ipython
- p.to_frame()
+ In [136]: p.to_frame()
+ Out[136]:
+ ItemA ItemB ItemC
+ major minor
+ 2000-01-03 A 0.628776 -1.409432 0.209395
+ B 0.988138 -1.347533 -0.896581
+ C -0.938153 1.272395 -0.161137
+ D -0.223019 -0.591863 -1.051539
+ 2000-01-04 A 0.186494 1.422986 -0.592886
+ B -0.072608 0.363565 1.104352
+ C -1.239072 -1.449567 0.889157
+ D 2.123692 -0.414505 -0.319561
+ 2000-01-05 A 0.952478 -2.147855 -1.473116
+ B -0.550603 -0.014752 -0.431550
+ C 0.139683 -1.195524 0.288377
+ D 0.122273 -1.425795 -0.619993
+
+ [12 rows x 3 columns]
Convert to an xarray DataArray
-.. ipython:: python
- :okwarning:
+.. code-block:: ipython
- p.to_xarray()
+ In [137]: p.to_xarray()
+ Out[137]:
+ <xarray.DataArray (items: 3, major_axis: 3, minor_axis: 4)>
+ array([[[ 0.628776, 0.988138, -0.938153, -0.223019],
+ [ 0.186494, -0.072608, -1.239072, 2.123692],
+ [ 0.952478, -0.550603, 0.139683, 0.122273]],
+
+ [[-1.409432, -1.347533, 1.272395, -0.591863],
+ [ 1.422986, 0.363565, -1.449567, -0.414505],
+ [-2.147855, -0.014752, -1.195524, -1.425795]],
+
+ [[ 0.209395, -0.896581, -0.161137, -1.051539],
+ [-0.592886, 1.104352, 0.889157, -0.319561],
+ [-1.473116, -0.43155 , 0.288377, -0.619993]]])
+ Coordinates:
+ * items (items) object 'ItemA' 'ItemB' 'ItemC'
+ * major_axis (major_axis) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05
+ * minor_axis (minor_axis) object 'A' 'B' 'C' 'D'
.. _whatsnew_0200.api_breaking.deprecate_group_agg_dict:
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index e52a36a922bd9..7ec5a39c3d384 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -646,29 +646,65 @@ Deprecate Panel
with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`, :issue:`18324`).
-.. ipython:: python
- :suppress:
+.. code-block:: ipython
- import pandas.util.testing as tm
+ In [75]: import pandas.util.testing as tm
-.. ipython:: python
- :okwarning:
+ In [76]: p = tm.makePanel()
- p = tm.makePanel()
- p
+ In [77]: p
+ Out[77]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 3 (items) x 3 (major_axis) x 4 (minor_axis)
+ Items axis: ItemA to ItemC
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-05 00:00:00
+ Minor_axis axis: A to D
Convert to a MultiIndex DataFrame
-.. ipython:: python
+.. code-block:: ipython
- p.to_frame()
+ In [78]: p.to_frame()
+ Out[78]:
+ ItemA ItemB ItemC
+ major minor
+ 2000-01-03 A 0.469112 0.721555 0.404705
+ B -1.135632 0.271860 -1.039268
+ C 0.119209 0.276232 -1.344312
+ D -2.104569 0.113648 -0.109050
+ 2000-01-04 A -0.282863 -0.706771 0.577046
+ B 1.212112 -0.424972 -0.370647
+ C -1.044236 -1.087401 0.844885
+ D -0.494929 -1.478427 1.643563
+ 2000-01-05 A -1.509059 -1.039575 -1.715002
+ B -0.173215 0.567020 -1.157892
+ C -0.861849 -0.673690 1.075770
+ D 1.071804 0.524988 -1.469388
+
+ [12 rows x 3 columns]
Convert to an xarray DataArray
-.. ipython:: python
- :okwarning:
+.. code-block:: ipython
- p.to_xarray()
+ In [79]: p.to_xarray()
+ Out[79]:
+ <xarray.DataArray (items: 3, major_axis: 3, minor_axis: 4)>
+ array([[[ 0.469112, -1.135632, 0.119209, -2.104569],
+ [-0.282863, 1.212112, -1.044236, -0.494929],
+ [-1.509059, -0.173215, -0.861849, 1.071804]],
+
+ [[ 0.721555, 0.27186 , 0.276232, 0.113648],
+ [-0.706771, -0.424972, -1.087401, -1.478427],
+ [-1.039575, 0.56702 , -0.67369 , 0.524988]],
+
+ [[ 0.404705, -1.039268, -1.344312, -0.10905 ],
+ [ 0.577046, -0.370647, 0.844885, 1.643563],
+ [-1.715002, -1.157892, 1.07577 , -1.469388]]])
+ Coordinates:
+ * items (items) object 'ItemA' 'ItemB' 'ItemC'
+ * major_axis (major_axis) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05
+ * minor_axis (minor_axis) object 'A' 'B' 'C' 'D'
.. _whatsnew_0230.api_breaking.core_common:
| - [x] closes #25609
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As described in #25609, I removed the tm.makePanel() calls from the docs, along with some of the surrounding sections (a few "Deprecate Panel" sections). I did not replace these calls with an equivalent manual creation of a Panel.
If desired, I could create a panel manually in the docs with something like this:
```
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: tm.makeTimeDataFrame() for c in cols}
p = Panel.fromDict(data)
```
but this PR is currently as requested in the issue with the sections removed. | https://api.github.com/repos/pandas-dev/pandas/pulls/25612 | 2019-03-08T23:36:43Z | 2019-03-10T16:24:56Z | 2019-03-10T16:24:56Z | 2019-03-10T16:25:03Z |
Allows loc to create multiple new index | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index ea08a0a6fe07b..865ee3e832028 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -187,7 +187,7 @@ Interval
Indexing
^^^^^^^^
--
+- :meth:`loc` creates required missing columns and index if a list is passed except if the axis is a :meth:`MultiIndex`, as this would lead to ambiguities.
-
-
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
old mode 100755
new mode 100644
index 623a48acdd48b..9fc55b4dd6c21
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -328,6 +328,41 @@ def _setitem_with_indexer(self, indexer, value):
take_split_path = True
break
+ def can_use_idx_as_list(idx, axis):
+ """Returns True if we can safely wiew idx as a list on index"""
+ if self.name != 'loc':
+ # Loc is the only case where this is acceptable
+ return False
+ if isinstance(self.obj.axes[axis], MultiIndex):
+ return False
+ if not is_list_like_indexer(idx):
+ return False
+ return True
+
+ def reindex_axis_with_key(key, i):
+ """reindex the axis with the new key"""
+ # make sure to clear the cache because we are
+ # just replacing the block manager here
+ # so the object is the same
+ index = self.obj._get_axis(i)
+ idx_as_list = can_use_idx_as_list(key, i)
+ if idx_as_list:
+ labels = index
+ for k in key:
+ if k not in labels:
+ labels = labels.insert(len(labels), k)
+ else:
+ labels = index.insert(len(index), key)
+ self.obj._data = self.obj.reindex(labels, axis=i)._data
+ self.obj._maybe_update_cacher(clear=True)
+ self.obj._is_copy = None
+
+ if idx_as_list:
+ return self._get_listlike_indexer(
+ key, axis=i, raise_missing=True)[1]
+ else:
+ return labels.get_loc(key)
+
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
@@ -360,25 +395,28 @@ def _setitem_with_indexer(self, indexer, value):
return self.obj
# add a new item with the dtype setup
- self.obj[key] = _infer_fill_value(value)
-
- new_indexer = convert_from_missing_indexer_tuple(
- indexer, self.obj.axes)
+ labels = self.obj._get_axis(i)
+ # If use multilabel, can't create more than one axis
+ if can_use_idx_as_list(key, i):
+
+ for k in key:
+ if k not in labels:
+ # The dtype will be set later
+ self.obj[k] = np.nan
+ else:
+ self.obj[key] = _infer_fill_value(value)
+
+ new_indexer = tuple(
+ convert_missing_indexer(_idx)[0]
+ if isinstance(_idx, dict) else _idx
+ for _idx in indexer)
+ new_indexer = self._get_setitem_indexer(
+ new_indexer)
self._setitem_with_indexer(new_indexer, value)
return self.obj
- # reindex the axis
- # make sure to clear the cache because we are
- # just replacing the block manager here
- # so the object is the same
- index = self.obj._get_axis(i)
- labels = index.insert(len(index), key)
- self.obj._data = self.obj.reindex(labels, axis=i)._data
- self.obj._maybe_update_cacher(clear=True)
- self.obj._is_copy = None
-
- nindexer.append(labels.get_loc(key))
+ nindexer.append(reindex_axis_with_key(key, i))
else:
nindexer.append(idx)
@@ -389,10 +427,12 @@ def _setitem_with_indexer(self, indexer, value):
indexer, missing = convert_missing_indexer(indexer)
if missing:
+ if can_use_idx_as_list(indexer, 0):
+ indexer = reindex_axis_with_key(indexer, 0)
# reindex the axis to the new value
# and set inplace
- if self.ndim == 1:
+ elif self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
@@ -457,9 +497,6 @@ def _setitem_with_indexer(self, indexer, value):
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
- # set
- item_labels = self.obj._get_axis(info_axis)
-
# align and set the values
if take_split_path:
@@ -469,55 +506,37 @@ def _setitem_with_indexer(self, indexer, value):
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
- info_idx = indexer[info_axis]
- if is_integer(info_idx):
- info_idx = [info_idx]
- labels = item_labels[info_idx]
+ plane_indexer, lplane_indexer = self._get_plane_indexer(indexer)
- # if we have a partial multiindex, then need to adjust the plane
- # indexer here
+ # require that we are setting the right number of values that
+ # we are indexing
+ labels = self._get_labels(indexer)
if (len(labels) == 1 and
- isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
+ isinstance(self.obj[labels[0]].axes[0], MultiIndex) and
+ (is_list_like_indexer(value) and np.iterable(value) and
+ lplane_indexer != len(value))):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
- plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
- lplane_indexer = length_of_indexer(plane_indexer[0], index)
-
- # require that we are setting the right number of values that
- # we are indexing
- if is_list_like_indexer(value) and np.iterable(
- value) and lplane_indexer != len(value):
-
- if len(obj[idx]) != len(value):
- raise ValueError("cannot set using a multi-index "
- "selection indexer with a different "
- "length than the value")
-
- # make sure we have an ndarray
- value = getattr(value, 'values', value).ravel()
-
- # we can directly set the series here
- # as we select a slice indexer on the mi
- idx = index._convert_slice_indexer(idx)
- obj._consolidate_inplace()
- obj = obj.copy()
- obj._data = obj._data.setitem(indexer=tuple([idx]),
- value=value)
- self.obj[item] = obj
- return
+ if len(obj[idx]) != len(value):
+ raise ValueError("cannot set using a multi-index "
+ "selection indexer with a different "
+ "length than the value")
- # non-mi
- else:
- plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
- if info_axis > 0:
- plane_axis = self.obj.axes[:info_axis][0]
- lplane_indexer = length_of_indexer(plane_indexer[0],
- plane_axis)
- else:
- lplane_indexer = 0
+ # make sure we have an ndarray
+ value = getattr(value, 'values', value).ravel()
+
+ # we can directly set the series here
+ # as we select a slice indexer on the mi
+ idx = index._convert_slice_indexer(idx)
+ obj._consolidate_inplace()
+ obj = obj.copy()
+ obj._data = obj._data.setitem(indexer=tuple([idx]),
+ value=value)
+ self.obj[item] = obj
+ return
def setter(item, v):
s = self.obj[item]
@@ -543,82 +562,8 @@ def setter(item, v):
# reset the sliced object if unique
self.obj[item] = s
- def can_do_equal_len():
- """ return True if we have an equal len settable """
- if (not len(labels) == 1 or not np.iterable(value) or
- is_scalar(plane_indexer[0])):
- return False
-
- item = labels[0]
- index = self.obj[item].index
-
- values_len = len(value)
- # equal len list/ndarray
- if len(index) == values_len:
- return True
- elif lplane_indexer == values_len:
- return True
-
- return False
-
- # we need an iterable, with a ndim of at least 1
- # eg. don't pass through np.array(0)
- if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:
-
- # we have an equal len Frame
- if isinstance(value, ABCDataFrame) and value.ndim > 1:
- sub_indexer = list(indexer)
- multiindex_indexer = isinstance(labels, MultiIndex)
-
- for item in labels:
- if item in value:
- sub_indexer[info_axis] = item
- v = self._align_series(
- tuple(sub_indexer), value[item],
- multiindex_indexer)
- else:
- v = np.nan
-
- setter(item, v)
-
- # we have an equal len ndarray/convertible to our labels
- # hasattr first, to avoid coercing to ndarray without reason.
- # But we may be relying on the ndarray coercion to check ndim.
- # Why not just convert to an ndarray earlier on if needed?
- elif ((hasattr(value, 'ndim') and value.ndim == 2)
- or (not hasattr(value, 'ndim') and
- np.array(value).ndim) == 2):
-
- # note that this coerces the dtype if we are mixed
- # GH 7551
- value = np.array(value, dtype=object)
- if len(labels) != value.shape[1]:
- raise ValueError('Must have equal len keys and value '
- 'when setting with an ndarray')
-
- for i, item in enumerate(labels):
-
- # setting with a list, recoerces
- setter(item, value[:, i].tolist())
-
- # we have an equal len list/ndarray
- elif can_do_equal_len():
- setter(labels[0], value)
-
- # per label values
- else:
-
- if len(labels) != len(value):
- raise ValueError('Must have equal len keys and value '
- 'when setting with an iterable')
-
- for item, v in zip(labels, value):
- setter(item, v)
- else:
-
- # scalar
- for item in labels:
- setter(item, value)
+ for (item, v) in self._get_key_value_list(indexer, value):
+ setter(item, v)
else:
if isinstance(indexer, tuple):
@@ -627,6 +572,7 @@ def can_do_equal_len():
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
+ item_labels = self.obj._get_axis(info_axis)
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
all(com.is_null_slice(idx)
@@ -657,6 +603,127 @@ def can_do_equal_len():
value=value)
self.obj._maybe_update_cacher(clear=True)
+ def _can_do_equal_len(self, indexer, value):
+ """return True if we have an equal len settable"""
+ labels = self._get_labels(indexer)
+ plane_indexer, lplane_indexer = self._get_plane_indexer(indexer)
+ if (not len(labels) == 1 or not np.iterable(value) or
+ is_scalar(plane_indexer[0])):
+ return False
+
+ item = labels[0]
+ index = self.obj[item].index
+
+ values_len = len(value)
+ # equal len list/ndarray
+ if len(index) == values_len:
+ return True
+
+ if lplane_indexer == values_len:
+ return True
+
+ return False
+
+ def _get_key_value_list(self, indexer, value):
+ """Splits the value into a key value list to match the indexer"""
+ # we need an iterable, with a ndim of at least 1
+ # eg. don't pass through np.array(0)
+ info_axis = self.obj._info_axis_number
+ labels = self._get_labels(indexer)
+
+ if not(is_list_like_indexer(value) and
+ getattr(value, 'ndim', 1) > 0):
+ return [(item, value) for item in labels]
+
+ # we have an equal len Frame
+ if isinstance(value, ABCDataFrame) and value.ndim > 1:
+ sub_indexer = list(indexer)
+ multiindex_indexer = isinstance(labels, MultiIndex)
+
+ ret = []
+ for item in labels:
+ if item in value:
+ sub_indexer[info_axis] = item
+ value = self._align_series(
+ tuple(sub_indexer), value[item],
+ multiindex_indexer)
+ else:
+ value = np.nan
+ ret.append((item, value))
+ return ret
+
+ # we have an equal len ndarray/convertible to our labels
+ # hasattr first, to avoid coercing to ndarray without reason.
+ # But we may be relying on the ndarray coercion to check ndim.
+ # Why not just convert to an ndarray earlier on if needed?
+ elif ((hasattr(value, 'ndim') and value.ndim == 2)
+ or (not hasattr(value, 'ndim') and
+ np.array(value).ndim) == 2):
+
+ # note that this coerces the dtype if we are mixed
+ # GH 7551
+ value = np.array(value, dtype=object)
+ if len(labels) != value.shape[1]:
+ raise ValueError('Must have equal len keys and value '
+ 'when setting with an ndarray')
+
+ # setting with a list, recoerces
+ return [(item, value[:, i].tolist())
+ for i, item in enumerate(labels)]
+
+ # we have an equal len list/ndarray
+ elif self._can_do_equal_len(indexer, value):
+ return [(labels[0], value)]
+
+ # per label values
+ else:
+
+ if len(labels) != len(value):
+ raise ValueError('Must have equal len keys and value '
+ 'when setting with an iterable')
+ return [(item, v) for item, v in zip(labels, value)]
+
+ def _get_labels(self, indexer):
+ """
+ Get Labels from an indexer
+ """
+ info_axis = self.obj._info_axis_number
+ item_labels = self.obj._get_axis(info_axis)
+ info_idx = indexer[info_axis]
+ if is_integer(info_idx):
+ info_idx = [info_idx]
+ return item_labels[info_idx]
+
+ def _get_plane_indexer(self, indexer):
+ """
+ Get Plane indexer and corresponding length from indexer
+ """
+ # if we have a partial multiindex, then need to adjust the plane
+ # indexer here
+ info_axis = self.obj._info_axis_number
+ labels = self._get_labels(indexer)
+
+ if (len(labels) == 1 and
+ isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
+ item = labels[0]
+ obje = self.obj[item]
+ index = obje.index
+ idx = indexer[:info_axis][0]
+
+ plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
+ lplane_indexer = length_of_indexer(plane_indexer[0], index)
+
+ # non-mi
+ else:
+ plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
+ if info_axis > 0:
+ plane_axis = self.obj.axes[:info_axis][0]
+ lplane_indexer = length_of_indexer(plane_indexer[0],
+ plane_axis)
+ else:
+ lplane_indexer = 0
+ return plane_indexer, lplane_indexer
+
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
@@ -1349,10 +1416,15 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False,
inds, = obj.nonzero()
return inds
else:
- # When setting, missing keys are not allowed, even with .loc:
+ # When setting, missing keys are not allowed, except with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
- return self._get_listlike_indexer(obj, axis, **kwargs)[1]
+ try:
+ return self._get_listlike_indexer(obj, axis, **kwargs)[1]
+ except KeyError:
+ if is_setter and self.name == 'loc':
+ return {'key': obj}
+ raise
else:
try:
return labels.get_loc(obj)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c4f98b892feb7..e925e37ae5042 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -256,8 +256,8 @@ def test_loc_to_fail(self):
del s['a']
- with pytest.raises(KeyError, match=msg):
- s.loc[[-2]] = 0
+ s.loc[[-2]] = 0
+ tm.assert_series_equal(Series([1., 3, 0], index=[1, -1, -2]), s)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
@@ -741,16 +741,37 @@ def test_loc_setitem_empty_append(self):
df.loc[0, 'x'] = expected.loc[0, 'x']
tm.assert_frame_equal(df, expected)
+ data = [1, 2]
+ df = DataFrame(columns=['x', 'y'],
+ dtype=np.float)
+ df.loc[[0, 1], 'x'] = data
+ expected = DataFrame({'x': [1., 2.], 'y': [np.nan, np.nan]})
+ tm.assert_frame_equal(df, expected)
+
+ df = pd.DataFrame()
+ s = pd.Series({'float': 1.2, 'str': 'a'})
+ df.loc[1, ['str', 'float']] = s
+
+ expected = pd.DataFrame({'str': 'a', 'float': 1.2},
+ index=[1])
+ tm.assert_frame_equal(df, expected)
+
+ df = pd.DataFrame()
+ expected = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=[5, 6])
+ df.loc[[5, 6], ['a', 'b']] = expected
+ tm.assert_frame_equal(df, expected)
+
+ df = pd.DataFrame()
+ df.loc[('a', 'b'), 'c'] = 1
+
+ expected = pd.DataFrame([1., 1.], columns=['c'], index=['a', 'b'])
+ tm.assert_frame_equal(df, expected)
+
def test_loc_setitem_empty_append_raises(self):
# GH6173, various appends to an empty dataframe
data = [1, 2]
df = DataFrame(columns=['x', 'y'])
- msg = (r"None of \[Int64Index\(\[0, 1\], dtype='int64'\)\] "
- r"are in the \[index\]")
- with pytest.raises(KeyError, match=msg):
- df.loc[[0, 1], 'x'] = data
-
msg = "cannot copy sequence with size 2 to array axis with dimension 0"
with pytest.raises(ValueError, match=msg):
df.loc[0:2, 'x'] = data
diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py
index 8c1709ff016b3..a232ddb64224d 100644
--- a/pandas/tests/series/indexing/test_loc.py
+++ b/pandas/tests/series/indexing/test_loc.py
@@ -102,9 +102,9 @@ def test_loc_setitem_boolean(test_data):
def test_loc_setitem_corner(test_data):
inds = list(test_data.series.index[[5, 8, 12]])
test_data.series.loc[inds] = 5
- msg = r"\['foo'\] not in index"
- with pytest.raises(KeyError, match=msg):
- test_data.series.loc[inds + ['foo']] = 5
+ test_data.series.loc[inds + ['foo']] = 5
+ assert test_data.series.loc['foo'] == 5
+ assert test_data.series.loc[inds[0]] == 5
def test_basic_setitem_with_labels(test_data):
| - [x] closes #25594
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Starting in 0.21.0, using .loc or [] with a list with one or more missing labels, is deprecated. In the doc, only getting is shown:
https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#indexing-with-list-with-missing-labels-is-deprecated
As argued in #25594, this should not be the case for setting. This PR is therefore adding the possibility of adding index and columns when setting with array-like index.
If the axis is a MultiIndex, the expected result is not trivial, and is therefore not covered in this PR. | https://api.github.com/repos/pandas-dev/pandas/pulls/25607 | 2019-03-08T13:15:35Z | 2019-04-20T17:43:37Z | null | 2019-04-20T17:43:37Z |
STY: use pytest.raises context manager (generic) | diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index c2f6cbf4c564c..6f2707f764920 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import PY3, range, zip
+from pandas.compat import PY2, PY3, range, zip
from pandas.core.dtypes.common import is_scalar
@@ -16,8 +16,6 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
-import pandas.io.formats.printing as printing
-
# ----------------------------------------------------------------------
# Generic types test cases
@@ -135,37 +133,51 @@ def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
- pytest.raises(ValueError, lambda: bool(obj == 0))
- pytest.raises(ValueError, lambda: bool(obj == 1))
- pytest.raises(ValueError, lambda: bool(obj))
+ msg = "The truth value of a {} is ambiguous".format(
+ self._typ.__name__)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj == 0)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj == 1)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj)
obj = self._construct(shape=4, value=1)
- pytest.raises(ValueError, lambda: bool(obj == 0))
- pytest.raises(ValueError, lambda: bool(obj == 1))
- pytest.raises(ValueError, lambda: bool(obj))
+ with pytest.raises(ValueError, match=msg):
+ bool(obj == 0)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj == 1)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj)
obj = self._construct(shape=4, value=np.nan)
- pytest.raises(ValueError, lambda: bool(obj == 0))
- pytest.raises(ValueError, lambda: bool(obj == 1))
- pytest.raises(ValueError, lambda: bool(obj))
+ with pytest.raises(ValueError, match=msg):
+ bool(obj == 0)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj == 1)
+ with pytest.raises(ValueError, match=msg):
+ bool(obj)
# empty
obj = self._construct(shape=0)
- pytest.raises(ValueError, lambda: bool(obj))
+ with pytest.raises(ValueError, match=msg):
+ bool(obj)
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
- def f():
+ with pytest.raises(ValueError, match=msg):
if obj1:
- printing.pprint_thing("this works and shouldn't")
+ pass
- pytest.raises(ValueError, f)
- pytest.raises(ValueError, lambda: obj1 and obj2)
- pytest.raises(ValueError, lambda: obj1 or obj2)
- pytest.raises(ValueError, lambda: not obj1)
+ with pytest.raises(ValueError, match=msg):
+ obj1 and obj2
+ with pytest.raises(ValueError, match=msg):
+ obj1 or obj2
+ with pytest.raises(ValueError, match=msg):
+ not obj1
def test_downcast(self):
# test close downcasting
@@ -200,9 +212,10 @@ def test_constructor_compound_dtypes(self):
def f(dtype):
return self._construct(shape=3, value=1, dtype=dtype)
- pytest.raises(NotImplementedError, f, [("A", "datetime64[h]"),
- ("B", "str"),
- ("C", "int32")])
+ msg = ("compound dtypes are not implemented in the {} constructor"
+ .format(self._typ.__name__))
+ with pytest.raises(NotImplementedError, match=msg):
+ f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
# these work (though results may be unexpected)
f('int64')
@@ -725,6 +738,7 @@ def test_sample(sel):
with pytest.raises(ValueError):
df.sample(1, weights=s4)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
@@ -755,8 +769,14 @@ def test_squeeze(self):
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
- pytest.raises(ValueError, df.squeeze, axis=2)
- pytest.raises(ValueError, df.squeeze, axis='x')
+ msg = ("No axis named 2 for object type <class"
+ " 'pandas.core.frame.DataFrame'>")
+ with pytest.raises(ValueError, match=msg):
+ df.squeeze(axis=2)
+ msg = ("No axis named x for object type <class"
+ " 'pandas.core.frame.DataFrame'>")
+ with pytest.raises(ValueError, match=msg):
+ df.squeeze(axis='x')
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index 10430ebde8225..b7d42e45253b0 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -102,23 +102,34 @@ def test_nonzero_single_element(self):
s = Series([False])
assert not s.bool()
+ msg = "The truth value of a Series is ambiguous"
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
- pytest.raises(ValueError, lambda: bool(s))
+ with pytest.raises(ValueError, match=msg):
+ bool(s)
+ msg = "bool cannot act on a non-boolean single element Series"
for s in [Series([np.nan]), Series([pd.NaT])]:
- pytest.raises(ValueError, lambda: s.bool())
+ with pytest.raises(ValueError, match=msg):
+ s.bool()
# multiple bool are still an error
+ msg = "The truth value of a Series is ambiguous"
for s in [Series([True, True]), Series([False, False])]:
- pytest.raises(ValueError, lambda: bool(s))
- pytest.raises(ValueError, lambda: s.bool())
+ with pytest.raises(ValueError, match=msg):
+ bool(s)
+ with pytest.raises(ValueError, match=msg):
+ s.bool()
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
- pytest.raises(ValueError, lambda: bool(s))
- pytest.raises(ValueError, lambda: s.bool())
+ msg = "The truth value of a Series is ambiguous"
+ with pytest.raises(ValueError, match=msg):
+ bool(s)
+ msg = "bool cannot act on a non-boolean single element Series"
+ with pytest.raises(ValueError, match=msg):
+ s.bool()
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
| xref #24332 | https://api.github.com/repos/pandas-dev/pandas/pulls/25603 | 2019-03-08T09:21:45Z | 2019-03-09T16:29:33Z | 2019-03-09T16:29:33Z | 2019-03-10T21:25:44Z |
BUG: Fix to_string output when using header keyword arg (#16718) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 284943cf49070..ab2f8d66c37da 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -215,6 +215,7 @@ I/O
- Bug in :func:`read_json` for ``orient='table'`` and float index, as it infers index dtype by default, which is not applicable because index dtype is already defined in the JSON schema (:issue:`25433`)
- Bug in :func:`read_json` for ``orient='table'`` and string of float column names, as it makes a column name type conversion to Timestamp, which is not applicable because column names are already defined in the JSON schema (:issue:`25435`)
- :meth:`DataFrame.to_html` now raises ``TypeError`` when using an invalid type for the ``classes`` parameter instead of ``AsseertionError`` (:issue:`25608`)
+- Bug in :meth:`DataFrame.to_string` and :meth:`DataFrame.to_latex` that would lead to incorrect output when the ``header`` keyword is used (:issue:`16718`)
-
-
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index f68ef2cc39006..91e5edc8de9f4 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -528,6 +528,10 @@ def _to_str_columns(self):
else:
str_columns = self._get_formatted_column_labels(frame)
+ if self.show_row_idx_names:
+ for x in str_columns:
+ x.append('')
+
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
@@ -770,11 +774,6 @@ def space_format(x, y):
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
-
- if self.show_row_idx_names:
- for x in str_columns:
- x.append('')
-
# self.str_columns = str_columns
return str_columns
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index b0cf5a2f17609..43bb382ea3f20 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -2380,6 +2380,14 @@ def test_to_string_header(self):
exp = '0 0\n ..\n9 9'
assert res == exp
+ def test_to_string_multindex_header(self):
+ # GH 16718
+ df = (pd.DataFrame({'a': [0], 'b': [1], 'c': [2], 'd': [3]})
+ .set_index(['a', 'b']))
+ res = df.to_string(header=['r1', 'r2'])
+ exp = ' r1 r2\na b \n0 1 2 3'
+ assert res == exp
+
def _three_digit_exp():
return '{x:.4g}'.format(x=1.7e8) == '1.7e+008'
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 1653e474aa7b0..4bec3bca1820b 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -735,3 +735,19 @@ def test_to_latex_float_format_no_fixed_width(self):
\end{tabular}
"""
assert df.to_latex(float_format='%.0f') == expected
+
+ def test_to_latex_multindex_header(self):
+ # GH 16718
+ df = (pd.DataFrame({'a': [0], 'b': [1], 'c': [2], 'd': [3]})
+ .set_index(['a', 'b']))
+ observed = df.to_latex(header=['r1', 'r2'])
+ expected = r"""\begin{tabular}{llrr}
+\toprule
+ & & r1 & r2 \\
+a & b & & \\
+\midrule
+0 & 1 & 2 & 3 \\
+\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
| Also affects to_latex midrule position
- [x] closes #16718
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Fixes the 4th example in the above issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/25602 | 2019-03-08T09:19:08Z | 2019-03-12T17:03:16Z | 2019-03-12T17:03:15Z | 2019-03-14T09:18:26Z |
Backport PR #25586 on branch 0.24.x (BUG: secondary y axis could not be set to log scale (#25545)) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 7da99590d5a0a..2c6d1e01ed89b 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -92,7 +92,7 @@ Bug Fixes
**Visualization**
--
+- Bug in :meth:`Series.plot` where a secondary y axis could not be set to log scale (:issue:`25545`)
-
-
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 85549bafa8dc0..8e47510680add 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -287,6 +287,9 @@ def _maybe_right_yaxis(self, ax, axes_num):
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
+
+ if self.logy or self.loglog:
+ new_ax.set_yscale('log')
return new_ax
def _setup_subplots(self):
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 07a4b168a66f1..a234ea8f9416b 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -570,6 +570,18 @@ def test_df_series_secondary_legend(self):
assert ax.get_yaxis().get_visible()
tm.close()
+ @pytest.mark.slow
+ def test_secondary_logy(self):
+ # GH 25545
+ s1 = Series(np.random.randn(30))
+ s2 = Series(np.random.randn(30))
+
+ ax1 = s1.plot(logy=True)
+ ax2 = s2.plot(secondary_y=True, logy=True)
+
+ assert ax1.get_yscale() == 'log'
+ assert ax2.get_yscale() == 'log'
+
@pytest.mark.slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
| Backport PR #25586: BUG: secondary y axis could not be set to log scale (#25545) | https://api.github.com/repos/pandas-dev/pandas/pulls/25599 | 2019-03-07T20:42:39Z | 2019-03-08T07:22:13Z | 2019-03-08T07:22:13Z | 2019-03-08T07:22:13Z |
DOC: Fix typo in tz_localize | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d7f71df99cdb6..0b81576404e2f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9321,7 +9321,7 @@ def tz_localize(self, tz, axis=0, level=None, copy=True,
times
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
- where clocks moved forward due to DST. Valid valuse are:
+ where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
| Fixes a typo in the documentation for `tz_localize`. | https://api.github.com/repos/pandas-dev/pandas/pulls/25598 | 2019-03-07T19:53:04Z | 2019-03-07T20:40:40Z | 2019-03-07T20:40:39Z | 2019-03-07T20:51:16Z |
STY: use pytest.raises context manager (io.sql) | diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 9d0bce3b342b4..de17f02b693de 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -28,7 +28,7 @@
import pytest
import pandas.compat as compat
-from pandas.compat import PY36, lrange, range, string_types
+from pandas.compat import PY2, PY36, lrange, range, string_types
from pandas.core.dtypes.common import (
is_datetime64_dtype, is_datetime64tz_dtype)
@@ -400,8 +400,10 @@ def _to_sql_fail(self):
self.test_frame1, 'test_frame1', if_exists='fail')
assert self.pandasSQL.has_table('test_frame1')
- pytest.raises(ValueError, self.pandasSQL.to_sql,
- self.test_frame1, 'test_frame1', if_exists='fail')
+ msg = "Table 'test_frame1' already exists"
+ with pytest.raises(ValueError, match=msg):
+ self.pandasSQL.to_sql(
+ self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
@@ -563,8 +565,10 @@ def test_to_sql_fail(self):
self.conn, if_exists='fail')
assert sql.has_table('test_frame2', self.conn)
- pytest.raises(ValueError, sql.to_sql, self.test_frame1,
- 'test_frame2', self.conn, if_exists='fail')
+ msg = "Table 'test_frame2' already exists"
+ with pytest.raises(ValueError, match=msg):
+ sql.to_sql(self.test_frame1, 'test_frame2',
+ self.conn, if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
@@ -699,10 +703,11 @@ def test_timedelta(self):
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
- def test_complex(self):
+ def test_complex_raises(self):
df = DataFrame({'a': [1 + 1j, 2j]})
- # Complex data type should raise error
- pytest.raises(ValueError, df.to_sql, 'test_complex', self.conn)
+ msg = "Complex datatypes not supported"
+ with pytest.raises(ValueError, match=msg):
+ df.to_sql('test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
@@ -774,10 +779,11 @@ def test_to_sql_index_label_multiindex(self):
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['C', 'D']
- # wrong length of index_label
- pytest.raises(ValueError, sql.to_sql, temp_frame,
- 'test_index_label', self.conn, if_exists='replace',
- index_label='C')
+ msg = ("Length of 'index_label' should match number of levels, which"
+ " is 2")
+ with pytest.raises(ValueError, match=msg):
+ sql.to_sql(temp_frame, 'test_index_label', self.conn,
+ if_exists='replace', index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
@@ -882,6 +888,8 @@ def test_escaped_table_name(self):
@pytest.mark.single
+@pytest.mark.skipif(
+ not SQLALCHEMY_INSTALLED, reason='SQLAlchemy not installed')
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
@@ -894,10 +902,7 @@ class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
mode = 'sqlalchemy'
def connect(self):
- if SQLALCHEMY_INSTALLED:
- return sqlalchemy.create_engine('sqlite:///:memory:')
- else:
- pytest.skip('SQLAlchemy not installed')
+ return sqlalchemy.create_engine('sqlite:///:memory:')
def test_read_table_columns(self):
# test columns argument in read_table
@@ -1107,20 +1112,21 @@ def test_sql_open_close(self):
tm.assert_frame_equal(self.test_frame3, result)
+ @pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason='SQLAlchemy is installed')
def test_con_string_import_error(self):
- if not SQLALCHEMY_INSTALLED:
- conn = 'mysql://root@localhost/pandas_nosetest'
- pytest.raises(ImportError, sql.read_sql, "SELECT * FROM iris",
- conn)
- else:
- pytest.skip('SQLAlchemy is installed')
+ conn = 'mysql://root@localhost/pandas_nosetest'
+ msg = "Using URI string without sqlalchemy installed"
+ with pytest.raises(ImportError, match=msg):
+ sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
- pytest.raises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
+ msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
+ with pytest.raises(sql.DatabaseError, match=msg):
+ sql.read_sql('iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
@@ -1276,9 +1282,10 @@ def test_read_table_columns(self):
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
- def test_read_table_absent(self):
- pytest.raises(
- ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
+ def test_read_table_absent_raises(self):
+ msg = "Table this_doesnt_exist not found"
+ with pytest.raises(ValueError, match=msg):
+ sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
@@ -1617,8 +1624,9 @@ def test_dtype(self):
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.TEXT)
- pytest.raises(ValueError, df.to_sql,
- 'error', self.conn, dtype={'B': str})
+ msg = "The type of B is not a SQLAlchemy type"
+ with pytest.raises(ValueError, match=msg):
+ df.to_sql('error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
@@ -1903,8 +1911,9 @@ def test_schema_support(self):
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
- pytest.raises(ValueError, sql.read_sql_table, 'test_schema_other',
- self.conn, schema='public')
+ msg = "Table test_schema_other not found"
+ with pytest.raises(ValueError, match=msg):
+ sql.read_sql_table('test_schema_other', self.conn, schema='public')
# different if_exists options
@@ -2120,6 +2129,7 @@ def _get_sqlite_column_type(self, table, column):
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
@@ -2136,8 +2146,9 @@ def test_dtype(self):
assert self._get_sqlite_column_type(
'dtype_test2', 'B') == 'STRING'
- pytest.raises(ValueError, df.to_sql,
- 'error', self.conn, dtype={'B': bool})
+ msg = r"B \(<class 'bool'>\) not a string"
+ with pytest.raises(ValueError, match=msg):
+ df.to_sql('error', self.conn, dtype={'B': bool})
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype='STRING')
@@ -2169,8 +2180,9 @@ def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
- # Raise error on blank
- pytest.raises(ValueError, df.to_sql, "", self.conn)
+ msg = "Empty table or column name specified"
+ with pytest.raises(ValueError, match=msg):
+ df.to_sql("", self.conn)
for ndx, weird_name in enumerate(
['test_weird_name]', 'test_weird_name[',
@@ -2399,25 +2411,19 @@ def clean_up(test_table_to_drop):
"""
self.drop_table(test_table_to_drop)
- # test if invalid value for if_exists raises appropriate error
- pytest.raises(ValueError,
- sql.to_sql,
- frame=df_if_exists_1,
- con=self.conn,
- name=table_name,
- if_exists='notvalidvalue')
+ msg = "'notvalidvalue' is not valid for if_exists"
+ with pytest.raises(ValueError, match=msg):
+ sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
+ if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn,
name=table_name, if_exists='fail')
- pytest.raises(ValueError,
- sql.to_sql,
- frame=df_if_exists_1,
- con=self.conn,
- name=table_name,
- if_exists='fail')
-
+ msg = "Table 'table_if_exists' already exists"
+ with pytest.raises(ValueError, match=msg):
+ sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
+ if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
@@ -2663,23 +2669,17 @@ def clean_up(test_table_to_drop):
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
- pytest.raises(ValueError,
- sql.to_sql,
- frame=df_if_exists_1,
- con=self.conn,
- name=table_name,
- if_exists='notvalidvalue')
+ with pytest.raises(ValueError, match="<insert message here>"):
+ sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
+ if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
- pytest.raises(ValueError,
- sql.to_sql,
- frame=df_if_exists_1,
- con=self.conn,
- name=table_name,
- if_exists='fail')
+ with pytest.raises(ValueError, match="<insert message here>"):
+ sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
+ if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
| xref #24332 | https://api.github.com/repos/pandas-dev/pandas/pulls/25597 | 2019-03-07T19:16:49Z | 2019-03-09T16:34:40Z | 2019-03-09T16:34:40Z | 2019-03-10T21:28:06Z |
BUG: support EAs in nargsort without warning | diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ef69939d6e978..a5d3afd38adb2 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -8,7 +8,9 @@
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
- ensure_int64, ensure_platform_int, is_categorical_dtype, is_list_like)
+ ensure_int64, ensure_platform_int, is_categorical_dtype, is_list_like,
+ is_extension_array_dtype)
+from pandas.core.dtypes.generic import ABCIndexClass
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
@@ -239,7 +241,9 @@ def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
# specially handle Categorical
- if is_categorical_dtype(items):
+ if is_extension_array_dtype(items):
+ if isinstance(items, ABCIndexClass):
+ items = items._values
if na_position not in {'first', 'last'}:
raise ValueError('invalid na_position: {!r}'.format(na_position))
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 7528566e8326e..0fbc44ec27a4c 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -9,7 +9,9 @@
from pandas.compat import PY2
-from pandas import DataFrame, MultiIndex, Series, compat, concat, merge
+from pandas import (
+ Categorical, DataFrame, MultiIndex, Series, compat,
+ concat, merge, to_datetime)
from pandas.core import common as com
from pandas.core.sorting import (
decons_group_index, get_group_index, is_int64_overflow_possible,
@@ -183,6 +185,22 @@ def test_nargsort(self):
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
+ @pytest.mark.parametrize('data', [
+ Categorical(['a', 'c', 'a', 'b']),
+ to_datetime([0, 2, 0, 1]).tz_localize('Europe/Brussels')])
+ def test_nargsort_extension_array(self, data):
+ result = nargsort(data)
+ expected = np.array([0, 2, 3, 1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_nargsort_datetimearray_warning(self, recwarn):
+ # https://github.com/pandas-dev/pandas/issues/25439
+ # can be removed once the FutureWarning for np.array(DTA) is removed
+ data = to_datetime([0, 2, 0, 1]).tz_localize('Europe/Brussels')
+ nargsort(data)
+ msg = "Converting timezone-aware DatetimeArray to timezone-naive"
+ assert len([w for w in recwarn.list if msg in str(w.message)]) == 0
+
class TestMerge(object):
| Closes https://github.com/pandas-dev/pandas/issues/25439
| https://api.github.com/repos/pandas-dev/pandas/pulls/25595 | 2019-03-07T16:37:43Z | 2019-06-08T20:25:08Z | null | 2019-06-08T20:25:09Z |
BUG: fix usage of na_sentinel with sort=True in factorize() | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index ee9419c79e265..1b67335e40619 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -32,6 +32,7 @@ Fixed Regressions
- Fixed regression in creating a period-dtype array from a read-only NumPy array of period objects. (:issue:`25403`)
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
- Fixed pip installing from source into an environment without NumPy (:issue:`25193`)
+- Fixed regression in :func:`factorize` when passing a custom ``na_sentinel`` value with ``sort=True`` (:issue:`25409`).
- Fixed regression in :meth:`DataFrame.to_csv` writing duplicate line endings with gzip compress (:issue:`25311`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 4a71951e2435e..5ed2e3efe26a1 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -619,13 +619,19 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
if sort and len(uniques) > 0:
from pandas.core.sorting import safe_sort
- try:
- order = uniques.argsort()
- order2 = order.argsort()
- labels = take_1d(order2, labels, fill_value=na_sentinel)
- uniques = uniques.take(order)
- except TypeError:
- # Mixed types, where uniques.argsort fails.
+ if na_sentinel == -1:
+ # GH-25409 take_1d only works for na_sentinels of -1
+ try:
+ order = uniques.argsort()
+ order2 = order.argsort()
+ labels = take_1d(order2, labels, fill_value=na_sentinel)
+ uniques = uniques.take(order)
+ except TypeError:
+ # Mixed types, where uniques.argsort fails.
+ uniques, labels = safe_sort(uniques, labels,
+ na_sentinel=na_sentinel,
+ assume_unique=True)
+ else:
uniques, labels = safe_sort(uniques, labels,
na_sentinel=na_sentinel,
assume_unique=True)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 3f75c508d22f9..083307371b699 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -326,6 +326,21 @@ def test_parametrized_factorize_na_value(self, data, na_value):
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
+ @pytest.mark.parametrize('sort', [True, False])
+ @pytest.mark.parametrize('na_sentinel', [-1, -10, 100])
+ def test_factorize_na_sentinel(self, sort, na_sentinel):
+ data = np.array(['b', 'a', None, 'b'], dtype=object)
+ labels, uniques = algos.factorize(data, sort=sort,
+ na_sentinel=na_sentinel)
+ if sort:
+ expected_labels = np.array([1, 0, na_sentinel, 1], dtype=np.intp)
+ expected_uniques = np.array(['a', 'b'], dtype=object)
+ else:
+ expected_labels = np.array([0, 1, na_sentinel, 0], dtype=np.intp)
+ expected_uniques = np.array(['b', 'a'], dtype=object)
+ tm.assert_numpy_array_equal(labels, expected_labels)
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
+
class TestUnique(object):
| Closes https://github.com/pandas-dev/pandas/issues/25409
| https://api.github.com/repos/pandas-dev/pandas/pulls/25592 | 2019-03-07T14:59:36Z | 2019-03-12T20:25:26Z | 2019-03-12T20:25:26Z | 2019-03-12T20:39:28Z |
Backport PR #25556 on branch 0.24.x (BUG: Handle readonly arrays in period_array) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f864fcd04e3d4..283f12361841c 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -29,6 +29,7 @@ Fixed Regressions
- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the ``Series`` on the right contains null values (:issue:`25317`)
- Fixed regression in :class:`TimedeltaIndex` where ``np.sum(index)`` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
+- Fixed regression in creating a period-dtype array from a read-only NumPy array of period objects. (:issue:`25403`)
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 2f4edb7de8f95..02fae1d09fd82 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1437,7 +1437,9 @@ cdef accessor _get_accessor_func(int code):
@cython.wraparound(False)
@cython.boundscheck(False)
-def extract_ordinals(object[:] values, freq):
+def extract_ordinals(ndarray[object] values, freq):
+ # TODO: Change type to const object[:] when Cython supports that.
+
cdef:
Py_ssize_t i, n = len(values)
int64_t[:] ordinals = np.empty(n, dtype=np.int64)
@@ -1471,7 +1473,9 @@ def extract_ordinals(object[:] values, freq):
return ordinals.base # .base to access underlying np.ndarray
-def extract_freq(object[:] values):
+def extract_freq(ndarray[object] values):
+ # TODO: Change type to const object[:] when Cython supports that.
+
cdef:
Py_ssize_t i, n = len(values)
object p
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index affe3b3854490..99255d819d28e 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -41,6 +41,22 @@ def test_period_array_ok(data, freq, expected):
tm.assert_numpy_array_equal(result, expected)
+def test_period_array_readonly_object():
+ # https://github.com/pandas-dev/pandas/issues/25403
+ pa = period_array([pd.Period('2019-01-01')])
+ arr = np.asarray(pa, dtype='object')
+ arr.setflags(write=False)
+
+ result = period_array(arr)
+ tm.assert_period_array_equal(result, pa)
+
+ result = pd.Series(arr)
+ tm.assert_series_equal(result, pd.Series(pa))
+
+ result = pd.DataFrame({"A": arr})
+ tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
+
+
def test_from_datetime64_freq_changes():
# https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
| Backport PR #25556: BUG: Handle readonly arrays in period_array | https://api.github.com/repos/pandas-dev/pandas/pulls/25591 | 2019-03-07T14:25:36Z | 2019-03-07T15:36:01Z | 2019-03-07T15:36:01Z | 2019-03-07T15:36:01Z |
BUG: Cast ExtensionArray to numpy ndarray before plot | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index d186fdfe0f322..ddc5e543c6165 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -225,6 +225,7 @@ I/O
Plotting
^^^^^^^^
+- Fixed bug where :class:`api.extensions.ExtensionArray` could not be used in matplotlib plotting (:issue:`25587`)
-
-
-
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index b9ec4d58db739..68d5f30a399ac 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -365,6 +365,12 @@ def _compute_plot_data(self):
if is_empty:
raise TypeError('no numeric data to plot')
+ # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
+ # np.ndarray before plot.
+ numeric_data = numeric_data.copy()
+ for col in numeric_data:
+ numeric_data[col] = np.asarray(numeric_data[col])
+
self.data = numeric_data
def _make_plot(self):
@@ -1794,7 +1800,6 @@ def _plot(data, x=None, y=None, subplots=False,
)
label_name = label_kw or data.columns
data.columns = label_name
-
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 28806bb67c896..7346a3b09aecf 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -18,6 +18,7 @@
import pandas as pd
from pandas import (
DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range)
+from pandas.core.arrays import integer_array
from pandas.tests.plotting.common import (
TestPlotBase, _check_plot_works, _ok_for_gaussian_kde,
_skip_if_no_scipy_gaussian_kde)
@@ -144,8 +145,26 @@ def test_plot(self):
result = ax.axes
assert result is axes[0]
- # GH 15516
+ def test_integer_array_plot(self):
+ # GH 25587
+ arr = integer_array([1, 2, 3, 4], dtype="UInt32")
+
+ s = Series(arr)
+ _check_plot_works(s.plot.line)
+ _check_plot_works(s.plot.bar)
+ _check_plot_works(s.plot.hist)
+ _check_plot_works(s.plot.pie)
+
+ df = DataFrame({'x': arr, 'y': arr})
+ _check_plot_works(df.plot.line)
+ _check_plot_works(df.plot.bar)
+ _check_plot_works(df.plot.hist)
+ _check_plot_works(df.plot.pie, y='y')
+ _check_plot_works(df.plot.scatter, x='x', y='y')
+ _check_plot_works(df.plot.hexbin, x='x', y='y')
+
def test_mpl2_color_cycle_str(self):
+ # GH 15516
colors = ['C' + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
for c in colors:
| Cast `ExtensionArray`(eg. `IntegerArray`) to `np.ndarray` before send to matplotlib to plot, fixes #25587.
- [x] closes #25587
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25590 | 2019-03-07T13:59:34Z | 2019-03-15T11:06:06Z | 2019-03-15T11:06:06Z | 2019-03-15T11:11:46Z |
TST: add test for groupby on list of empty list | diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 6a11f0ae9b44a..c062fb90ca43b 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1714,3 +1714,12 @@ def test_groupby_multiindex_nat():
result = ser.groupby(level=1).mean()
expected = pd.Series([3., 2.5], index=["a", "b"])
assert_series_equal(result, expected)
+
+
+def test_groupby_empty_list_raises():
+ # GH 5289
+ values = zip(range(10), range(10))
+ df = DataFrame(values, columns=['apple', 'b'])
+ msg = "Grouper and axis must be same length"
+ with pytest.raises(ValueError, match=msg):
+ df.groupby([[]])
| - [x] closes #5289
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ n/a] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25589 | 2019-03-07T12:55:21Z | 2019-03-07T21:59:40Z | 2019-03-07T21:59:40Z | 2019-03-08T08:25:39Z |
BUG: Fix _binop for operators for serials which has more than one returns (divmod/rdivmod). | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 73eb6a15a1b47..11beb1f7bb12e 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -176,6 +176,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
Categorical
^^^^^^^^^^^
@@ -211,6 +212,7 @@ Numeric
- Bug in :meth:`to_numeric` in which large negative numbers were being improperly handled (:issue:`24910`)
- Bug in :meth:`to_numeric` in which numbers were being coerced to float, even though ``errors`` was not ``coerce`` (:issue:`24910`)
- Bug in error messages in :meth:`DataFrame.corr` and :meth:`Series.corr`. Added the possibility of using a callable. (:issue:`25729`)
+- Bug in :meth:`Series.divmod` and :meth:`Series.rdivmod` which would raise an (incorrect) ``ValueError`` rather than return a pair of :class:`Series` objects as result (:issue:`25557`)
-
-
-
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 4d88ce6836ca4..bd45003c154a4 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1660,7 +1660,7 @@ def _construct_result(left, result, index, name, dtype=None):
not be enough; we still need to override the name attribute.
"""
out = left._constructor(result, index=index, dtype=dtype)
-
+ out = out.__finalize__(left)
out.name = name
return out
@@ -1668,10 +1668,11 @@ def _construct_result(left, result, index, name, dtype=None):
def _construct_divmod_result(left, result, index, name, dtype=None):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
- constructor = left._constructor
return (
- constructor(result[0], index=index, name=name, dtype=dtype),
- constructor(result[1], index=index, name=name, dtype=dtype),
+ _construct_result(left, result[0], index=index, name=name,
+ dtype=dtype),
+ _construct_result(left, result[1], index=index, name=name,
+ dtype=dtype),
)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5eb3376621ab0..d4840eae8601b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2527,6 +2527,7 @@ def _binop(self, other, func, level=None, fill_value=None):
-------
Series
"""
+
if not isinstance(other, Series):
raise AssertionError('Other operand must be Series')
@@ -2543,13 +2544,13 @@ def _binop(self, other, func, level=None, fill_value=None):
with np.errstate(all='ignore'):
result = func(this_vals, other_vals)
+
name = ops.get_op_result_name(self, other)
- result = self._constructor(result, index=new_index, name=name)
- result = result.__finalize__(self)
- if name is None:
- # When name is None, __finalize__ overwrites current name
- result.name = None
- return result
+ if func.__name__ in ['divmod', 'rdivmod']:
+ ret = ops._construct_divmod_result(self, result, new_index, name)
+ else:
+ ret = ops._construct_result(self, result, new_index, name)
+ return ret
def combine(self, other, func, fill_value=None):
"""
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index b2aac441db195..2f96fe906d980 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -741,6 +741,21 @@ def test_op_duplicate_index(self):
expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
assert_series_equal(result, expected)
+ def test_divmod(self):
+ # GH25557
+ a = Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+ b = Series([2, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+
+ result = a.divmod(b)
+ expected = divmod(a, b)
+ assert_series_equal(result[0], expected[0])
+ assert_series_equal(result[1], expected[1])
+
+ result = a.rdivmod(b)
+ expected = divmod(b, a)
+ assert_series_equal(result[0], expected[0])
+ assert_series_equal(result[1], expected[1])
+
class TestSeriesUnaryOps(object):
# __neg__, __pos__, __inv__
| When for binary operator returns more than one `Series` as result, instantiate `Series` for every element in the result tuple.
- [x] closes #25557
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25588 | 2019-03-07T11:49:30Z | 2019-03-20T12:28:45Z | 2019-03-20T12:28:44Z | 2019-03-20T12:51:11Z |
BUG: secondary y axis could not be set to log scale (#25545) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f864fcd04e3d4..9b5410a30dd50 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -90,7 +90,7 @@ Bug Fixes
**Visualization**
--
+- Bug in :meth:`Series.plot` where a secondary y axis could not be set to log scale (:issue:`25545`)
-
-
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 48d870bfc2e03..0ea92a57ac3f8 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -287,6 +287,9 @@ def _maybe_right_yaxis(self, ax, axes_num):
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
+
+ if self.logy or self.loglog:
+ new_ax.set_yscale('log')
return new_ax
def _setup_subplots(self):
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 07a4b168a66f1..a234ea8f9416b 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -570,6 +570,18 @@ def test_df_series_secondary_legend(self):
assert ax.get_yaxis().get_visible()
tm.close()
+ @pytest.mark.slow
+ def test_secondary_logy(self):
+ # GH 25545
+ s1 = Series(np.random.randn(30))
+ s2 = Series(np.random.randn(30))
+
+ ax1 = s1.plot(logy=True)
+ ax2 = s2.plot(secondary_y=True, logy=True)
+
+ assert ax1.get_yscale() == 'log'
+ assert ax2.get_yscale() == 'log'
+
@pytest.mark.slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
| - [x] closes #25545
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25586 | 2019-03-07T09:44:11Z | 2019-03-07T20:42:30Z | 2019-03-07T20:42:29Z | 2019-03-08T11:21:39Z |
BUG: Preserve name in DatetimeIndex.snap | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index ea08a0a6fe07b..82bfe526cf161 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -86,7 +86,7 @@ Other API Changes
- :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`)
- ``Timestamp`` and ``Timedelta`` scalars now implement the :meth:`to_numpy` method as aliases to :meth:`Timestamp.to_datetime64` and :meth:`Timedelta.to_timedelta64`, respectively. (:issue:`24653`)
- :meth:`Timestamp.strptime` will now rise a ``NotImplementedError`` (:issue:`25016`)
--
+- Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`)
.. _whatsnew_0250.deprecations:
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index b8d052ce7be04..b65e59a3d58b7 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -787,8 +787,8 @@ def snap(self, freq='S'):
snapped[i] = s
# we know it conforms; skip check
- return DatetimeIndex._simple_new(snapped, freq=freq)
- # TODO: what about self.name? tz? if so, use shallow_copy?
+ return DatetimeIndex._simple_new(snapped, name=self.name, tz=self.tz,
+ freq=freq)
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 0efc9feb0dbd4..8e4c7d9b17efc 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -52,20 +52,28 @@ def test_fancy_setitem():
assert (s[48:54] == -3).all()
-def test_dti_snap():
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+@pytest.mark.parametrize('tz', [None, 'Asia/Shanghai', 'Europe/Berlin'])
+@pytest.mark.parametrize('name', [None, 'my_dti'])
+def test_dti_snap(name, tz):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
- '1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
+ '1/5/2002', '1/6/2002', '1/7/2002'],
+ name=name, tz=tz, freq='D')
- res = dti.snap(freq='W-MON')
- exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
- exp = exp.repeat([3, 4])
- assert (res == exp).all()
+ result = dti.snap(freq='W-MON')
+ expected = date_range('12/31/2001', '1/7/2002',
+ name=name, tz=tz, freq='w-mon')
+ expected = expected.repeat([3, 4])
+ tm.assert_index_equal(result, expected)
+ assert result.tz == expected.tz
- res = dti.snap(freq='B')
+ result = dti.snap(freq='B')
- exp = date_range('1/1/2002', '1/7/2002', freq='b')
- exp = exp.repeat([1, 1, 1, 2, 2])
- assert (res == exp).all()
+ expected = date_range('1/1/2002', '1/7/2002',
+ name=name, tz=tz, freq='b')
+ expected = expected.repeat([1, 1, 1, 2, 2])
+ tm.assert_index_equal(result, expected)
+ assert result.tz == expected.tz
def test_dti_reset_index_round_trip():
| Use self.name to construct the result of `DatetimeIndex.snap`, fix #25575.
- [x] closes #25575
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25585 | 2019-03-07T08:51:33Z | 2019-03-13T15:28:32Z | 2019-03-13T15:28:32Z | 2019-03-14T02:26:58Z |
BUG: Redefine IndexOpsMixin.size, fix #25580. | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 4ca9d57f3a2e5..7a983db754fa9 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -100,7 +100,8 @@ Bug Fixes
- Bug in :meth:`Series.is_unique` where single occurrences of ``NaN`` were not considered unique (:issue:`25180`)
- Bug in :func:`merge` when merging an empty ``DataFrame`` with an ``Int64`` column or a non-empty ``DataFrame`` with an ``Int64`` column that is all ``NaN`` (:issue:`25183`)
- Bug in ``IntervalTree`` where a ``RecursionError`` occurs upon construction due to an overflow when adding endpoints, which also causes :class:`IntervalIndex` to crash during indexing operations (:issue:`25485`)
--
+- Bug in :attr:`Series.size` raising for some extension-array-backed ``Series``, rather than returning the size (:issue:`25580`)
+- Bug in resampling raising for nullable integer-dtype columns (:issue:`25580`)
.. _whatsnew_0242.contributors:
diff --git a/pandas/core/base.py b/pandas/core/base.py
index f896596dd5216..8a57904aecce4 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -762,7 +762,7 @@ def size(self):
"""
Return the number of elements in the underlying data.
"""
- return self._values.size
+ return len(self._values)
@property
def flags(self):
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index ce675893d9907..ec05595536de4 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -101,6 +101,18 @@ def test_resample_basic(series, closed, expected):
assert_series_equal(result, expected)
+def test_resample_integerarray():
+ # GH 25580, resample on IntegerArray
+ ts = pd.Series(range(9),
+ index=pd.date_range('1/1/2000', periods=9, freq='T'),
+ dtype='Int64')
+ result = ts.resample('3T').sum()
+ expected = Series([3, 12, 21],
+ index=pd.date_range('1/1/2000', periods=3, freq='3T'),
+ dtype="Int64")
+ assert_series_equal(result, expected)
+
+
def test_resample_basic_grouper(series):
s = series
result = s.resample('5Min').last()
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 1f2e2b179c687..3ad9d54175f31 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -493,6 +493,13 @@ def test_tab_complete_warning(self, ip):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('s.', 1))
+ def test_integer_series_size(self):
+ # GH 25580
+ s = Series(range(9))
+ assert s.size == 9
+ s = Series(range(9), dtype="Int64")
+ assert s.size == 9
+
class TestCategoricalSeries(object):
|
- [x] closes #25580
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25584 | 2019-03-07T07:55:45Z | 2019-03-11T16:09:27Z | 2019-03-11T16:09:27Z | 2019-03-12T04:55:11Z |
fix minor typos in dsintro.rst | diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/getting_started/dsintro.rst
index 94bec5c5bc83d..c8a2399739cd5 100644
--- a/doc/source/getting_started/dsintro.rst
+++ b/doc/source/getting_started/dsintro.rst
@@ -576,14 +576,14 @@ To write code compatible with all versions of Python, split the assignment in tw
.. warning::
- Dependent assignment maybe subtly change the behavior of your code between
+ Dependent assignment may subtly change the behavior of your code between
Python 3.6 and older versions of Python.
- If you wish write code that supports versions of python before and after 3.6,
+ If you wish to write code that supports versions of python before and after 3.6,
you'll need to take care when passing ``assign`` expressions that
- * Updating an existing column
- * Referring to the newly updated column in the same ``assign``
+ * Update an existing column
+ * Refer to the newly updated column in the same ``assign``
For example, we'll update column "A" and then refer to it when creating "B".
@@ -665,8 +665,8 @@ row-wise. For example:
df - df.iloc[0]
-In the special case of working with time series data, and the DataFrame index
-also contains dates, the broadcasting will be column-wise:
+In the special case of working with time series data, if the DataFrame index
+contains dates, the broadcasting will be column-wise:
.. ipython:: python
:okwarning:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25579 | 2019-03-06T23:15:45Z | 2019-03-07T02:22:20Z | 2019-03-07T02:22:20Z | 2019-03-07T02:22:27Z |
Backport PR #25568 on branch 0.24.x (BLD: Fixed pip install with no numpy) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f864fcd04e3d4..4ca9d57f3a2e5 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -30,6 +30,7 @@ Fixed Regressions
- Fixed regression in :class:`TimedeltaIndex` where ``np.sum(index)`` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
+- Fixed pip installing from source into an environment without NumPy (:issue:`25193`)
.. _whatsnew_0242.enhancements:
diff --git a/setup.py b/setup.py
index 52a67f147121c..2a67b21414f63 100755
--- a/setup.py
+++ b/setup.py
@@ -472,6 +472,11 @@ def maybe_cythonize(extensions, *args, **kwargs):
# Avoid running cythonize on `python setup.py clean`
# See https://github.com/cython/cython/issues/1495
return extensions
+ if not cython:
+ # Avoid trying to look up numpy when installing from sdist
+ # https://github.com/pandas-dev/pandas/issues/25193
+ # TODO: See if this can be removed after pyproject.toml added.
+ return extensions
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
# TODO: Is this really necessary here?
@@ -480,11 +485,8 @@ def maybe_cythonize(extensions, *args, **kwargs):
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
- if cython:
- build_ext.render_templates(_pxifiles)
- return cythonize(extensions, *args, **kwargs)
- else:
- return extensions
+ build_ext.render_templates(_pxifiles)
+ return cythonize(extensions, *args, **kwargs)
def srcpath(name=None, suffix='.pyx', subdir='src'):
| Backport PR #25568: BLD: Fixed pip install with no numpy | https://api.github.com/repos/pandas-dev/pandas/pulls/25578 | 2019-03-06T22:40:01Z | 2019-03-07T14:27:21Z | 2019-03-07T14:27:21Z | 2019-03-07T14:27:21Z |
DOC: Generalize "rename" documentation | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index eadffb779734f..78cc4769866bc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3913,62 +3913,13 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
('inplace', False),
('level', None),
('errors', 'ignore')])
+ @Substitution(**dict(_shared_doc_kwargs,
+ **{'altered': 'axis labels',
+ 'axes_alt_types': '',
+ 'alternative_use': ''}))
+ @Substitution(generic_documentation=NDFrame.rename.__doc__)
def rename(self, *args, **kwargs):
- """
- Alter axes labels.
-
- Function / dict values must be unique (1-to-1). Labels not contained in
- a dict / Series will be left as-is. Extra labels listed don't throw an
- error.
-
- See the :ref:`user guide <basics.rename>` for more.
-
- Parameters
- ----------
- mapper : dict-like or function
- Dict-like or functions transformations to apply to
- that axis' values. Use either ``mapper`` and ``axis`` to
- specify the axis to target with ``mapper``, or ``index`` and
- ``columns``.
- index : dict-like or function
- Alternative to specifying axis (``mapper, axis=0``
- is equivalent to ``index=mapper``).
- columns : dict-like or function
- Alternative to specifying axis (``mapper, axis=1``
- is equivalent to ``columns=mapper``).
- axis : int or str
- Axis to target with ``mapper``. Can be either the axis name
- ('index', 'columns') or number (0, 1). The default is 'index'.
- copy : bool, default True
- Also copy underlying data.
- inplace : bool, default False
- Whether to return a new DataFrame. If True then value of copy is
- ignored.
- level : int or level name, default None
- In case of a MultiIndex, only rename labels in the specified
- level.
- errors : {'ignore', 'raise'}, default 'ignore'
- If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
- or `columns` contains labels that are not present in the Index
- being transformed.
- If 'ignore', existing keys will be renamed and extra keys will be
- ignored.
-
- Returns
- -------
- DataFrame
- DataFrame with the renamed axis labels.
-
- Raises
- ------
- KeyError
- If any of the labels is not found in the selected axis and
- "errors='raise'".
-
- See Also
- --------
- DataFrame.rename_axis : Set the name of the axis.
-
+ """%(generic_documentation)s
Examples
--------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7915d98662c9e..c8e203dc62182 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -958,23 +958,24 @@ def swaplevel(self, i=-2, j=-1, axis=0):
# ----------------------------------------------------------------------
# Rename
-
def rename(self, *args, **kwargs):
"""
- Alter axes input function or functions. Function / dict values must be
- unique (1-to-1). Labels not contained in a dict / Series will be left
- as-is. Extra labels listed don't throw an error. Alternatively, change
- ``Series.name`` with a scalar value (Series only).
+ Alter %(altered)s.
+
+ Function / dict values must be unique (1-to-1). Labels not contained in
+ the %(altered)s will be left as-is. Extra labels listed do not throw an
+ error, unless "errors='raise'". %(alternative_use)s
+
+ See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
- %(axes)s : scalar, list-like, dict-like or function, optional
- Scalar or list-like will alter the ``Series.name`` attribute,
- and raise on DataFrame or Panel.
- dict-like or functions are transformations to apply to
- that axis' values
+ %(axes)s : dict-like, %(axes_alt_types)s or function, optional
+ Dict-like or function is the transformation to apply to one of
+ {%(axes)s}.%(alternative_use)s
+ %(optional_axis)s
copy : bool, default True
- Also copy underlying data.
+ Whether to copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
@@ -990,88 +991,20 @@ def rename(self, *args, **kwargs):
Returns
-------
- renamed : %(klass)s (new object)
+ %(klass)s
+ %(klass)s with %(altered)s altered.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
- "errors='raise'".
+ ``errors='raise'``.
See Also
--------
- NDFrame.rename_axis
-
- Examples
- --------
-
- >>> s = pd.Series([1, 2, 3])
- >>> s
- 0 1
- 1 2
- 2 3
- dtype: int64
- >>> s.rename("my_name") # scalar, changes Series.name
- 0 1
- 1 2
- 2 3
- Name: my_name, dtype: int64
- >>> s.rename(lambda x: x ** 2) # function, changes labels
- 0 1
- 1 2
- 4 3
- dtype: int64
- >>> s.rename({1: 3, 2: 5}) # mapping, changes labels
- 0 1
- 3 2
- 5 3
- dtype: int64
-
- Since ``DataFrame`` doesn't have a ``.name`` attribute,
- only mapping-type arguments are allowed.
-
- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
- >>> df.rename(2)
- Traceback (most recent call last):
- ...
- TypeError: 'int' object is not callable
-
- ``DataFrame.rename`` supports two calling conventions
-
- * ``(index=index_mapper, columns=columns_mapper, ...)``
- * ``(mapper, axis={'index', 'columns'}, ...)``
-
- We *highly* recommend using keyword arguments to clarify your
- intent.
-
- >>> df.rename(index=str, columns={"A": "a", "B": "c"})
- a c
- 0 1 4
- 1 2 5
- 2 3 6
-
- >>> df.rename(index=str, columns={"A": "a", "C": "c"})
- a B
- 0 1 4
- 1 2 5
- 2 3 6
-
- Using axis-style parameters
-
- >>> df.rename(str.lower, axis='columns')
- a b
- 0 1 4
- 1 2 5
- 2 3 6
-
- >>> df.rename({1: 2, 2: 4}, axis='index')
- A B
- 0 1 4
- 2 2 5
- 4 3 6
-
- See the :ref:`user guide <basics.rename>` for more.
+ %(klass)s.rename_axis: Set the name of the axis.
"""
+
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 1555542079d80..76cb7c0329e8f 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1249,9 +1249,83 @@ def reindex(self, *args, **kwargs):
result = super(Panel, self).reindex(**kwargs)
return result
- @Substitution(**_shared_doc_kwargs)
- @Appender(NDFrame.rename.__doc__)
+ @Substitution(**dict(_shared_doc_kwargs,
+ **{'altered': 'axes input function or functions',
+ 'alternative_use': '',
+ 'axes_alt_types': ''}))
+ @Substitution(generic_documentation=NDFrame.rename.__doc__)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
+ """%(generic_documentation)s
+ Examples
+ --------
+
+ >>> s = pd.Series([1, 2, 3])
+ >>> s
+ 0 1
+ 1 2
+ 2 3
+ dtype: int64
+ >>> s.rename("my_name") # scalar, changes Series.name
+ 0 1
+ 1 2
+ 2 3
+ Name: my_name, dtype: int64
+ >>> s.rename(lambda x: x ** 2) # function, changes labels
+ 0 1
+ 1 2
+ 4 3
+ dtype: int64
+ >>> s.rename({1: 3, 2: 5}) # mapping, changes labels
+ 0 1
+ 3 2
+ 5 3
+ dtype: int64
+
+ Since ``DataFrame`` doesn't have a ``.name`` attribute,
+ only mapping-type arguments are allowed.
+
+ >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ >>> df.rename(2)
+ Traceback (most recent call last):
+ ...
+ TypeError: 'int' object is not callable
+
+ ``DataFrame.rename`` supports two calling conventions
+
+ * ``(index=index_mapper, columns=columns_mapper, ...)``
+ * ``(mapper, axis={'index', 'columns'}, ...)``
+
+ We *highly* recommend using keyword arguments to clarify your
+ intent.
+
+ >>> df.rename(index=str, columns={"A": "a", "B": "c"})
+ a c
+ 0 1 4
+ 1 2 5
+ 2 3 6
+
+ >>> df.rename(index=str, columns={"A": "a", "C": "c"})
+ a B
+ 0 1 4
+ 1 2 5
+ 2 3 6
+
+ Using axis-style parameters
+
+ >>> df.rename(str.lower, axis='columns')
+ a b
+ 0 1 4
+ 1 2 5
+ 2 3 6
+
+ >>> df.rename({1: 2, 2: 4}, axis='index')
+ A B
+ 0 1 4
+ 2 2 5
+ 4 3 6
+
+ See the :ref:`user guide <basics.rename>` for more.
+ """
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 03fc26efa4516..c20653ec79b37 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3733,43 +3733,15 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
limit=limit, fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
+ @Substitution(**dict(_shared_doc_kwargs,
+ **{'altered': "Series index labels or name",
+ 'axes_alt_types': 'scalar, hashable sequence',
+ 'alternative_use': " Change Series.name with a "
+ "scalar value or hashable "
+ "sequence-like."}))
+ @Substitution(generic_documentation=generic.NDFrame.rename.__doc__)
def rename(self, index=None, **kwargs):
- """
- Alter Series index labels or name.
-
- Function / dict values must be unique (1-to-1). Labels not contained in
- a dict / Series will be left as-is. Extra labels listed don't throw an
- error.
-
- Alternatively, change ``Series.name`` with a scalar value.
-
- See the :ref:`user guide <basics.rename>` for more.
-
- Parameters
- ----------
- index : scalar, hashable sequence, dict-like or function, optional
- dict-like or functions are transformations to apply to
- the index.
- Scalar or hashable sequence-like will alter the ``Series.name``
- attribute.
- copy : bool, default True
- Whether to copy underlying data.
- inplace : bool, default False
- Whether to return a new Series. If True then value of copy is
- ignored.
- level : int or level name, default None
- In case of a MultiIndex, only rename labels in the specified
- level.
-
- Returns
- -------
- Series
- Series with index labels or name altered.
-
- See Also
- --------
- Series.rename_axis : Set the name of the axis.
-
+ """%(generic_documentation)s
Examples
--------
>>> s = pd.Series([1, 2, 3])
@@ -3778,7 +3750,7 @@ def rename(self, index=None, **kwargs):
1 2
2 3
dtype: int64
- >>> s.rename("my_name") # scalar, changes Series.name
+ >>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
| In this pull request, the documentation of the rename function of NDFrame is generalized over all its children using the function: DataFrame, Series and Panel. These three classes now use the Substitute decorator to add the generic part of the documentation to their own, using the same decorator to replace class specific parameters into the generic documentation.
I have tried my best to create as little changes to the documentation of each individual function. Although I have tried to get rid of all style errors, I do still receive some errors from the `validate_docstrings.py`:
```
Series
2 Errors found:
Parameters {**kwargs} not documented
Unknown parameters {errors, copy, inplace, level}
```
```
Panel
2 Errors found:
Parameters {minor_axis, items, **kwargs, major_axis} not documented
Unknown parameters {items, major_axis, minor_axis, copy, errors, inplace, level}
```
Help is appreciated! | https://api.github.com/repos/pandas-dev/pandas/pulls/25577 | 2019-03-06T21:17:57Z | 2019-05-12T21:27:09Z | null | 2019-05-12T21:27:09Z |
CLN: typing/mypy cleanup: Small fixes to make stubgen happy | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 3ddceb8c2839d..0ec1bc7a84231 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -710,7 +710,7 @@ def _raise_on_incompatible(left, right):
# Constructor Helpers
def period_array(data, freq=None, copy=False):
- # type: (Sequence[Optional[Period]], Optional[Tick]) -> PeriodArray
+ # type: (Sequence[Optional[Period]], Optional[Tick], bool) -> PeriodArray
"""
Construct a new PeriodArray from a sequence of Period scalars.
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 6114e578dc90f..9be2c9af169e8 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -397,6 +397,7 @@ def _get_fill(arr):
def _sparse_array_op(left, right, op, name):
+ # type: (SparseArray, SparseArray, Callable, str) -> Any
"""
Perform a binary operation between two arrays.
@@ -413,7 +414,6 @@ def _sparse_array_op(left, right, op, name):
-------
SparseArray
"""
- # type: (SparseArray, SparseArray, Callable, str) -> Any
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 926da40deaff2..36dcb692bb079 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1041,7 +1041,7 @@ def _bool_agg(self, val_test, skipna):
"""
def objs_to_bool(vals):
- # type: np.ndarray -> (np.ndarray, typing.Type)
+ # type: (np.ndarray) -> (np.ndarray, typing.Type)
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
@@ -1743,7 +1743,7 @@ def quantile(self, q=0.5, interpolation='linear'):
"""
def pre_processor(vals):
- # type: np.ndarray -> (np.ndarray, Optional[typing.Type])
+ # type: (np.ndarray) -> (np.ndarray, Optional[typing.Type])
if is_object_dtype(vals):
raise TypeError("'quantile' cannot be performed against "
"'object' dtypes!")
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 4e2c04dba8b04..ada663556899b 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1827,13 +1827,13 @@ def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
placement=self.mgr_locs)
def shift(self, periods, axis=0, fill_value=None):
+ # type: (int, Optional[BlockPlacement], Any) -> List[ExtensionBlock]
"""
Shift the block by `periods`.
Dispatches to underlying ExtensionArray and re-boxes in an
ExtensionBlock.
"""
- # type: (int, Optional[BlockPlacement]) -> List[ExtensionBlock]
return [
self.make_block_same_class(
self.values.shift(periods=periods, fill_value=fill_value),
| No code changes, just moved a few comments, and fixed a few broken types.
This patch allows stubgen to run over the pandas codebase without generating errors | https://api.github.com/repos/pandas-dev/pandas/pulls/25576 | 2019-03-06T21:08:31Z | 2019-03-08T21:52:27Z | 2019-03-08T21:52:27Z | 2019-03-09T04:05:28Z |
minor typo error | diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst
index 972b562cfebba..50c53a56174c8 100644
--- a/doc/source/getting_started/10min.rst
+++ b/doc/source/getting_started/10min.rst
@@ -103,7 +103,7 @@ Display the index, columns:
df.columns
:meth:`DataFrame.to_numpy` gives a NumPy representation of the underlying data.
-Note that his can be an expensive operation when your :class:`DataFrame` has
+Note that this can be an expensive operation when your :class:`DataFrame` has
columns with different data types, which comes down to a fundamental difference
between pandas and NumPy: **NumPy arrays have one dtype for the entire array,
while pandas DataFrames have one dtype per column**. When you call
| Changed from '**his** can be an expensive operation...' to '**this** can be an expensive operation...'
| https://api.github.com/repos/pandas-dev/pandas/pulls/25574 | 2019-03-06T19:15:20Z | 2019-03-06T21:26:47Z | 2019-03-06T21:26:47Z | 2019-03-06T21:26:52Z |
BUG: Using categorical dtype in read_json | diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 4bae067ee5196..0637dc303d0c0 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -9,7 +9,7 @@
from pandas.compat import StringIO, long, to_str, u
from pandas.errors import AbstractMethodError
-from pandas.core.dtypes.common import is_period_dtype
+from pandas.core.dtypes.common import is_period_dtype, is_categorical_dtype
from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime
from pandas.core.reshape.concat import concat
@@ -712,6 +712,10 @@ def _try_convert_data(self, name, data, use_dtypes=True,
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
+
+ if is_categorical_dtype(dtype):
+ return data.astype('category'), True
+
if dtype is not None:
try:
dtype = np.dtype(dtype)
| Pandas `read_json` ignores `category` dtype. This fix is need to using `category` in `read_json`.
Example: `pd.read_json(filename, lines=True, dtype={'ColumnName':'category'})`.
fyi #21892
| https://api.github.com/repos/pandas-dev/pandas/pulls/25573 | 2019-03-06T17:45:14Z | 2019-03-07T11:48:50Z | null | 2019-03-07T11:48:50Z |
REF: add custom Exception for safe_sort | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 80bf7214e7a27..7f684711b3042 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -616,7 +616,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
na_value=na_value)
if sort and len(uniques) > 0:
- from pandas.core.sorting import safe_sort
+ from pandas.core.sorting import safe_sort, SortError
if na_sentinel == -1:
# GH-25409 take_1d only works for na_sentinels of -1
try:
@@ -626,13 +626,19 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
uniques = uniques.take(order)
except TypeError:
# Mixed types, where uniques.argsort fails.
+ try:
+ uniques, labels = safe_sort(uniques, labels,
+ na_sentinel=na_sentinel,
+ assume_unique=True)
+ except SortError as e:
+ raise TypeError(e) from e
+ else:
+ try:
uniques, labels = safe_sort(uniques, labels,
na_sentinel=na_sentinel,
assume_unique=True)
- else:
- uniques, labels = safe_sort(uniques, labels,
- na_sentinel=na_sentinel,
- assume_unique=True)
+ except SortError as e:
+ raise TypeError(e) from e
uniques = _reconstruct_data(uniques, dtype, original)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a43e1b3007e2b..8409bae7d5e54 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -43,6 +43,7 @@
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name, make_invalid_op
import pandas.core.sorting as sorting
+from pandas.core.sorting import SortError
from pandas.core.strings import StringMethods
from pandas.io.formats.printing import (
@@ -2345,7 +2346,7 @@ def union(self, other, sort=None):
if sort is None:
try:
result = sorting.safe_sort(result)
- except TypeError as e:
+ except SortError as e:
warnings.warn("{}, sort order is undefined for "
"incomparable objects".format(e),
RuntimeWarning, stacklevel=3)
@@ -2432,7 +2433,10 @@ def intersection(self, other, sort=False):
taken = other.take(indexer)
if sort is None:
- taken = sorting.safe_sort(taken.values)
+ try:
+ taken = sorting.safe_sort(taken.values)
+ except sorting.SortError as e:
+ raise TypeError(e) from e
if self.name != other.name:
name = None
else:
@@ -2504,7 +2508,7 @@ def difference(self, other, sort=None):
if sort is None:
try:
the_diff = sorting.safe_sort(the_diff)
- except TypeError:
+ except SortError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
@@ -2580,7 +2584,7 @@ def symmetric_difference(self, other, result_name=None, sort=None):
if sort is None:
try:
the_diff = sorting.safe_sort(the_diff)
- except TypeError:
+ except SortError:
pass
attribs = self._get_attributes_dict()
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index cd5c853c6efe4..edbea5696277c 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1738,7 +1738,10 @@ def _sort_labels(uniques, left, right):
llength = len(left)
labels = np.concatenate([left, right])
- _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1)
+ try:
+ _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1)
+ except sorting.SortError as e:
+ raise TypeError(e) from e
new_labels = ensure_int64(new_labels)
new_left, new_right = new_labels[:llength], new_labels[llength:]
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index e1cf5b76ba05b..e8c0d6e75b7d4 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -17,6 +17,13 @@
_INT64_MAX = np.iinfo(np.int64).max
+class SortError(TypeError):
+ """
+ Error raised when problems arise during sorting due to problems
+ with input data. Subclass of `TypeError`.
+ """
+
+
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
@@ -437,8 +444,9 @@ def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
- nor list-like
- * If ``values`` cannot be sorted
+ nor list-like.
+ SortError
+ * If ``values`` cannot be sorted.
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
@@ -456,8 +464,11 @@ def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
- nums = np.sort(values[~str_pos])
- strs = np.sort(values[str_pos])
+ try:
+ nums = np.sort(values[~str_pos])
+ strs = np.sort(values[str_pos])
+ except TypeError as e:
+ raise SortError(e) from e
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index b64786de264cd..6f4f0f0fa28e4 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -24,6 +24,7 @@
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray
import pandas.core.common as com
+from pandas.core.sorting import SortError
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@@ -228,11 +229,9 @@ def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
- msg = ("unorderable types: .* [<>] .*"
- "|" # the above case happens for numpy < 1.14
- "'[<>]' not supported between instances of .*")
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(TypeError, match="complex") as excinfo:
algos.factorize(x17[::-1], sort=True)
+ assert type(excinfo.value.__cause__) == SortError
def test_float64_factorize(self, writable):
data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 04a50cf6facd5..3adb8f3aba0eb 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -10,7 +10,7 @@
DataFrame, MultiIndex, Series, compat, concat, merge, to_datetime)
from pandas.core import common as com
from pandas.core.sorting import (
- decons_group_index, get_group_index, is_int64_overflow_possible,
+ SortError, decons_group_index, get_group_index, is_int64_overflow_possible,
lexsort_indexer, nargsort, safe_sort)
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -413,10 +413,8 @@ def test_mixed_integer_from_list(self):
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
- msg = ("unorderable types: .* [<>] .*"
- "|" # the above case happens for numpy < 1.14
- "'[<>]' not supported between instances of .*")
- with pytest.raises(TypeError, match=msg):
+ msg = "int.*datetime|datetime.*int"
+ with pytest.raises(SortError, match=msg):
safe_sort(arr)
def test_exceptions(self):
| xref https://github.com/pandas-dev/pandas/pull/25537#issuecomment-469425015
custom `Exception` added primarily to allow more targeted testing, but also to distinguish sort failures and bad input to `safe_sort` | https://api.github.com/repos/pandas-dev/pandas/pulls/25569 | 2019-03-06T14:41:27Z | 2019-03-29T12:31:18Z | null | 2019-03-29T12:31:18Z |
BLD: Fixed pip install with no numpy | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f864fcd04e3d4..4ca9d57f3a2e5 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -30,6 +30,7 @@ Fixed Regressions
- Fixed regression in :class:`TimedeltaIndex` where ``np.sum(index)`` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
+- Fixed pip installing from source into an environment without NumPy (:issue:`25193`)
.. _whatsnew_0242.enhancements:
diff --git a/setup.py b/setup.py
index c8d29a2e4be5a..a83e07b50ed57 100755
--- a/setup.py
+++ b/setup.py
@@ -477,6 +477,11 @@ def maybe_cythonize(extensions, *args, **kwargs):
# Avoid running cythonize on `python setup.py clean`
# See https://github.com/cython/cython/issues/1495
return extensions
+ if not cython:
+ # Avoid trying to look up numpy when installing from sdist
+ # https://github.com/pandas-dev/pandas/issues/25193
+ # TODO: See if this can be removed after pyproject.toml added.
+ return extensions
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
# TODO: Is this really necessary here?
@@ -485,11 +490,8 @@ def maybe_cythonize(extensions, *args, **kwargs):
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
- if cython:
- build_ext.render_templates(_pxifiles)
- return cythonize(extensions, *args, **kwargs)
- else:
- return extensions
+ build_ext.render_templates(_pxifiles)
+ return cythonize(extensions, *args, **kwargs)
def srcpath(name=None, suffix='.pyx', subdir='src'):
| Closes #25193
Still testing this locally and on https://github.com/pandas-dev/pandas-ci/pull/6 | https://api.github.com/repos/pandas-dev/pandas/pulls/25568 | 2019-03-06T14:26:47Z | 2019-03-06T22:39:50Z | 2019-03-06T22:39:50Z | 2019-09-12T19:23:00Z |
DEPR: remove Panel-specific parts of core.indexing | diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx
index 6e62978c8477f..308e914b7b5b7 100644
--- a/pandas/_libs/indexing.pyx
+++ b/pandas/_libs/indexing.pyx
@@ -17,4 +17,8 @@ cdef class _NDFrameIndexerBase:
ndim = self._ndim
if ndim is None:
ndim = self._ndim = self.obj.ndim
+ if ndim > 2:
+ msg = ("NDFrameIndexer does not support NDFrame objects with"
+ " ndim > 2")
+ raise ValueError(msg)
return ndim
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 93e56834b62f6..86158fa9ee529 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -11,7 +11,7 @@
from pandas.core.dtypes.common import (
ensure_platform_int, is_float, is_integer, is_integer_dtype, is_iterator,
is_list_like, is_numeric_dtype, is_scalar, is_sequence, is_sparse)
-from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import _infer_fill_value, isna
import pandas.core.common as com
@@ -450,10 +450,6 @@ def _setitem_with_indexer(self, indexer, value):
self.obj._maybe_update_cacher(clear=True)
return self.obj
- # set using setitem (Panel and > dims)
- elif self.ndim >= 3:
- return self.obj.__setitem__(indexer, value)
-
# set
item_labels = self.obj._get_axis(info_axis)
@@ -642,9 +638,6 @@ def can_do_equal_len():
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
- if isinstance(value, ABCPanel):
- value = self._align_panel(indexer, value)
-
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
@@ -690,7 +683,6 @@ def ravel(i):
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
- is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
@@ -702,11 +694,6 @@ def ravel(i):
if is_frame:
single_aligner = single_aligner and aligners[0]
- # panel
- elif is_panel:
- single_aligner = (single_aligner and
- (aligners[1] or aligners[2]))
-
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
@@ -738,7 +725,7 @@ def ravel(i):
return ser.reindex(new_ix)._values
# 2 dims
- elif single_aligner and is_frame:
+ elif single_aligner:
# reindex along index
ax = self.obj.axes[1]
@@ -746,30 +733,6 @@ def ravel(i):
return ser._values.copy()
return ser.reindex(ax)._values
- # >2 dims
- elif single_aligner:
-
- broadcast = []
- for n, labels in enumerate(self.obj._get_plane_axes(i)):
-
- # reindex along the matching dimensions
- if len(labels & ser.index):
- ser = ser.reindex(labels)
- else:
- broadcast.append((n, len(labels)))
-
- # broadcast along other dims
- ser = ser._values.copy()
- for (axis, l) in broadcast:
- shape = [-1] * (len(broadcast) + 1)
- shape[axis] = l
- ser = np.tile(ser, l).reshape(shape)
-
- if self.obj.ndim == 3:
- ser = ser.T
-
- return ser
-
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
@@ -782,7 +745,6 @@ def ravel(i):
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
- is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
@@ -802,21 +764,6 @@ def _align_frame(self, indexer, df):
else:
sindexers.append(i)
- # panel
- if is_panel:
-
- # need to conform to the convention
- # as we are not selecting on the items axis
- # and we have a single indexer
- # GH 7763
- if len(sindexers) == 1 and sindexers[0] != 0:
- df = df.T
-
- if idx is None:
- idx = df.index
- if cols is None:
- cols = df.columns
-
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
@@ -843,24 +790,8 @@ def _align_frame(self, indexer, df):
val = df.reindex(index=ax)._values
return val
- elif is_scalar(indexer) and is_panel:
- idx = self.obj.axes[1]
- cols = self.obj.axes[2]
-
- # by definition we are indexing on the 0th axis
- # a passed in dataframe which is actually a transpose
- # of what is needed
- if idx.equals(df.index) and cols.equals(df.columns):
- return df.copy()._values
-
- return df.reindex(idx, columns=cols)._values
-
raise ValueError('Incompatible indexer with DataFrame')
- def _align_panel(self, indexer, df):
- raise NotImplementedError("cannot set using an indexer with a Panel "
- "yet!")
-
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
@@ -1059,13 +990,6 @@ def _getitem_nested_tuple(self, tup):
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
-
- # GH 7516
- # if had a 3 dim and are going to a 2d
- # axes are reversed on a DataFrame
- if i >= 1 and current_ndim == 3 and obj.ndim == 2:
- obj = obj.T
-
axis -= 1
return obj
@@ -1562,8 +1486,8 @@ class _LocIndexer(_LocationIndexer):
- A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
- - A ``callable`` function with one argument (the calling Series, DataFrame
- or Panel) and that returns valid output for indexing (one of the above)
+ - A ``callable`` function with one argument (the calling Series or
+ DataFrame) and that returns valid output for indexing (one of the above)
See more at :ref:`Selection by Label <indexing.label>`
@@ -1931,8 +1855,8 @@ class _iLocIndexer(_LocationIndexer):
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- - A ``callable`` function with one argument (the calling Series, DataFrame
- or Panel) and that returns valid output for indexing (one of the above).
+ - A ``callable`` function with one argument (the calling Series or
+ DataFrame) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 22f6855717e80..a0e3df182b129 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -11,11 +11,14 @@
import pandas as pd
from pandas import DataFrame, Index, NaT, Series
+from pandas.core.generic import NDFrame
from pandas.core.indexing import (
_maybe_numeric_slice, _non_reducing_slice, validate_indices)
from pandas.tests.indexing.common import Base, _mklbl
import pandas.util.testing as tm
+ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
+
# ------------------------------------------------------------------------
# Indexing test cases
@@ -53,6 +56,93 @@ def test_setitem_ndarray_1d(self):
with pytest.raises(ValueError):
df[2:5] = np.arange(1, 4) * 1j
+ @pytest.mark.parametrize('index', tm.all_index_generator(5),
+ ids=lambda x: type(x).__name__)
+ @pytest.mark.parametrize('obj', [
+ lambda i: Series(np.arange(len(i)), index=i),
+ lambda i: DataFrame(
+ np.random.randn(len(i), len(i)), index=i, columns=i)
+ ], ids=['Series', 'DataFrame'])
+ @pytest.mark.parametrize('idxr, idxr_id', [
+ (lambda x: x, 'getitem'),
+ (lambda x: x.loc, 'loc'),
+ (lambda x: x.iloc, 'iloc'),
+ pytest.param(lambda x: x.ix, 'ix', marks=ignore_ix)
+ ])
+ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):
+ # GH 25567
+ obj = obj(index)
+ idxr = idxr(obj)
+ nd3 = np.random.randint(5, size=(2, 2, 2))
+
+ msg = (r"Buffer has wrong number of dimensions \(expected 1,"
+ r" got 3\)|"
+ "The truth value of an array with more than one element is"
+ " ambiguous|"
+ "Cannot index with multidimensional key|"
+ r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]|"
+ "unhashable type: 'numpy.ndarray'" # TypeError
+ )
+
+ if (isinstance(obj, Series) and idxr_id == 'getitem'
+ and index.inferred_type in [
+ 'string', 'datetime64', 'period', 'timedelta64',
+ 'boolean', 'categorical']):
+ idxr[nd3]
+ else:
+ if (isinstance(obj, DataFrame) and idxr_id == 'getitem'
+ and index.inferred_type == 'boolean'):
+ error = TypeError
+ else:
+ error = ValueError
+
+ with pytest.raises(error, match=msg):
+ idxr[nd3]
+
+ @pytest.mark.parametrize('index', tm.all_index_generator(5),
+ ids=lambda x: type(x).__name__)
+ @pytest.mark.parametrize('obj', [
+ lambda i: Series(np.arange(len(i)), index=i),
+ lambda i: DataFrame(
+ np.random.randn(len(i), len(i)), index=i, columns=i)
+ ], ids=['Series', 'DataFrame'])
+ @pytest.mark.parametrize('idxr, idxr_id', [
+ (lambda x: x, 'setitem'),
+ (lambda x: x.loc, 'loc'),
+ (lambda x: x.iloc, 'iloc'),
+ pytest.param(lambda x: x.ix, 'ix', marks=ignore_ix)
+ ])
+ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
+ # GH 25567
+ obj = obj(index)
+ idxr = idxr(obj)
+ nd3 = np.random.randint(5, size=(2, 2, 2))
+
+ msg = (r"Buffer has wrong number of dimensions \(expected 1,"
+ r" got 3\)|"
+ "The truth value of an array with more than one element is"
+ " ambiguous|"
+ "Only 1-dimensional input arrays are supported|"
+ "'pandas._libs.interval.IntervalTree' object has no attribute"
+ " 'set_value'|" # AttributeError
+ "unhashable type: 'numpy.ndarray'|" # TypeError
+ r"^\[\[\[" # pandas.core.indexing.IndexingError
+ )
+
+ if ((idxr_id == 'iloc')
+ or ((isinstance(obj, Series) and idxr_id == 'setitem'
+ and index.inferred_type in [
+ 'floating', 'string', 'datetime64', 'period', 'timedelta64',
+ 'boolean', 'categorical']))
+ or (idxr_id == 'ix' and index.inferred_type in [
+ 'string', 'datetime64', 'period', 'boolean'])):
+ idxr[nd3] = 0
+ else:
+ with pytest.raises(
+ (ValueError, AttributeError, TypeError,
+ pd.core.indexing.IndexingError), match=msg):
+ idxr[nd3] = 0
+
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
@@ -1015,3 +1105,26 @@ def test_extension_array_cross_section_converts():
result = df.iloc[0]
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('idxr, error, error_message', [
+ (lambda x: x,
+ AttributeError,
+ "'numpy.ndarray' object has no attribute 'get'"),
+ (lambda x: x.loc,
+ AttributeError,
+ "type object 'NDFrame' has no attribute '_AXIS_ALIASES'"),
+ (lambda x: x.iloc,
+ AttributeError,
+ "type object 'NDFrame' has no attribute '_AXIS_ALIASES'"),
+ pytest.param(
+ lambda x: x.ix,
+ ValueError,
+ "NDFrameIndexer does not support NDFrame objects with ndim > 2",
+ marks=ignore_ix)
+])
+def test_ndframe_indexing_raises(idxr, error, error_message):
+ # GH 25567
+ frame = NDFrame(np.random.randint(5, size=(2, 2, 2)))
+ with pytest.raises(error, match=error_message):
+ idxr(frame)[0]
| follow-on from #25550 | https://api.github.com/repos/pandas-dev/pandas/pulls/25567 | 2019-03-06T11:11:38Z | 2019-05-30T13:15:19Z | 2019-05-30T13:15:19Z | 2019-05-30T16:48:39Z |
Backport PR #25559 on branch 0.24.x (DOC: Small fixes to 0.24.2 whatsnew) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index e80b1060e867d..f864fcd04e3d4 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -24,13 +24,11 @@ Fixed Regressions
- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`)
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
- Fixed regression in :meth:`DataFrame.replace` where ``regex=True`` was only replacing patterns matching the start of the string (:issue:`25259`)
-
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
-- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
-- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the `Series` on the right contains null values (:issue:`25317`)
-- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
+- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ``Categorical`` data (:issue:`25299`)
+- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the ``Series`` on the right contains null values (:issue:`25317`)
+- Fixed regression in :class:`TimedeltaIndex` where ``np.sum(index)`` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
-
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
.. _whatsnew_0242.enhancements:
@@ -60,7 +58,7 @@ Bug Fixes
**I/O**
-- Better handling of terminal printing when the terminal dimensions are not known (:issue:`25080`);
+- Better handling of terminal printing when the terminal dimensions are not known (:issue:`25080`)
- Bug in reading a HDF5 table-format ``DataFrame`` created in Python 2, in Python 3 (:issue:`24925`)
- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`)
- Bug where float indexes could have misaligned values when printing (:issue:`25061`)
@@ -86,7 +84,7 @@ Bug Fixes
**Reshaping**
-- Bug in :meth:`pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
+- Bug in :meth:`~pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`)
-
@@ -103,7 +101,7 @@ Bug Fixes
- Bug in ``IntervalTree`` where a ``RecursionError`` occurs upon construction due to an overflow when adding endpoints, which also causes :class:`IntervalIndex` to crash during indexing operations (:issue:`25485`)
-
-.. _whatsnew_0.242.contributors:
+.. _whatsnew_0242.contributors:
Contributors
~~~~~~~~~~~~
| Backport PR #25559: DOC: Small fixes to 0.24.2 whatsnew | https://api.github.com/repos/pandas-dev/pandas/pulls/25563 | 2019-03-06T09:13:38Z | 2019-03-06T09:16:57Z | 2019-03-06T09:16:57Z | 2019-03-06T09:50:44Z |
Backport PR #25558 on branch 0.24.x (TST: Skip IntervalTree construction overflow test on 32bit) | diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 46b2d12015a22..5d9ef2a9a6c32 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -172,6 +172,7 @@ def test_is_overlapping_trivial(self, closed, left, right):
tree = IntervalTree(left, right, closed=closed)
assert tree.is_overlapping is False
+ @pytest.mark.skipif(compat.is_platform_32bit(), reason='GH 23440')
def test_construction_overflow(self):
# GH 25485
left, right = np.arange(101), [np.iinfo(np.int64).max] * 101
| Backport PR #25558: TST: Skip IntervalTree construction overflow test on 32bit | https://api.github.com/repos/pandas-dev/pandas/pulls/25561 | 2019-03-06T04:17:34Z | 2019-03-06T09:15:41Z | 2019-03-06T09:15:41Z | 2019-03-06T09:15:41Z |
DOC: Small fixes to 0.24.2 whatsnew | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index e80b1060e867d..f864fcd04e3d4 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -24,13 +24,11 @@ Fixed Regressions
- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`)
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
- Fixed regression in :meth:`DataFrame.replace` where ``regex=True`` was only replacing patterns matching the start of the string (:issue:`25259`)
-
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
-- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
-- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the `Series` on the right contains null values (:issue:`25317`)
-- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
+- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ``Categorical`` data (:issue:`25299`)
+- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the ``Series`` on the right contains null values (:issue:`25317`)
+- Fixed regression in :class:`TimedeltaIndex` where ``np.sum(index)`` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
-
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
.. _whatsnew_0242.enhancements:
@@ -60,7 +58,7 @@ Bug Fixes
**I/O**
-- Better handling of terminal printing when the terminal dimensions are not known (:issue:`25080`);
+- Better handling of terminal printing when the terminal dimensions are not known (:issue:`25080`)
- Bug in reading a HDF5 table-format ``DataFrame`` created in Python 2, in Python 3 (:issue:`24925`)
- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`)
- Bug where float indexes could have misaligned values when printing (:issue:`25061`)
@@ -86,7 +84,7 @@ Bug Fixes
**Reshaping**
-- Bug in :meth:`pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
+- Bug in :meth:`~pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`)
-
@@ -103,7 +101,7 @@ Bug Fixes
- Bug in ``IntervalTree`` where a ``RecursionError`` occurs upon construction due to an overflow when adding endpoints, which also causes :class:`IntervalIndex` to crash during indexing operations (:issue:`25485`)
-
-.. _whatsnew_0.242.contributors:
+.. _whatsnew_0242.contributors:
Contributors
~~~~~~~~~~~~
| A few small things I noticed reading over the whatsnew entries.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25559 | 2019-03-06T01:34:53Z | 2019-03-06T09:13:12Z | 2019-03-06T09:13:12Z | 2019-03-06T18:01:06Z |
TST: Skip IntervalTree construction overflow test on 32bit | diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 46b2d12015a22..5d9ef2a9a6c32 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -172,6 +172,7 @@ def test_is_overlapping_trivial(self, closed, left, right):
tree = IntervalTree(left, right, closed=closed)
assert tree.is_overlapping is False
+ @pytest.mark.skipif(compat.is_platform_32bit(), reason='GH 23440')
def test_construction_overflow(self):
# GH 25485
left, right = np.arange(101), [np.iinfo(np.int64).max] * 101
| - [X] xref https://github.com/pandas-dev/pandas/pull/25498#issuecomment-469672734
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Skipping this test as it's failing on 32bit due to #23440. Note that this is consistent with #23442 where a few other tests that are being skipped for the same reason.
cc @jreback | https://api.github.com/repos/pandas-dev/pandas/pulls/25558 | 2019-03-06T01:28:15Z | 2019-03-06T04:17:06Z | 2019-03-06T04:17:06Z | 2019-03-06T18:01:26Z |
BUG: Handle readonly arrays in period_array | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f864fcd04e3d4..283f12361841c 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -29,6 +29,7 @@ Fixed Regressions
- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the ``Series`` on the right contains null values (:issue:`25317`)
- Fixed regression in :class:`TimedeltaIndex` where ``np.sum(index)`` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
+- Fixed regression in creating a period-dtype array from a read-only NumPy array of period objects. (:issue:`25403`)
- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index a5a50ea59753d..c8eaa2cfd85c2 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1438,7 +1438,9 @@ cdef accessor _get_accessor_func(int code):
@cython.wraparound(False)
@cython.boundscheck(False)
-def extract_ordinals(object[:] values, freq):
+def extract_ordinals(ndarray[object] values, freq):
+ # TODO: Change type to const object[:] when Cython supports that.
+
cdef:
Py_ssize_t i, n = len(values)
int64_t[:] ordinals = np.empty(n, dtype=np.int64)
@@ -1472,7 +1474,9 @@ def extract_ordinals(object[:] values, freq):
return ordinals.base # .base to access underlying np.ndarray
-def extract_freq(object[:] values):
+def extract_freq(ndarray[object] values):
+ # TODO: Change type to const object[:] when Cython supports that.
+
cdef:
Py_ssize_t i, n = len(values)
object p
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index affe3b3854490..99255d819d28e 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -41,6 +41,22 @@ def test_period_array_ok(data, freq, expected):
tm.assert_numpy_array_equal(result, expected)
+def test_period_array_readonly_object():
+ # https://github.com/pandas-dev/pandas/issues/25403
+ pa = period_array([pd.Period('2019-01-01')])
+ arr = np.asarray(pa, dtype='object')
+ arr.setflags(write=False)
+
+ result = period_array(arr)
+ tm.assert_period_array_equal(result, pa)
+
+ result = pd.Series(arr)
+ tm.assert_series_equal(result, pd.Series(pa))
+
+ result = pd.DataFrame({"A": arr})
+ tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
+
+
def test_from_datetime64_freq_changes():
# https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
| Closes #25403
~@jbrockmendel suggested changing the def from `object[:]` to `ndarray[object]`. This unfortunately had the same runtime error about readonly buffers. So as a workaround for now (until Cython has const memoryviews), how about unsetting and then `re-setting` the writability of the input array?~ | https://api.github.com/repos/pandas-dev/pandas/pulls/25556 | 2019-03-05T20:18:48Z | 2019-03-07T14:24:56Z | 2019-03-07T14:24:56Z | 2019-03-07T14:25:00Z |
DEPS: Bump numpy to 1.13.3 | diff --git a/.travis.yml b/.travis.yml
index f8302f4718ef2..529f1221899dc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -41,7 +41,7 @@ matrix:
- dist: trusty
env:
- - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36.yaml" PATTERN="((not slow and not network) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true
+ - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true
# In allow_failures
- dist: trusty
diff --git a/README.md b/README.md
index dcf39864e46e2..e8bfd28cc8208 100644
--- a/README.md
+++ b/README.md
@@ -164,7 +164,7 @@ pip install pandas
```
## Dependencies
-- [NumPy](https://www.numpy.org): 1.12.0 or higher
+- [NumPy](https://www.numpy.org): 1.13.3 or higher
- [python-dateutil](https://labix.org/python-dateutil): 2.5.0 or higher
- [pytz](https://pythonhosted.org/pytz): 2015.4 or higher
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
index cd5879bf55e4b..1e3390a7f8d9b 100644
--- a/ci/azure/windows.yml
+++ b/ci/azure/windows.yml
@@ -8,7 +8,7 @@ jobs:
vmImage: ${{ parameters.vmImage }}
strategy:
matrix:
- py36_np14:
+ py36_np15:
ENV_FILE: ci/deps/azure-windows-36.yaml
CONDA_PY: "36"
diff --git a/ci/deps/azure-35-compat.yaml b/ci/deps/azure-35-compat.yaml
index adae9bc761a42..708c08239c7c0 100644
--- a/ci/deps/azure-35-compat.yaml
+++ b/ci/deps/azure-35-compat.yaml
@@ -3,23 +3,23 @@ channels:
- defaults
- conda-forge
dependencies:
- - beautifulsoup4==4.4.1
- - bottleneck=1.2.0
- - cython=0.28.2
- - hypothesis>=3.58.0
+ - beautifulsoup4=4.4.1
+ - bottleneck=1.2.1
- jinja2=2.8
- - numexpr=2.6.1
- - numpy=1.12.0
+ - numexpr=2.6.2
+ - numpy=1.13.3
- openpyxl=2.4.0
- pytables=3.4.2
- python-dateutil=2.5.0
- - python=3.5*
+ - python=3.5.*
- pytz=2015.4
- - scipy=0.18.1
+ - scipy=0.19.0
- xlrd=1.0.0
- xlsxwriter=0.7.7
- xlwt=1.0.0
# universal
+ - cython=0.28.2
+ - hypothesis>=3.58.0
- pytest-xdist
- pytest-mock
- isort
diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index c74d56443be5d..4209247cd6ce5 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -4,15 +4,15 @@ channels:
- conda-forge
dependencies:
- beautifulsoup4==4.5.1
- - bottleneck=1.2.0
+ - bottleneck=1.2.*
- cython=0.28.2
- lxml
- - matplotlib=2.0.0
- - numpy=1.12.0
+ - matplotlib=2.2.2
+ - numpy=1.14.*
- openpyxl=2.4.0
- python-dateutil
- python-blosc
- - python=3.6
+ - python=3.6.*
- pytz=2016.10
- scipy
- sqlalchemy=1.1.4
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index 3f788e5ddcf39..fa2749017ec07 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -10,14 +10,14 @@ dependencies:
- ipython
- jinja2
- lxml
- - matplotlib
+ - matplotlib=3.0.*
- nomkl
- numexpr
- - numpy
+ - numpy=1.15.*
- openpyxl
- pytables
- python-dateutil
- - python=3.6*
+ - python=3.6.*
- pytz
- s3fs
- scipy
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index 9d598cddce91a..71d69d0ab68c1 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -16,7 +16,7 @@ dependencies:
- openpyxl
- pytables
- python-dateutil
- - python=3.7*
+ - python=3.7.*
- pytz
- s3fs
- scipy
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index e58c1f599279c..d940463e2768c 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -2,7 +2,7 @@ name: pandas-dev
channels:
- defaults
dependencies:
- - python=3.7*
+ - python=3.7.*
- pytz
- Cython>=0.28.2
# universal
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml
index 38625c6563753..591266348a5f1 100644
--- a/ci/deps/azure-macos-35.yaml
+++ b/ci/deps/azure-macos-35.yaml
@@ -8,14 +8,14 @@ dependencies:
- html5lib
- jinja2
- lxml
- - matplotlib=2.2.0
+ - matplotlib=2.2.3
- nomkl
- numexpr
- - numpy=1.12.0
+ - numpy=1.13.3
- openpyxl
- pyarrow
- pytables
- - python=3.5*
+ - python=3.5.*
- pytz
- xarray
- xlrd
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 5ce55a4cb4c0e..7b3ae259fb8dd 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -7,9 +7,9 @@ dependencies:
- bottleneck
- boost-cpp<1.67
- fastparquet>=0.2.1
- - matplotlib
+ - matplotlib=3.0.2
- numexpr
- - numpy=1.14*
+ - numpy=1.15.*
- openpyxl
- parquet-cpp
- pyarrow
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml
index 96ddc1d6293d8..5384e794d442a 100644
--- a/ci/deps/azure-windows-37.yaml
+++ b/ci/deps/azure-windows-37.yaml
@@ -9,7 +9,7 @@ dependencies:
- html5lib
- jinja2
- lxml
- - matplotlib=3.0.1
+ - matplotlib=2.2.*
- numexpr
- numpy=1.14.*
- openpyxl
diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36-cov.yaml
similarity index 92%
rename from ci/deps/travis-36.yaml
rename to ci/deps/travis-36-cov.yaml
index 06fc0d76a3d16..95914568bea43 100644
--- a/ci/deps/travis-36.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -14,14 +14,15 @@ dependencies:
- matplotlib
- nomkl
- numexpr
- - numpy
+ - numpy=1.15.*
- openpyxl
+ - pandas-gbq
- psycopg2
- pyarrow=0.9.0
- pymysql
- pytables
- python-snappy
- - python=3.6.6
+ - python=3.6.*
- pytz
- s3fs
- scikit-learn
diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml
index 8015f7bdc81c6..9d6cbd82fdc05 100644
--- a/ci/deps/travis-36-doc.yaml
+++ b/ci/deps/travis-36-doc.yaml
@@ -29,7 +29,7 @@ dependencies:
- pytables
- python-dateutil
- python-snappy
- - python=3.6*
+ - python=3.6.*
- pytz
- scipy
- seaborn
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index fb0401958f05a..71022320da674 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -4,26 +4,33 @@ channels:
- conda-forge
dependencies:
- beautifulsoup4
+ - blosc=1.14.3
+ - python-blosc
- cython>=0.28.2
+ - fastparquet=0.2.1
+ - gcsfs=0.1.0
- html5lib
- ipython
- jinja2
- - lxml
- - matplotlib
+ - lxml=3.7.0
+ - matplotlib=3.0.0
- nomkl
- numexpr
- numpy
- openpyxl
- - psycopg2
+ - pandas-gbq=0.8.0
+ - psycopg2=2.6.2
- pymysql=0.7.9
- pytables
- python-dateutil
- - python=3.6*
+ # cannot go past python=3.6.6 for matplotlib=3.0.0 due to
+ # https://github.com/matplotlib/matplotlib/issues/12626
+ - python=3.6.6
- pytz
- - s3fs
+ - s3fs=0.0.8
- scipy
- - sqlalchemy
- - xarray
+ - sqlalchemy=1.1.4
+ - xarray=0.8.2
- xlrd
- xlsxwriter
- xlwt
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index 46875d59411d9..365c78c02f4d4 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -16,7 +16,7 @@ dependencies:
- pymysql
- pytables
- python-dateutil
- - python=3.6*
+ - python=3.6.*
- pytz
- s3fs
- scipy
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index f71d29fe13378..3ddd08e640806 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -4,7 +4,7 @@ channels:
- conda-forge
- c3i_test
dependencies:
- - python=3.7
+ - python=3.7.*
- botocore>=1.11
- cython>=0.28.2
- numpy
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 5df633e8dd984..b3b5945cc515e 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -224,7 +224,7 @@ Dependencies
------------
* `setuptools <https://setuptools.readthedocs.io/en/latest/>`__: 24.2.0 or higher
-* `NumPy <http://www.numpy.org>`__: 1.12.0 or higher
+* `NumPy <http://www.numpy.org>`__: 1.13.3 or higher
* `python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__: 2.5.0 or higher
* `pytz <http://pytz.sourceforge.net/>`__: 2015.4 or higher
@@ -235,11 +235,11 @@ Recommended Dependencies
* `numexpr <https://github.com/pydata/numexpr>`__: for accelerating certain numerical operations.
``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups.
- If installed, must be Version 2.6.1 or higher.
+ If installed, must be Version 2.6.2 or higher.
* `bottleneck <https://github.com/kwgoodman/bottleneck>`__: for accelerating certain types of ``nan``
evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed,
- must be Version 1.2.0 or higher.
+ must be Version 1.2.1 or higher.
.. note::
@@ -254,27 +254,27 @@ Optional Dependencies
* `Cython <http://www.cython.org>`__: Only necessary to build development
version. Version 0.28.2 or higher.
-* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.18.1 or higher
-* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims. Version 0.7.0 or higher is recommended.
+* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.19.0 or higher
+* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims. Version 0.8.2 or higher is recommended.
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.4.2 or higher
* `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.9.0): necessary for feather-based storage.
-* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.2.1) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support.
+* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.9.0) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.2.1) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 1.1.4 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are:
* `psycopg2 <http://initd.org/psycopg/>`__: for PostgreSQL
* `pymysql <https://github.com/PyMySQL/PyMySQL>`__: for MySQL.
* `SQLite <https://docs.python.org/3/library/sqlite3.html>`__: for SQLite, this is included in Python's standard library by default.
-* `matplotlib <http://matplotlib.org/>`__: for plotting, Version 2.0.0 or higher.
+* `matplotlib <http://matplotlib.org/>`__: for plotting, Version 2.2.2 or higher.
* For Excel I/O:
* `xlrd/xlwt <http://www.python-excel.org/>`__: Excel reading (xlrd), version 1.0.0 or higher required, and writing (xlwt)
* `openpyxl <https://openpyxl.readthedocs.io/en/stable/>`__: openpyxl version 2.4.0
- for writing .xlsx files (xlrd >= 0.9.0)
+ for writing .xlsx files (xlrd >= 1.0.0)
* `XlsxWriter <https://pypi.org/project/XlsxWriter>`__: Alternative Excel writer
* `Jinja2 <http://jinja.pocoo.org/>`__: Template engine for conditional HTML formatting.
-* `s3fs <http://s3fs.readthedocs.io/>`__: necessary for Amazon S3 access (s3fs >= 0.0.7).
+* `s3fs <http://s3fs.readthedocs.io/>`__: necessary for Amazon S3 access (s3fs >= 0.0.8).
* `blosc <https://pypi.org/project/blosc>`__: for msgpack compression using ``blosc``
* `gcsfs <http://gcsfs.readthedocs.io/>`__: necessary for Google Cloud Storage access (gcsfs >= 0.1.0).
* One of
@@ -289,8 +289,6 @@ Optional Dependencies
<https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__:
for Google BigQuery I/O. (pandas-gbq >= 0.8.0)
-
-* `Backports.lzma <https://pypi.org/project/backports.lzma/>`__: Only for Python 2, for writing to and/or reading from an xz compressed DataFrame in CSV; Python 3 support is built into the standard library.
* One of the following combinations of libraries is needed to use the
top-level :func:`~pandas.read_html` function:
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 49b2349851479..c7554a70bea91 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -159,30 +159,54 @@ cause a ``SparseSeries`` or ``SparseDataFrame`` to be returned, as before.
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Due to dropping support for Python 2.7, a number of optional dependencies have updated minimum versions.
-Independently, some minimum supported versions of dependencies were updated (:issue:`23519`, :issue:`24942`).
+Due to dropping support for Python 2.7, a number of optional dependencies have updated minimum versions (:issue:`25725`, :issue:`24942`, :issue:`25752`).
+Independently, some minimum supported versions of dependencies were updated (:issue:`23519`, :issue:`25554`).
If installed, we now require:
+-----------------+-----------------+----------+
| Package | Minimum Version | Required |
+=================+=================+==========+
-| beautifulsoup4 | 4.4.1 | |
+| numpy | 1.13.3 | X |
+-----------------+-----------------+----------+
-| openpyxl | 2.4.0 | |
+| pytz | 2015.4 | X |
+-----------------+-----------------+----------+
-| pymysql | 0.7.9 | |
+| bottleneck | 1.2.1 | |
+-----------------+-----------------+----------+
-| pytz | 2015.4 | |
-+-----------------+-----------------+----------+
-| sqlalchemy | 1.1.4 | |
-+-----------------+-----------------+----------+
-| xlsxwriter | 0.7.7 | |
-+-----------------+-----------------+----------+
-| xlwt | 1.0.0 | |
+| numexpr | 2.6.2 | |
+-----------------+-----------------+----------+
| pytest (dev) | 4.0.2 | |
+-----------------+-----------------+----------+
+For `optional libraries <https://pandas-docs.github.io/pandas-docs-travis/install.html#dependencies>`_ the general recommendation is to use the latest version.
+The following table lists the lowest version per library that is currently being tested throughout the development of pandas.
+Optional libraries below the lowest tested version may still work, but are not considered supported.
+
++-----------------+-----------------+
+| Package | Minimum Version |
++=================+=================+
+| fastparquet | 0.2.1 |
++-----------------+-----------------+
+| matplotlib | 2.2.2 |
++-----------------+-----------------+
+| openpyxl | 2.4.0 |
++-----------------+-----------------+
+| pyarrow | 0.9.0 |
++-----------------+-----------------+
+| pytables | 3.4.2 |
++-----------------+-----------------+
+| scipy | 0.19.0 |
++-----------------+-----------------+
+| sqlalchemy | 1.1.4 |
++-----------------+-----------------+
+| xarray | 0.8.2 |
++-----------------+-----------------+
+| xlrd | 1.0.0 |
++-----------------+-----------------+
+| xlsxwriter | 0.7.7 |
++-----------------+-----------------+
+| xlwt | 1.0.0 |
++-----------------+-----------------+
+
.. _whatsnew_0250.api.other:
Other API Changes
diff --git a/environment.yml b/environment.yml
index 1d7c8b96216e3..a1042be48156c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -31,14 +31,14 @@ dependencies:
- blosc
- botocore>=1.11
- boto3
- - bottleneck>=1.2.0
+ - bottleneck>=1.2.1
- fastparquet>=0.2.1
- html5lib
- ipython>=5.6.0
- ipykernel
- jinja2
- lxml
- - matplotlib>=2.0.0
+ - matplotlib>=2.2.2
- nbsphinx
- numexpr>=2.6.8
- openpyxl
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index c4d47a3c2384a..ae15fb36169be 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -129,18 +129,6 @@ class RoundTo(object):
return 4
-cdef inline _npdivmod(x1, x2):
- """implement divmod for numpy < 1.13"""
- return np.floor_divide(x1, x2), np.remainder(x1, x2)
-
-
-try:
- from numpy import divmod as npdivmod
-except ImportError:
- # numpy < 1.13
- npdivmod = _npdivmod
-
-
cdef inline _floor_int64(values, unit):
return values - np.remainder(values, unit)
@@ -183,7 +171,7 @@ def round_nsint64(values, mode, freq):
# for odd unit there is no need of a tie break
if unit % 2:
return _rounddown_int64(values, unit)
- quotient, remainder = npdivmod(values, unit)
+ quotient, remainder = np.divmod(values, unit)
mask = np.logical_or(
remainder > (unit // 2),
np.logical_and(remainder == (unit // 2), quotient % 2)
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 6e9f768d8bd68..aed8fd2710e6f 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -9,18 +9,17 @@
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
-_np_version_under1p13 = _nlv < LooseVersion('1.13')
_np_version_under1p14 = _nlv < LooseVersion('1.14')
_np_version_under1p15 = _nlv < LooseVersion('1.15')
_np_version_under1p16 = _nlv < LooseVersion('1.16')
_np_version_under1p17 = _nlv < LooseVersion('1.17')
-if _nlv < '1.12':
+if _nlv < '1.13.3':
raise ImportError('this version of pandas is incompatible with '
- 'numpy < 1.12.0\n'
+ 'numpy < 1.13.3\n'
'your numpy version is {0}.\n'
- 'Please upgrade numpy to >= 1.12.0 to use '
+ 'Please upgrade numpy to >= 1.13.3 to use '
'this pandas version'.format(_np_version))
@@ -64,7 +63,6 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
__all__ = ['np',
- '_np_version_under1p13',
'_np_version_under1p14',
'_np_version_under1p15',
'_np_version_under1p16',
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 75b64a06fe8e8..26f42cd13ffe1 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -101,13 +101,6 @@ def f(self, other):
ret[na_mask] = False
return ret
- # Numpy < 1.13 may convert a scalar to a zerodim array during
- # comparison operation when second arg has higher priority, e.g.
- #
- # cat[0] < cat
- #
- # With cat[0], for example, being ``np.int64(1)`` by the time it gets
- # into this function would become ``np.array(1)``.
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 33e6674389e7c..8b14471521a69 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -197,9 +197,6 @@ def wrapper(self, other):
result = com.values_from_object(result)
- # Make sure to pass an array to result[...]; indexing with
- # Series breaks with older version of numpy
- o_mask = np.array(o_mask)
if o_mask.any():
result[o_mask] = nat_result
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 8e2ab586cacb6..0deefd2b10b6e 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -1,6 +1,7 @@
import numbers
import numpy as np
+from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas.compat.numpy import function as nv
@@ -82,20 +83,6 @@ def itemsize(self):
return self._dtype.itemsize
-# TODO(NumPy1.13): remove this
-# Compat for NumPy 1.12, which doesn't provide NDArrayOperatorsMixin
-# or __array_ufunc__, so those operations won't be available to people
-# on older NumPys.
-#
-# We would normally write this as bases=(...), then "class Foo(*bases):
-# but Python2 doesn't allow unpacking tuples in the class statement.
-# So, we fall back to "object", to avoid writing a metaclass.
-try:
- from numpy.lib.mixins import NDArrayOperatorsMixin
-except ImportError:
- NDArrayOperatorsMixin = object
-
-
class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
"""
A pandas ExtensionArray for NumPy data.
@@ -111,10 +98,6 @@ class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
-
- Notes
- -----
- Operations like ``+`` and applying ufuncs requires NumPy>=1.13.
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 96c89981ff5e9..c414362041627 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -1846,9 +1846,7 @@ def make_sparse(arr, kind='block', fill_value=None, dtype=None, copy=False):
if isna(fill_value):
mask = notna(arr)
else:
- # For str arrays in NumPy 1.12.0, operator!= below isn't
- # element-wise but just returns False if fill_value is not str,
- # so cast to object comparison to be safe
+ # cast to object comparison to be safe
if is_string_dtype(arr):
arr = arr.astype(object)
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index da89bde56fe18..c9b68661fd596 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -2,7 +2,7 @@
import warnings
_NUMEXPR_INSTALLED = False
-_MIN_NUMEXPR_VERSION = "2.6.1"
+_MIN_NUMEXPR_VERSION = "2.6.2"
_NUMEXPR_VERSION = None
try:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 0604689c6bb2b..3a676fc94dff8 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -17,7 +17,6 @@
from pandas._libs import Timestamp, lib
import pandas.compat as compat
from pandas.compat import lzip
-from pandas.compat.numpy import _np_version_under1p13
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
@@ -1218,7 +1217,7 @@ def count(self):
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
- minlength = ngroups or (None if _np_version_under1p13 else 0)
+ minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(out,
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 6a6ab78ae3554..f029cc75c350a 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -1,7 +1,6 @@
"""
Routines for filling missing data.
"""
-from distutils.version import LooseVersion
import operator
import numpy as np
@@ -347,17 +346,8 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
y : scalar or array_like
The result, of length R or length M or M by R.
"""
- import scipy
from scipy import interpolate
- if LooseVersion(scipy.__version__) < LooseVersion('0.18.0'):
- try:
- method = interpolate.piecewise_polynomial_interpolate
- return method(xi, yi.reshape(-1, 1), x,
- orders=order, der=der)
- except AttributeError:
- pass
-
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 5e762325da192..cfc42d26c5471 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -23,7 +23,7 @@
import pandas.core.common as com
_BOTTLENECK_INSTALLED = False
-_MIN_BOTTLENECK_VERSION = '1.0.0'
+_MIN_BOTTLENECK_VERSION = '1.2.1'
try:
import bottleneck as bn
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 266535fb6fcbd..09bed9a534f19 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -45,24 +45,11 @@ def _is_sqlalchemy_connectable(con):
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
-
- from distutils.version import LooseVersion
- ver = sqlalchemy.__version__
- # For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
- # for a sqlite engine, which results in a warning when trying to
- # read/write a DataFrame with int64 values. (GH7433)
- if LooseVersion(ver) < LooseVersion('0.8.2'):
- from sqlalchemy import BigInteger
- from sqlalchemy.ext.compiler import compiles
-
- @compiles(BigInteger, 'sqlite')
- def compile_big_int_sqlite(type_, compiler, **kw):
- return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
- import sqlalchemy
+ import sqlalchemy # noqa: F811
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py
index 5c5c4800eef62..67f3d983480f8 100644
--- a/pandas/plotting/_compat.py
+++ b/pandas/plotting/_compat.py
@@ -16,8 +16,5 @@ def inner():
return inner
-_mpl_ge_2_0_1 = _mpl_version('2.0.1', operator.ge)
-_mpl_ge_2_1_0 = _mpl_version('2.1.0', operator.ge)
-_mpl_ge_2_2_0 = _mpl_version('2.2.0', operator.ge)
-_mpl_ge_2_2_2 = _mpl_version('2.2.2', operator.ge)
+_mpl_ge_2_2_3 = _mpl_version('2.2.3', operator.ge)
_mpl_ge_3_0_0 = _mpl_version('3.0.0', operator.ge)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 59d3bb355c1d7..35948ce7c19c9 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1,7 +1,6 @@
# being a bit too dynamic
# pylint: disable=E1101
from collections import namedtuple
-from distutils.version import LooseVersion
import re
import warnings
@@ -1474,18 +1473,9 @@ def _get_ind(self, y):
def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
column_num=None, stacking_id=None, **kwds):
from scipy.stats import gaussian_kde
- from scipy import __version__ as spv
y = remove_na_arraylike(y)
-
- if LooseVersion(spv) >= '0.11.0':
- gkde = gaussian_kde(y, bw_method=bw_method)
- else:
- gkde = gaussian_kde(y)
- if bw_method is not None:
- msg = ('bw_method was added in Scipy 0.11.0.' +
- ' Scipy version in use is {spv}.'.format(spv=spv))
- warnings.warn(msg)
+ gkde = gaussian_kde(y, bw_method=bw_method)
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index d556101ac2ecb..0df6631a1e9d7 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -963,8 +963,7 @@ def test_binops(self):
self.check_binop(ops, scalars, idxs)
def test_binops_pow(self):
- # later versions of numpy don't allow powers of negative integers
- # so test separately
+ # numpy does not allow powers of negative integers so test separately
# https://github.com/numpy/numpy/pull/8127
ops = [pow]
scalars = [1, 2]
diff --git a/pandas/tests/arrays/test_numpy.py b/pandas/tests/arrays/test_numpy.py
index 9cf26dce15d0a..5e4f6e376c1d3 100644
--- a/pandas/tests/arrays/test_numpy.py
+++ b/pandas/tests/arrays/test_numpy.py
@@ -5,8 +5,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas.arrays import PandasArray
from pandas.core.arrays.numpy_ import PandasDtype
@@ -178,7 +176,7 @@ def test_validate_reduction_keyword_args():
# ----------------------------------------------------------------------------
# Ops
-@td.skip_if_no("numpy", min_version="1.13.0")
+
def test_ufunc():
arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
result = np.abs(arr)
@@ -193,7 +191,6 @@ def test_ufunc():
tm.assert_extension_array_equal(r2, e2)
-@td.skip_if_no("numpy", min_version="1.13.0")
def test_basic_binop():
# Just a basic smoke test. The EA interface tests exercise this
# more thoroughly.
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 2a940daa3a4eb..d097f5e37e6b0 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import datetime
-from distutils.version import LooseVersion
import dateutil
import numpy as np
@@ -16,13 +15,6 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
-try:
- import scipy
- _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
- LooseVersion('0.19.0'))
-except ImportError:
- _is_scipy_ge_0190 = False
-
def _skip_if_no_pchip():
try:
@@ -716,14 +708,8 @@ def test_interp_various(self):
result = df.interpolate(method='cubic')
# GH #15662.
- # new cubic and quadratic interpolation algorithms from scipy 0.19.0.
- # previously `splmake` was used. See scipy/scipy#6710
- if _is_scipy_ge_0190:
- expected.A.loc[3] = 2.81547781
- expected.A.loc[13] = 5.52964175
- else:
- expected.A.loc[3] = 2.81621174
- expected.A.loc[13] = 5.64146581
+ expected.A.loc[3] = 2.81547781
+ expected.A.loc[13] = 5.52964175
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
@@ -732,12 +718,8 @@ def test_interp_various(self):
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
- if _is_scipy_ge_0190:
- expected.A.loc[3] = 2.82150771
- expected.A.loc[13] = 6.12648668
- else:
- expected.A.loc[3] = 2.82533638
- expected.A.loc[13] = 6.02817974
+ expected.A.loc[3] = 2.82150771
+ expected.A.loc[13] = 6.12648668
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
@@ -769,14 +751,10 @@ def test_interp_alt_scipy(self):
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
- import scipy
+
result = df.interpolate(method='pchip')
expected.loc[2, 'A'] = 3
-
- if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
- expected.loc[5, 'A'] = 6.0
- else:
- expected.loc[5, 'A'] = 6.125
+ expected.loc[5, 'A'] = 6.0
assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index 6bb9dea15d1ce..3af625323118c 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
-from distutils.version import LooseVersion
import numpy as np
import pytest
@@ -209,7 +208,6 @@ def test_rank_axis(self):
def test_rank_methods_frame(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
- import scipy
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
@@ -225,11 +223,8 @@ def test_rank_methods_frame(self):
rankdata, ax, vals,
m if m != 'first' else 'ordinal')
sprank = sprank.astype(np.float64)
- expected = DataFrame(sprank, columns=cols)
-
- if (LooseVersion(scipy.__version__) >=
- LooseVersion('0.17.0')):
- expected = expected.astype('float64')
+ expected = DataFrame(sprank,
+ columns=cols).astype('float64')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 9067a724289fe..1eef226749383 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -6,7 +6,6 @@
import numpy as np
from numpy import random
-import pytest
from pandas.compat import iteritems
from pandas.util._decorators import cache_readonly
@@ -28,23 +27,6 @@
"""
-def _skip_if_no_scipy_gaussian_kde():
- try:
- from scipy.stats import gaussian_kde # noqa
- except ImportError:
- pytest.skip("scipy version doesn't support gaussian_kde")
-
-
-def _ok_for_gaussian_kde(kind):
- if kind in ['kde', 'density']:
- try:
- from scipy.stats import gaussian_kde # noqa
- except ImportError:
- return False
-
- return True
-
-
@td.skip_if_no_mpl
class TestPlotBase(object):
@@ -53,10 +35,7 @@ def setup_method(self, method):
import matplotlib as mpl
mpl.rcdefaults()
- self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1()
- self.mpl_ge_2_1_0 = plotting._compat._mpl_ge_2_1_0()
- self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0()
- self.mpl_ge_2_2_2 = plotting._compat._mpl_ge_2_2_2()
+ self.mpl_ge_2_2_3 = plotting._compat._mpl_ge_2_2_3()
self.mpl_ge_3_0_0 = plotting._compat._mpl_ge_3_0_0()
self.bp_n_objects = 7
@@ -470,8 +449,6 @@ def is_grid_on():
spndx = 1
for kind in kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 8d8330cf5b9a2..c451228b5b319 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -14,8 +14,7 @@
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.resample import DatetimeIndex
-from pandas.tests.plotting.common import (
- TestPlotBase, _skip_if_no_scipy_gaussian_kde)
+from pandas.tests.plotting.common import TestPlotBase
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal, ensure_clean
@@ -406,11 +405,9 @@ def test_get_finder(self):
def test_finder_daily(self):
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst)
- else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
+ else: # 2.2.3, 2.2.4
xpl1 = [7565, 7564, 7553, 7546, 7518, 7428, 7066]
xpl2 = [7566, 7564, 7554, 7546, 7519, 7429, 7066]
@@ -436,11 +433,9 @@ def test_finder_daily(self):
def test_finder_quarterly(self):
yrs = [3.5, 11]
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs)
- else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
+ else: # 2.2.3, 2.2.4
xpl1 = [68, 68]
xpl2 = [72, 68]
@@ -466,11 +461,9 @@ def test_finder_quarterly(self):
def test_finder_monthly(self):
yrs = [1.15, 2.5, 4, 11]
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs)
- else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
+ else: # 2.2.3, 2.2.4
xpl1 = [216, 216, 204, 204]
xpl2 = [216, 216, 216, 204]
@@ -504,11 +497,9 @@ def test_finder_monthly_long(self):
@pytest.mark.slow
def test_finder_annual(self):
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
- else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
+ else: # 2.2.3, 2.2.4
xp = [1986, 1986, 1990, 1990, 1995, 2020, 1970, 1970]
xp = [Period(x, freq='A').ordinal for x in xp]
@@ -545,10 +536,7 @@ def test_finder_hourly(self):
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
- if self.mpl_ge_2_0_1:
- xp = Period('1/1/1999', freq='H').ordinal
- else: # 2.0.0
- xp = Period('1998-12-31 22:00', freq='H').ordinal
+ xp = Period('1/1/1999', freq='H').ordinal
assert rs == xp
@@ -563,9 +551,7 @@ def test_gaps(self):
line = lines[0]
data = line.get_xydata()
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
@@ -584,9 +570,7 @@ def test_gaps(self):
line = lines[0]
data = line.get_xydata()
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
@@ -604,9 +588,7 @@ def test_gaps(self):
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
@@ -629,9 +611,7 @@ def test_gap_upsample(self):
line = lines[0]
data = line.get_xydata()
- if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
- or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
- # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
@@ -698,7 +678,6 @@ def test_secondary_y_ts(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_secondary_kde(self):
- _skip_if_no_scipy_gaussian_kde()
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
@@ -1422,13 +1401,8 @@ def test_plot_outofbounds_datetime(self):
def test_format_timedelta_ticks_narrow(self):
- if self.mpl_ge_2_0_1:
- expected_labels = (['00:00:00.0000000{:0>2d}'.format(i)
- for i in range(10)])
- else: # 2.0.0
- expected_labels = [''] + [
- '00:00:00.00000000{:d}'.format(2 * i)
- for i in range(5)] + ['']
+ expected_labels = (['00:00:00.0000000{:0>2d}'.format(i)
+ for i in range(10)])
rng = timedelta_range('0', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
@@ -1443,7 +1417,6 @@ def test_format_timedelta_ticks_narrow(self):
def test_format_timedelta_ticks_wide(self):
expected_labels = [
- '',
'00:00:00',
'1 days 03:46:40',
'2 days 07:33:20',
@@ -1453,13 +1426,7 @@ def test_format_timedelta_ticks_wide(self):
'6 days 22:40:00',
'8 days 02:26:40',
'9 days 06:13:20',
- ''
]
- if self.mpl_ge_2_2_0:
- expected_labels = expected_labels[1:-1]
- elif self.mpl_ge_2_0_1:
- expected_labels = expected_labels[1:-1]
- expected_labels[-1] = ''
rng = timedelta_range('0', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 4c22c3245b788..292c6ea910788 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -19,9 +19,7 @@
from pandas import (
DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range)
from pandas.core.arrays import integer_array
-from pandas.tests.plotting.common import (
- TestPlotBase, _check_plot_works, _ok_for_gaussian_kde,
- _skip_if_no_scipy_gaussian_kde)
+from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.util.testing as tm
from pandas.io.formats.printing import pprint_thing
@@ -1514,8 +1512,6 @@ def test_boxplot_subplots_return_type(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
- _skip_if_no_scipy_gaussian_kde()
-
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
@@ -1536,8 +1532,6 @@ def test_kde_df(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
- _skip_if_no_scipy_gaussian_kde()
-
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@@ -1563,11 +1557,7 @@ def test_hist_df(self):
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
- if plotting._compat._mpl_ge_2_2_0():
- kwargs = {"density": True}
- else:
- kwargs = {"normed": True}
- ax = series.plot.hist(cumulative=True, bins=4, **kwargs)
+ ax = series.plot.hist(cumulative=True, bins=4, density=True)
# height of last bin (index 5) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
@@ -1707,8 +1697,6 @@ def test_df_legend_labels(self):
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
@@ -1797,8 +1785,6 @@ def test_no_legend(self):
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@@ -2044,8 +2030,6 @@ def test_hist_colors(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors(self):
- _skip_if_no_scipy_gaussian_kde()
-
from matplotlib import cm
custom_colors = 'rgcby'
@@ -2067,8 +2051,6 @@ def test_kde_colors(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
- _skip_if_no_scipy_gaussian_kde()
-
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
@@ -2212,11 +2194,11 @@ def test_unordered_ts(self):
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
+ @td.skip_if_no_scipy
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._core._common_kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
+
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
@@ -2226,8 +2208,6 @@ def test_kind_both_ways(self):
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._core._common_kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
@@ -2239,8 +2219,6 @@ def test_partially_invalid_plot_data(self):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._core._common_kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
@@ -2569,8 +2547,6 @@ def test_errorbar_asymmetrical(self):
tm.close()
- # This XPASSES when tested with mpl == 3.0.1
- @td.xfail_if_mpl_2_2
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
@@ -2727,6 +2703,7 @@ def _check(axes):
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+ @td.skip_if_no_scipy
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
@@ -2734,8 +2711,7 @@ def test_memory_leak(self):
results = {}
for kind in plotting._core._plot_klass.keys():
- if not _ok_for_gaussian_kde(kind):
- continue
+
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 4f0bef52b5e15..c62ed21c2fb17 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -12,7 +12,6 @@
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.util.testing as tm
-from pandas.plotting._compat import _mpl_ge_2_2_0
from pandas.plotting._core import grouped_hist
@@ -193,12 +192,8 @@ def test_hist_df_legacy(self):
ylabelsize=yf, yrot=yrot)
tm.close()
- # make sure kwargs to hist are handled
- if _mpl_ge_2_2_0():
- kwargs = {"density": True}
- else:
- kwargs = {"normed": True}
- ax = ser.hist(cumulative=True, bins=4, **kwargs)
+
+ ax = ser.hist(cumulative=True, bins=4, density=True)
# height of last bin (index 5) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
@@ -248,12 +243,11 @@ def test_hist_layout(self):
@pytest.mark.slow
# GH 9351
def test_tight_layout(self):
- if self.mpl_ge_2_0_1:
- df = DataFrame(randn(100, 3))
- _check_plot_works(df.hist)
- self.plt.tight_layout()
+ df = DataFrame(randn(100, 3))
+ _check_plot_works(df.hist)
+ self.plt.tight_layout()
- tm.close()
+ tm.close()
@td.skip_if_no_mpl
@@ -285,14 +279,9 @@ def test_grouped_hist_legacy(self):
xf, yf = 20, 18
xrot, yrot = 30, 40
- if _mpl_ge_2_2_0():
- kwargs = {"density": True}
- else:
- kwargs = {"normed": True}
-
axes = grouped_hist(df.A, by=df.C, cumulative=True,
bins=4, xlabelsize=xf, xrot=xrot,
- ylabelsize=yf, yrot=yrot, **kwargs)
+ ylabelsize=yf, yrot=yrot, density=True)
# height of last bin (index 5) must be 1.0
for ax in axes.ravel():
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 98248586f3d27..a184c024f4459 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -61,8 +61,6 @@ def test_bootstrap_plot(self):
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
- # This XPASSES when tested with mpl == 3.0.1
- @td.xfail_if_mpl_2_2
@td.skip_if_no_scipy
def test_scatter_matrix_axis(self):
scatter_matrix = plotting.scatter_matrix
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index e384c578aa446..a2250a8942e22 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -15,9 +15,7 @@
import pandas as pd
from pandas import DataFrame, Series, date_range
-from pandas.tests.plotting.common import (
- TestPlotBase, _check_plot_works, _ok_for_gaussian_kde,
- _skip_if_no_scipy_gaussian_kde)
+from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.util.testing as tm
import pandas.plotting as plotting
@@ -61,8 +59,6 @@ def test_plot(self):
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
- if not _ok_for_gaussian_kde(kind):
- continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
@@ -602,7 +598,6 @@ def test_hist_kde(self):
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
- _skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
_, ax = self.plt.subplots()
@@ -616,8 +611,6 @@ def test_hist_kde(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_kwargs(self):
- _skip_if_no_scipy_gaussian_kde()
-
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(self.ts.plot.kde, bw_method='scott', ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=20)
@@ -634,8 +627,6 @@ def test_kde_kwargs(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
- _skip_if_no_scipy_gaussian_kde()
-
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
@@ -669,7 +660,6 @@ def test_hist_kde_color(self):
assert len(ax.patches) == 10
self._check_colors(ax.patches, facecolors=['b'] * 10)
- _skip_if_no_scipy_gaussian_kde()
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, color='r', ax=ax)
self._check_ax_scales(ax, yaxis='log')
@@ -694,8 +684,7 @@ def test_kind_both_ways(self):
plotting._core._series_kinds)
_, ax = self.plt.subplots()
for kind in kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
+
s.plot(kind=kind, ax=ax)
getattr(s.plot, kind)()
@@ -704,8 +693,6 @@ def test_invalid_plot_data(self):
s = Series(list('abcd'))
_, ax = self.plt.subplots()
for kind in plotting._core._common_kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
@@ -715,16 +702,12 @@ def test_invalid_plot_data(self):
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._core._common_kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
_, ax = self.plt.subplots()
for kind in plotting._core._common_kinds:
- if not _ok_for_gaussian_kde(kind):
- continue
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
@@ -783,12 +766,9 @@ def test_errorbar_plot(self):
s.plot(yerr=np.arange(11))
s_err = ['zzz'] * 10
- # MPL > 2.0.0 will most likely use TypeError here
- with pytest.raises((TypeError, ValueError)):
+ with pytest.raises(TypeError):
s.plot(yerr=s_err)
- # This XPASSES when tested with mpl == 3.0.1
- @td.xfail_if_mpl_2_2
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 919f942bfa437..9e44cc32f8f45 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1,7 +1,6 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-from distutils.version import LooseVersion
from itertools import product
import operator
@@ -343,7 +342,6 @@ def test_corr(self, datetime_series):
@td.skip_if_no_scipy
def test_corr_rank(self):
- import scipy
import scipy.stats as stats
# kendall and spearman
@@ -358,11 +356,6 @@ def test_corr_rank(self):
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
- # these methods got rewritten in 0.8
- if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
- pytest.skip("skipping corr rank because of scipy version "
- "{0}".format(scipy.__version__))
-
# results from R
A = Series(
[-0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 403fdb383d81a..7b1df6917e77c 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -2,7 +2,6 @@
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
-from distutils.version import LooseVersion
import numpy as np
from numpy import nan
@@ -21,13 +20,6 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
-try:
- import scipy
- _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
- LooseVersion('0.19.0'))
-except ImportError:
- _is_scipy_ge_0190 = False
-
def _skip_if_no_pchip():
try:
@@ -1069,12 +1061,7 @@ def test_interp_scipy_basic(self):
assert_series_equal(result, expected)
# quadratic
# GH #15662.
- # new cubic and quadratic interpolation algorithms from scipy 0.19.0.
- # previously `splmake` was used. See scipy/scipy#6710
- if _is_scipy_ge_0190:
- expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
- else:
- expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
+ expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
@@ -1342,7 +1329,7 @@ def test_spline(self):
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
- @td.skip_if_no('scipy', min_version='0.15')
+ @td.skip_if_no_scipy
def test_spline_extrapolate(self):
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index c6a149bc0c296..31885077f53c1 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-from distutils.version import LooseVersion
from itertools import chain
import numpy as np
@@ -321,7 +320,6 @@ def test_rank_desc_mix_nans_infs(self):
def test_rank_methods_series(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
- import scipy
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
@@ -335,10 +333,7 @@ def test_rank_methods_series(self):
for m in ['average', 'min', 'max', 'first', 'dense']:
result = ts.rank(method=m)
sprank = rankdata(vals, m if m != 'first' else 'ordinal')
- expected = Series(sprank, index=index)
-
- if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
- expected = expected.astype('float64')
+ expected = Series(sprank, index=index).astype('float64')
tm.assert_series_equal(result, expected)
def test_rank_dense_method(self):
diff --git a/pandas/tests/sparse/frame/test_to_from_scipy.py b/pandas/tests/sparse/frame/test_to_from_scipy.py
index bdb2cd022b451..a80a51a66017e 100644
--- a/pandas/tests/sparse/frame/test_to_from_scipy.py
+++ b/pandas/tests/sparse/frame/test_to_from_scipy.py
@@ -1,5 +1,3 @@
-from distutils.version import LooseVersion
-
import numpy as np
import pytest
@@ -77,9 +75,8 @@ def test_from_to_scipy_object(spmatrix, fill_value):
columns = list('cd')
index = list('ab')
- if (spmatrix is scipy.sparse.dok_matrix and LooseVersion(
- scipy.__version__) >= LooseVersion('0.19.0')):
- pytest.skip("dok_matrix from object does not work in SciPy >= 0.19")
+ if spmatrix is scipy.sparse.dok_matrix:
+ pytest.skip("dok_matrix from object does not work in SciPy")
# Make one ndarray and from it one sparse matrix, both to be used for
# constructing frames and comparing results
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index b8226bc2f8269..b64786de264cd 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -228,9 +228,9 @@ def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
- msg = (r"'(<|>)' not supported between instances of 'complex' and"
- r" 'complex'|"
- r"unorderable types: complex\(\) > complex\(\)")
+ msg = ("unorderable types: .* [<>] .*"
+ "|" # the above case happens for numpy < 1.14
+ "'[<>]' not supported between instances of .*")
with pytest.raises(TypeError, match=msg):
algos.factorize(x17[::-1], sort=True)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 62962f20b0786..53d62a492794e 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -6,7 +6,6 @@
from numpy.random import randn
import pytest
-from pandas import _np_version_under1p13
from pandas.core.api import DataFrame
from pandas.core.computation import expressions as expr
import pandas.util.testing as tm
@@ -357,8 +356,8 @@ def test_bool_ops_warn_on_arithmetic(self):
f = getattr(operator, name)
fe = getattr(operator, sub_funcs[subs[op]])
- # >= 1.13.0 these are now TypeErrors
- if op == '-' and not _np_version_under1p13:
+ if op == '-':
+ # raises TypeError
continue
with tm.use_numexpr(True, min_elements=5):
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 21aee7eeaa0f4..5a163e7819fd1 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -5,7 +5,6 @@
import numpy as np
import pytest
-from pandas.compat.numpy import _np_version_under1p13
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer_dtype
@@ -345,7 +344,7 @@ def test_nanstd(self, ddof):
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert', ddof=ddof)
- @td.skip_if_no('scipy', min_version='0.17.0')
+ @td.skip_if_no_scipy
@pytest.mark.parametrize('ddof', range(3))
def test_nansem(self, ddof):
from scipy.stats import sem
@@ -414,7 +413,7 @@ def _skew_kurt_wrap(self, values, axis=None, func=None):
return 0.
return result
- @td.skip_if_no('scipy', min_version='0.17.0')
+ @td.skip_if_no_scipy
def test_nanskew(self):
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
@@ -423,7 +422,7 @@ def test_nanskew(self):
allow_str=False, allow_date=False,
allow_tdelta=False)
- @td.skip_if_no('scipy', min_version='0.17.0')
+ @td.skip_if_no_scipy
def test_nankurt(self):
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
@@ -1017,6 +1016,8 @@ def test_use_bottleneck():
(np.nanmedian, 2.5),
(np.min, 1),
(np.max, 4),
+ (np.nanmin, 1),
+ (np.nanmax, 4)
])
def test_numpy_ops(numpy_op, expected):
# GH8383
@@ -1024,21 +1025,6 @@ def test_numpy_ops(numpy_op, expected):
assert result == expected
-@pytest.mark.parametrize("numpy_op, expected", [
- (np.nanmin, 1),
- (np.nanmax, 4),
-])
-def test_numpy_ops_np_version_under1p13(numpy_op, expected):
- # GH8383
- result = numpy_op(pd.Series([1, 2, 3, 4]))
- if _np_version_under1p13:
- # bug for numpy < 1.13, where result is a series, should be a scalar
- with pytest.raises(ValueError):
- assert result == expected
- else:
- assert result == expected
-
-
@pytest.mark.parametrize("operation", [
nanops.nanany,
nanops.nanall,
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index c753b5531fde7..04a50cf6facd5 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -413,10 +413,9 @@ def test_mixed_integer_from_list(self):
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
- msg = (r"'(<|>)' not supported between instances of ('"
- r"datetime\.datetime' and 'int'|'int' and 'datetime\.datetime"
- r"')|"
- r"unorderable types: int\(\) > datetime\.datetime\(\)")
+ msg = ("unorderable types: .* [<>] .*"
+ "|" # the above case happens for numpy < 1.14
+ "'[<>]' not supported between instances of .*")
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index c0a972388d886..7266833f8bbde 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -23,7 +23,6 @@ def test_foo():
For more information, refer to the ``pytest`` documentation on ``skipif``.
"""
-from distutils.version import LooseVersion
import locale
import pytest
@@ -79,17 +78,6 @@ def _skip_if_no_mpl():
return True
-def _skip_if_mpl_2_2():
- mod = safe_import("matplotlib")
-
- if mod:
- v = mod.__version__
- if LooseVersion(v) > LooseVersion('2.1.2'):
- return True
- else:
- mod.use("Agg", warn=False)
-
-
def _skip_if_has_locale():
lang, _ = locale.getlocale()
if lang is not None:
@@ -149,9 +137,6 @@ def decorated_func(func):
reason="NumPy 1.15 or greater required")
skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(),
reason="matplotlib is present")
-xfail_if_mpl_2_2 = pytest.mark.xfail(_skip_if_mpl_2_2(),
- reason="matplotlib 2.2",
- strict=False)
skip_if_32bit = pytest.mark.skipif(is_platform_32bit(),
reason="skipping for 32 bit")
skip_if_windows = pytest.mark.skipif(is_platform_windows(),
diff --git a/requirements-dev.txt b/requirements-dev.txt
index e3034cb99ee80..173441489b388 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -20,14 +20,14 @@ beautifulsoup4>=4.2.1
blosc
botocore>=1.11
boto3
-bottleneck>=1.2.0
+bottleneck>=1.2.1
fastparquet>=0.2.1
html5lib
ipython>=5.6.0
ipykernel
jinja2
lxml
-matplotlib>=2.0.0
+matplotlib>=2.2.2
nbsphinx
numexpr>=2.6.8
openpyxl
diff --git a/setup.py b/setup.py
index e5ef0d7bd3aea..1dca7fa77219f 100755
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@ def is_platform_mac():
return sys.platform == 'darwin'
-min_numpy_ver = '1.12.0'
+min_numpy_ver = '1.13.3'
setuptools_kwargs = {
'install_requires': [
'python-dateutil >= 2.5.0',
| #25227 is trying to add `pyproject.toml` which is necessary for the whole `pip`-machinery to work smoothly post v.19.0. However, this seems to be running into problems with a numpy version bump, and mood there was to bump numpy requirement to 1.13.x.
The bare minimum for the bump is implemented in that PR, but a version bump requires getting rid of a bunch of compat code (and CI adjustments), otherwise the cruft just accumulates (it took me three PRs to clean out until 1.12).
Scipy is goint to [bump to 1.13.3](https://github.com/pandas-dev/pandas/pull/25227#issuecomment-464128057) with the next release (due to cython issues, I believe), so I thought I'll directly take it to 1.13.3 here.
I tried redistributing the CI jobs as best as I could. Here's the before/after:
**EDIT: updated pins in https://github.com/pandas-dev/pandas/pull/25554#issuecomment-475881206**
CI job: line number | before | this PR
-------|-----------------------------|-------------------
`ci/deps/azure-27-compat.yaml:10:` | `numpy=1.12.0` | `numpy=1.13.3`
`ci/deps/azure-27-locale.yaml:10:` | `numpy=1.12.0` | `numpy=1.14.*`
`ci/deps/azure-36-locale_slow.yaml:16:` | `numpy` | `numpy=1.15.*`
`ci/deps/azure-37-locale.yaml:15:` | `numpy` | `numpy`
`ci/deps/azure-37-numpydev.yaml:18:` | `numpy` | `numpy`
`ci/deps/azure-macos-35.yaml:14:` | `numpy=1.12.0` | `numpy=1.13.3`
`ci/deps/azure-windows-27.yaml:15:` | `numpy=1.12*` | `numpy=1.13.3`
`ci/deps/azure-windows-36.yaml:12:` | `numpy=1.14*` | `numpy=1.15.*`
`ci/deps/travis-27.yaml:20:` | `numpy=1.13*` | `numpy=1.14.*`
`ci/deps/travis-36-doc.yaml:23:` | `numpy=1.13*` | `numpy`
`ci/deps/travis-36-locale.yaml:15:` | `numpy` | `numpy`
`ci/deps/travis-36-slow.yaml:12:` | `numpy` | `numpy`
`ci/deps/travis-36.yaml:17:` | `numpy` | `numpy=1.15.*`
`ci/deps/travis-37.yaml:10:` | `numpy` | `numpy`
Matplotlib needs to be bumped as well (since current min 2.0.0 cannot be resolved by conda together with numpy 1.13.3), but it's only a tiny bump to ~2.0.2~ ~2.1.0~ ~2.1.1~ 2.2.2. In any case, the version spread should probably start to reflect that there's a bunch of mpl 3.0.x about, and so I also changed the spread a bit here:
CI job: line number | before | this PR
-------------|-------|-----------
`ci/deps/azure-27-locale.yaml:9:` | `matplotlib=2.0.0` | `matplotlib=2.2.2`
`ci/deps/azure-36-locale_slow.yaml:13:` | `matplotlib` | `matplotlib=3.0.*`
`ci/deps/azure-37-locale.yaml:12:` | `matplotlib` | `matplotlib`
`ci/deps/azure-macos-35.yaml:11:` | `matplotlib=2.2.0` | `matplotlib=2.2.3`
`ci/deps/azure-windows-27.yaml:13:` | `matplotlib=2.0.1` | `matplotlib=2.2.4`
`ci/deps/azure-windows-36.yaml:10:` | `matplotlib` | `matplotlib`
`ci/deps/travis-27.yaml:16:` | `matplotlib=2.2.2` | `matplotlib=2.2.*`
`ci/deps/travis-36-doc.yaml:17:` | `matplotlib` | `matplotlib`
`ci/deps/travis-36-locale.yaml:12:` | `matplotlib` | `matplotlib=3.0.1`
`ci/deps/travis-36-slow.yaml:10:` | `matplotlib` | `matplotlib`
`ci/deps/travis-36.yaml:14:` | `matplotlib` | `matplotlib=3.0.0`
It's probably wishful thinking that everything will pass straight away (especially when mpl versions change, see the code in `pandas/tests/plotting/test_datetimelike.py`), but oh well...
| https://api.github.com/repos/pandas-dev/pandas/pulls/25554 | 2019-03-05T18:37:00Z | 2019-03-28T12:48:15Z | 2019-03-28T12:48:15Z | 2019-03-28T17:50:31Z |
BUG: in error message raised when invalid axis parameter | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 5dd6ce168a0de..ea08a0a6fe07b 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -196,7 +196,7 @@ Missing
^^^^^^^
- Fixed misleading exception message in :meth:`Series.missing` if argument ``order`` is required, but omitted (:issue:`10633`, :issue:`24014`).
--
+- Fixed class type displayed in exception message in :meth:`DataFrame.dropna` if invalid ``axis`` parameter passed (:issue:`25555`)
-
MultiIndex
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7915d98662c9e..d7f71df99cdb6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -358,7 +358,7 @@ def _get_axis_number(cls, axis):
except KeyError:
pass
raise ValueError('No axis named {0} for object type {1}'
- .format(axis, type(cls)))
+ .format(axis, cls))
@classmethod
def _get_axis_name(cls, axis):
@@ -372,7 +372,7 @@ def _get_axis_name(cls, axis):
except KeyError:
pass
raise ValueError('No axis named {0} for object type {1}'
- .format(axis, type(cls)))
+ .format(axis, cls))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 3363a45149fff..2969e8be2db03 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1385,7 +1385,8 @@ def test_idxmin(self, float_frame, int_frame):
skipna=skipna)
tm.assert_series_equal(result, expected)
- msg = "No axis named 2 for object type <class 'type'>"
+ msg = ("No axis named 2 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
@@ -1402,7 +1403,8 @@ def test_idxmax(self, float_frame, int_frame):
skipna=skipna)
tm.assert_series_equal(result, expected)
- msg = "No axis named 2 for object type <class 'type'>"
+ msg = ("No axis named 2 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 118341276d799..badfa0ca8fd15 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -366,7 +366,8 @@ def test_swapaxes(self):
self._assert_frame_equal(df.T, df.swapaxes(0, 1))
self._assert_frame_equal(df.T, df.swapaxes(1, 0))
self._assert_frame_equal(df, df.swapaxes(0, 0))
- msg = "No axis named 2 for object type <class 'type'>"
+ msg = ("No axis named 2 for object type"
+ r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>")
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index fb00776b33cbb..cf8c55f00b061 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -1067,7 +1067,8 @@ def test_reindex_axis(self):
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
- msg = "No axis named 2 for object type <class 'type'>"
+ msg = ("No axis named 2 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
self.intframe.reindex_axis(rows, axis=2)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 2f3b0a9f76de9..189531c7b4459 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -140,7 +140,8 @@ def test_dropna(self):
assert_frame_equal(dropped, expected)
# bad input
- msg = "No axis named 3 for object type <class 'type'>"
+ msg = ("No axis named 3 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
df.dropna(axis=3)
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index 19b6636978643..facbfdd0c032b 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -95,10 +95,12 @@ def test_quantile_axis_parameter(self):
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
- msg = "No axis named -1 for object type <class 'type'>"
+ msg = ("No axis named -1 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
df.quantile(0.1, axis=-1)
- msg = "No axis named column for object type <class 'type'>"
+ msg = ("No axis named column for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
df.quantile(0.1, axis="column")
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index 8b29394bcab84..baf50982d8ab0 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -55,7 +55,8 @@ def test_sort_values(self):
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
- msg = "No axis named 2 for object type <class 'type'>"
+ msg = ("No axis named 2 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=['A', 'B'], axis=2, inplace=True)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index 716a9e30e4cc3..9965be9091451 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -862,7 +862,8 @@ def test_frame_to_period(self):
pts = df.to_period('M', axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
- msg = "No axis named 2 for object type <class 'type'>"
+ msg = ("No axis named 2 for object type"
+ " <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
df.to_period(axis=2)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 1f265d574da15..d7d9c526503cb 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -771,6 +771,7 @@ def test_isin_empty(self, empty):
result = s.isin(empty)
tm.assert_series_equal(expected, result)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_ptp(self):
# GH21614
N = 1000
@@ -796,7 +797,8 @@ def test_ptp(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0, skipna=False), expected)
- msg = r"No axis named 1 for object type <(class|type) 'type'>"
+ msg = ("No axis named 1 for object type"
+ " <class 'pandas.core.series.Series'>")
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index f07dd1dfb5fda..ef9e575e60385 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -10,7 +10,7 @@
import pytz
from pandas._libs.tslib import iNaT
-from pandas.compat import range
+from pandas.compat import PY2, range
from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
@@ -654,6 +654,7 @@ def test_timedelta64_nan(self):
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# assert_series_equal(selector, expected)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
@@ -661,7 +662,8 @@ def test_dropna_empty(self):
assert len(s) == 0
# invalid axis
- msg = r"No axis named 1 for object type <(class|type) 'type'>"
+ msg = ("No axis named 1 for object type"
+ " <class 'pandas.core.series.Series'>")
with pytest.raises(ValueError, match=msg):
s.dropna(axis=1)
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index dfcda889269ee..373083c077e28 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -9,7 +9,7 @@
from pandas._libs.algos import Infinity, NegInfinity
from pandas._libs.tslib import iNaT
import pandas.compat as compat
-from pandas.compat import product
+from pandas.compat import PY2, product
import pandas.util._test_decorators as td
from pandas import NaT, Series, Timestamp, date_range
@@ -203,10 +203,12 @@ def test_rank_categorical(self):
assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot)
assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method='average')
- msg = r"No axis named average for object type <(class|type) 'type'>"
+ msg = ("No axis named average for object type"
+ " <class 'pandas.core.series.Series'>")
with pytest.raises(ValueError, match=msg):
s.rank('average')
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 216f84c8f077a..162fa4ac9ab52 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas.compat import PY2
+
from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
@@ -88,6 +90,7 @@ def test_sort_values(self):
with pytest.raises(ValueError, match=msg):
s.sort_values(inplace=True)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_sort_index(self):
rindex = list(self.ts.index)
random.shuffle(rindex)
@@ -109,7 +112,8 @@ def test_sort_index(self):
sorted_series = random_order.sort_index(axis=0)
assert_series_equal(sorted_series, self.ts)
- msg = r"No axis named 1 for object type <(class|type) 'type'>"
+ msg = ("No axis named 1 for object type"
+ " <class 'pandas.core.series.Series'>")
with pytest.raises(ValueError, match=msg):
random_order.sort_values(axis=1)
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index d082b023e1f27..b6896685dd474 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -8,7 +8,7 @@
from pandas._libs.tslib import iNaT
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-from pandas.compat import StringIO, lrange, product
+from pandas.compat import PY2, StringIO, lrange, product
from pandas.errors import NullFrequencyError
import pandas.util._test_decorators as td
@@ -867,6 +867,7 @@ def test_between_time_formats(self):
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_between_time_axis(self):
# issue 8839
rng = date_range('1/1/2000', periods=100, freq='10min')
@@ -876,7 +877,8 @@ def test_between_time_axis(self):
assert len(ts.between_time(stime, etime)) == expected_length
assert len(ts.between_time(stime, etime, axis=0)) == expected_length
- msg = r"No axis named 1 for object type <(class|type) 'type'>"
+ msg = ("No axis named 1 for object type"
+ " <class 'pandas.core.series.Series'>")
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, axis=1)
| - [x] closes #25555
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I don't think there is currently an issue for this. will open new and then add whatsnew. | https://api.github.com/repos/pandas-dev/pandas/pulls/25553 | 2019-03-05T18:30:15Z | 2019-03-06T22:35:56Z | 2019-03-06T22:35:55Z | 2019-03-07T09:50:08Z |
Document the behavior of `axis=None` with `style.background_gradient` | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index c8b5dc6b9b7c0..b872f86eb8683 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -576,10 +576,10 @@ def apply(self, func, axis=0, subset=None, **kwargs):
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
- axis : int, str or None
- apply to each column (``axis=0`` or ``'index'``)
- or to each row (``axis=1`` or ``'columns'``) or
- to the entire DataFrame at once with ``axis=None``
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ apply to each column (``axis=0`` or ``'index'``), to each row
+ (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
+ with ``axis=None``.
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
@@ -894,10 +894,12 @@ def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
matplotlib colormap
low, high : float
compress the range by these values.
- axis : int or str
- 1 or 'columns' for columnwise, 0 or 'index' for rowwise
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ apply to each column (``axis=0`` or ``'index'``), to each row
+ (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
+ with ``axis=None``.
subset : IndexSlice
- a valid slice for ``data`` to limit the style application to
+ a valid slice for ``data`` to limit the style application to.
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
@@ -1081,10 +1083,10 @@ def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
- axis : int, str or None, default 0
- Apply to each column (`axis=0` or `'index'`)
- or to each row (`axis=1` or `'columns'`) or
- to the entire DataFrame at once with `axis=None`.
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ apply to each column (``axis=0`` or ``'index'``), to each row
+ (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
+ with ``axis=None``.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
@@ -1149,11 +1151,12 @@ def highlight_max(self, subset=None, color='yellow', axis=0):
Parameters
----------
subset : IndexSlice, default None
- a valid slice for ``data`` to limit the style application to
+ a valid slice for ``data`` to limit the style application to.
color : str, default 'yellow'
- axis : int, str, or None; default 0
- 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
- or ``None`` for tablewise
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ apply to each column (``axis=0`` or ``'index'``), to each row
+ (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
+ with ``axis=None``.
Returns
-------
@@ -1169,11 +1172,12 @@ def highlight_min(self, subset=None, color='yellow', axis=0):
Parameters
----------
subset : IndexSlice, default None
- a valid slice for ``data`` to limit the style application to
+ a valid slice for ``data`` to limit the style application to.
color : str, default 'yellow'
- axis : int, str, or None; default 0
- 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
- or ``None`` for tablewise
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ apply to each column (``axis=0`` or ``'index'``), to each row
+ (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
+ with ``axis=None``.
Returns
-------
| Add docstring entry for using `axis=None`with `style.background_gradient`, which was added in #21259. Also reword to be in numerical order and consistent with how `highlight_max` and `highligh_min` defines column-wise and row-wise.
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25551 | 2019-03-05T16:53:45Z | 2019-03-07T02:21:08Z | 2019-03-07T02:21:08Z | 2019-03-07T02:21:14Z |
TST/CLN: Remove more Panel tests | diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 91ea38920c702..1b74eeea1a8c3 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -4,15 +4,13 @@
from warnings import catch_warnings, filterwarnings
import numpy as np
-import pytest
from pandas.compat import lrange
from pandas.core.dtypes.common import is_scalar
from pandas import (
- DataFrame, Float64Index, MultiIndex, Panel, Series, UInt64Index,
- date_range)
+ DataFrame, Float64Index, MultiIndex, Series, UInt64Index, date_range)
from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
@@ -31,11 +29,10 @@ def _axify(obj, key, axis):
return tuple(axes)
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class Base(object):
""" indexing comprehensive base class """
- _objs = {'series', 'frame', 'panel'}
+ _objs = {'series', 'frame'}
_typs = {'ints', 'uints', 'labels', 'mixed', 'ts', 'floats', 'empty',
'ts_rev', 'multi'}
@@ -45,31 +42,18 @@ def setup_method(self, method):
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
- with catch_warnings(record=True):
- self.panel_ints = Panel(np.random.rand(4, 4, 4),
- items=lrange(0, 8, 2),
- major_axis=lrange(0, 12, 3),
- minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
- self.panel_uints = Panel(np.random.rand(4, 4, 4),
- items=UInt64Index(lrange(0, 8, 2)),
- major_axis=UInt64Index(lrange(0, 12, 3)),
- minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_floats = Series(np.random.rand(4),
index=Float64Index(range(0, 8, 2)))
self.frame_floats = DataFrame(np.random.randn(4, 4),
index=Float64Index(range(0, 8, 2)),
columns=Float64Index(range(0, 12, 3)))
- self.panel_floats = Panel(np.random.rand(4, 4, 4),
- items=Float64Index(range(0, 8, 2)),
- major_axis=Float64Index(range(0, 12, 3)),
- minor_axis=Float64Index(range(0, 16, 4)))
m_idces = [MultiIndex.from_product([[1, 2], [3, 4]]),
MultiIndex.from_product([[5, 6], [7, 8]]),
@@ -80,31 +64,19 @@ def setup_method(self, method):
self.frame_multi = DataFrame(np.random.randn(4, 4),
index=m_idces[0],
columns=m_idces[1])
- self.panel_multi = Panel(np.random.rand(4, 4, 4),
- items=m_idces[0],
- major_axis=m_idces[1],
- minor_axis=m_idces[2])
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
- self.panel_labels = Panel(np.random.randn(4, 4, 4),
- items=list('abcd'),
- major_axis=list('ABCD'),
- minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
- self.panel_mixed = Panel(np.random.randn(4, 4, 4),
- items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
- self.panel_ts = Panel(np.random.randn(4, 4, 4),
- items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
@@ -112,12 +84,9 @@ def setup_method(self, method):
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
- self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
- items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
- self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
@@ -273,7 +242,7 @@ def _print(result, error=None):
else:
axes = list(axes)
else:
- axes = [0, 1, 2]
+ axes = [0, 1]
# check
for o in objs:
@@ -296,10 +265,4 @@ def _call(obj=obj):
k2 = key2
_eq(t, o, a, obj, key1, k2)
- # Panel deprecations
- if isinstance(obj, Panel):
- with catch_warnings():
- filterwarnings("ignore", "\nPanel*", FutureWarning)
- _call()
- else:
- _call()
+ _call()
diff --git a/pandas/tests/indexing/multiindex/test_panel.py b/pandas/tests/indexing/multiindex/test_panel.py
deleted file mode 100644
index 314009146911a..0000000000000
--- a/pandas/tests/indexing/multiindex/test_panel.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import DataFrame, MultiIndex, Panel, Series
-from pandas.util import testing as tm
-
-
-@pytest.mark.filterwarnings('ignore:\\nPanel:FutureWarning')
-class TestMultiIndexPanel(object):
-
- def test_iloc_getitem_panel_multiindex(self):
-
- # GH 7199
- # Panel with multi-index
- multi_index = MultiIndex.from_tuples([('ONE', 'one'),
- ('TWO', 'two'),
- ('THREE', 'three')],
- names=['UPPER', 'lower'])
-
- simple_index = [x[0] for x in multi_index]
- wd1 = Panel(items=['First', 'Second'],
- major_axis=['a', 'b', 'c', 'd'],
- minor_axis=multi_index)
-
- wd2 = Panel(items=['First', 'Second'],
- major_axis=['a', 'b', 'c', 'd'],
- minor_axis=simple_index)
-
- expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
- result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
- tm.assert_frame_equal(result1, expected1)
-
- expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
- result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
- tm.assert_frame_equal(result2, expected2)
-
- expected1 = DataFrame(index=['a'], columns=multi_index,
- dtype='float64')
- result1 = wd1.iloc[0, [0], [0, 1, 2]]
- tm.assert_frame_equal(result1, expected1)
-
- expected2 = DataFrame(index=['a'], columns=simple_index,
- dtype='float64')
- result2 = wd2.iloc[0, [0], [0, 1, 2]]
- tm.assert_frame_equal(result2, expected2)
-
- # GH 7516
- mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')])
- p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3),
- items=['a', 'b', 'c'], major_axis=mi,
- minor_axis=['u', 'v', 'w'])
- result = p.iloc[:, 1, 0]
- expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u')
- tm.assert_series_equal(result, expected)
-
- result = p.loc[:, (1, 'y'), 'u']
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 29f70929624fc..c4f98b892feb7 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -95,8 +95,6 @@ def test_loc_getitem_int(self):
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
- self.check_result('int label', 'loc', 4, 'ix', 4,
- typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
@@ -137,14 +135,10 @@ def test_loc_getitem_label_list(self):
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
- self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
- typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
- self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
- ['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
@@ -167,11 +161,6 @@ def test_loc_getitem_label_list_with_missing(self):
typs=['ints', 'uints', 'floats'],
axes=1, fails=KeyError)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
- typs=['ints', 'uints', 'floats'],
- axes=2, fails=KeyError)
-
# GH 17758 - MultiIndex and missing keys
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.check_result('list lbl', 'loc', [(1, 3), (1, 4), (2, 5)],
@@ -194,8 +183,6 @@ def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
- self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
- typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
@@ -203,8 +190,6 @@ def test_loc_getitem_label_array_like(self):
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
- self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
- 'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
@@ -222,8 +207,6 @@ def test_loc_getitem_int_slice(self):
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
- self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
- typs=['ints', 'uints'], axes=2)
def test_loc_to_fail(self):
@@ -318,8 +301,6 @@ def test_loc_getitem_label_slice(self):
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
- self.check_result('lab slice', 'loc', slice('W', 'Z'),
- 'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
@@ -327,9 +308,6 @@ def test_loc_getitem_label_slice(self):
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
- self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
- 'ix', slice('20130102', '20130104'),
- typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
@@ -339,8 +317,6 @@ def test_loc_getitem_label_slice(self):
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
- self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
- typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py
deleted file mode 100644
index 8033d19f330b3..0000000000000
--- a/pandas/tests/indexing/test_panel.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from warnings import catch_warnings
-
-import numpy as np
-import pytest
-
-from pandas import Panel, date_range
-from pandas.util import testing as tm
-
-
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class TestPanel(object):
-
- def test_iloc_getitem_panel(self):
-
- with catch_warnings(record=True):
- # GH 7189
- p = Panel(np.arange(4 * 3 * 2).reshape(4, 3, 2),
- items=['A', 'B', 'C', 'D'],
- major_axis=['a', 'b', 'c'],
- minor_axis=['one', 'two'])
-
- result = p.iloc[1]
- expected = p.loc['B']
- tm.assert_frame_equal(result, expected)
-
- result = p.iloc[1, 1]
- expected = p.loc['B', 'b']
- tm.assert_series_equal(result, expected)
-
- result = p.iloc[1, 1, 1]
- expected = p.loc['B', 'b', 'two']
- assert result == expected
-
- # combined
- result = p.iloc[0, [True, True], [0, 1]]
- expected = p.loc['A', ['a', 'b'], ['one', 'two']]
- tm.assert_frame_equal(result, expected)
-
- # out-of-bounds exception
- with pytest.raises(IndexError):
- p.iloc[tuple([10, 5])]
-
- with pytest.raises(IndexError):
- p.iloc[0, [True, True], [0, 1, 2]]
-
- # trying to use a label
- with pytest.raises(ValueError):
- p.iloc[tuple(['j', 'D'])]
-
- # GH
- p = Panel(
- np.random.rand(4, 3, 2), items=['A', 'B', 'C', 'D'],
- major_axis=['U', 'V', 'W'], minor_axis=['X', 'Y'])
- expected = p['A']
-
- result = p.iloc[0, :, :]
- tm.assert_frame_equal(result, expected)
-
- result = p.iloc[0, [True, True, True], :]
- tm.assert_frame_equal(result, expected)
-
- result = p.iloc[0, [True, True, True], [0, 1]]
- tm.assert_frame_equal(result, expected)
-
- with pytest.raises(IndexError):
- p.iloc[0, [True, True, True], [0, 1, 2]]
-
- with pytest.raises(IndexError):
- p.iloc[0, [True, True, True], [2]]
-
- def test_iloc_panel_issue(self):
-
- with catch_warnings(record=True):
- # see gh-3617
- p = Panel(np.random.randn(4, 4, 4))
-
- assert p.iloc[:3, :3, :3].shape == (3, 3, 3)
- assert p.iloc[1, :3, :3].shape == (3, 3)
- assert p.iloc[:3, 1, :3].shape == (3, 3)
- assert p.iloc[:3, :3, 1].shape == (3, 3)
- assert p.iloc[1, 1, :3].shape == (3, )
- assert p.iloc[1, :3, 1].shape == (3, )
- assert p.iloc[:3, 1, 1].shape == (3, )
-
- @pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
- def test_panel_getitem(self):
-
- with catch_warnings(record=True):
- # with an object-like
- # GH 9140
- class TestObject(object):
-
- def __str__(self):
- return "TestObject"
-
- obj = TestObject()
-
- p = Panel(np.random.randn(1, 5, 4), items=[obj],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
-
- expected = p.iloc[0]
- result = p[obj]
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
deleted file mode 100644
index b418091de8d7f..0000000000000
--- a/pandas/tests/test_panel.py
+++ /dev/null
@@ -1,573 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: disable=W0612,E1101
-from collections import OrderedDict
-from datetime import datetime
-
-import numpy as np
-import pytest
-
-from pandas.compat import lrange
-
-from pandas import DataFrame, MultiIndex, Series, date_range, notna
-import pandas.core.panel as panelm
-from pandas.core.panel import Panel
-import pandas.util.testing as tm
-from pandas.util.testing import (
- assert_almost_equal, assert_frame_equal, assert_series_equal,
- makeCustomDataframe as mkdf, makeMixedDataFrame)
-
-from pandas.tseries.offsets import MonthEnd
-
-
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class PanelTests(object):
- panel = None
-
- def not_hashable(self):
- c_empty = Panel()
- c = Panel(Panel([[[1]]]))
- pytest.raises(TypeError, hash, c_empty)
- pytest.raises(TypeError, hash, c)
-
-
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class SafeForSparse(object):
-
- # issue 7692
- def test_raise_when_not_implemented(self):
- p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
- items=['ItemA', 'ItemB', 'ItemC'],
- major_axis=date_range('20130101', periods=4),
- minor_axis=list('ABCDE'))
- d = p.sum(axis=1).iloc[0]
- ops = ['add', 'sub', 'mul', 'truediv',
- 'floordiv', 'div', 'mod', 'pow']
- for op in ops:
- with pytest.raises(NotImplementedError):
- getattr(p, op)(d, axis=0)
-
-
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class CheckIndexing(object):
-
- def test_delitem_and_pop(self):
-
- values = np.empty((3, 3, 3))
- values[0] = 0
- values[1] = 1
- values[2] = 2
-
- panel = Panel(values, lrange(3), lrange(3), lrange(3))
-
- # did we delete the right row?
-
- panelc = panel.copy()
- del panelc[0]
- tm.assert_frame_equal(panelc[1], panel[1])
- tm.assert_frame_equal(panelc[2], panel[2])
-
- panelc = panel.copy()
- del panelc[1]
- tm.assert_frame_equal(panelc[0], panel[0])
- tm.assert_frame_equal(panelc[2], panel[2])
-
- panelc = panel.copy()
- del panelc[2]
- tm.assert_frame_equal(panelc[1], panel[1])
- tm.assert_frame_equal(panelc[0], panel[0])
-
- def test_setitem(self):
- # bad shape
- p = Panel(np.random.randn(4, 3, 2))
- msg = (r"shape of value must be \(3, 2\), "
- r"shape of given object was \(4, 2\)")
- with pytest.raises(ValueError, match=msg):
- p[0] = np.random.randn(4, 2)
-
- def test_setitem_ndarray(self):
- timeidx = date_range(start=datetime(2009, 1, 1),
- end=datetime(2009, 12, 31),
- freq=MonthEnd())
- lons_coarse = np.linspace(-177.5, 177.5, 72)
- lats_coarse = np.linspace(-87.5, 87.5, 36)
- P = Panel(items=timeidx, major_axis=lons_coarse,
- minor_axis=lats_coarse)
- data = np.random.randn(72 * 36).reshape((72, 36))
- key = datetime(2009, 2, 28)
- P[key] = data
-
- assert_almost_equal(P[key].values, data)
-
- def test_set_minor_major(self):
- # GH 11014
- df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
- df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
- panel = Panel({'Item1': df1, 'Item2': df2})
-
- newminor = notna(panel.iloc[:, :, 0])
- panel.loc[:, :, 'NewMinor'] = newminor
- assert_frame_equal(panel.loc[:, :, 'NewMinor'],
- newminor.astype(object))
-
- newmajor = notna(panel.iloc[:, 0, :])
- panel.loc[:, 'NewMajor', :] = newmajor
- assert_frame_equal(panel.loc[:, 'NewMajor', :],
- newmajor.astype(object))
-
- def test_getitem_fancy_slice(self):
- pass
-
- def test_ix_setitem_slice_dataframe(self):
- a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
- minor_axis=[111, 222, 333])
- b = DataFrame(np.random.randn(2, 3), index=[111, 333],
- columns=[1, 2, 3])
-
- a.loc[:, 22, [111, 333]] = b
-
- assert_frame_equal(a.loc[:, 22, [111, 333]], b)
-
- def test_ix_align(self):
- from pandas import Series
- b = Series(np.random.randn(10), name=0)
- b.sort_values()
- df_orig = Panel(np.random.randn(3, 10, 2))
- df = df_orig.copy()
-
- df.loc[0, :, 0] = b
- assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
-
- df = df_orig.swapaxes(0, 1)
- df.loc[:, 0, 0] = b
- assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
-
- df = df_orig.swapaxes(1, 2)
- df.loc[0, 0, :] = b
- assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
-
- def test_ix_frame_align(self):
- # GH3830, panel assignent by values/frame
- for dtype in ['float64', 'int64']:
-
- panel = Panel(np.arange(40).reshape((2, 4, 5)),
- items=['a1', 'a2'], dtype=dtype)
- df1 = panel.iloc[0]
- df2 = panel.iloc[1]
-
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df2)
-
- # Assignment by Value Passes for 'a2'
- panel.loc['a2'] = df1.values
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df1)
-
- # Assignment by DataFrame Ok w/o loc 'a2'
- panel['a2'] = df2
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df2)
-
- # Assignment by DataFrame Fails for 'a2'
- panel.loc['a2'] = df2
- tm.assert_frame_equal(panel.loc['a1'], df1)
- tm.assert_frame_equal(panel.loc['a2'], df2)
-
- def test_logical_with_nas(self):
- d = Panel({'ItemA': {'a': [np.nan, False]},
- 'ItemB': {'a': [True, True]}})
-
- result = d['ItemA'] | d['ItemB']
- expected = DataFrame({'a': [np.nan, True]})
- assert_frame_equal(result, expected)
-
- # this is autodowncasted here
- result = d['ItemA'].fillna(False) | d['ItemB']
- expected = DataFrame({'a': [True, True]})
- assert_frame_equal(result, expected)
-
-
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
-
- def test_constructor_cast(self):
- # can't cast
- data = [[['foo', 'bar', 'baz']]]
- pytest.raises(ValueError, Panel, data, dtype=float)
-
- def test_constructor_empty_panel(self):
- empty = Panel()
- assert len(empty.items) == 0
- assert len(empty.major_axis) == 0
- assert len(empty.minor_axis) == 0
-
- def test_constructor_observe_dtype(self):
- # GH #411
- panel = Panel(items=lrange(3), major_axis=lrange(3),
- minor_axis=lrange(3), dtype='O')
- assert panel.values.dtype == np.object_
-
- def test_constructor_dtypes(self):
- # GH #797
-
- def _check_dtype(panel, dtype):
- for i in panel.items:
- assert panel[i].values.dtype.name == dtype
-
- # only nan holding types allowed here
- for dtype in ['float64', 'float32', 'object']:
- panel = Panel(items=lrange(2), major_axis=lrange(10),
- minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
- items=lrange(2),
- major_axis=lrange(10),
- minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
- items=lrange(2),
- major_axis=lrange(10),
- minor_axis=lrange(5), dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- panel = Panel(
- np.random.randn(2, 10, 5),
- items=lrange(2), major_axis=lrange(10),
- minor_axis=lrange(5),
- dtype=dtype)
- _check_dtype(panel, dtype)
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
- df1 = DataFrame(np.random.randn(2, 5),
- index=lrange(2), columns=lrange(5))
- df2 = DataFrame(np.random.randn(2, 5),
- index=lrange(2), columns=lrange(5))
- panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
- _check_dtype(panel, dtype)
-
- def test_constructor_fails_with_not_3d_input(self):
- msg = "The number of dimensions required is 3"
- with pytest.raises(ValueError, match=msg):
- Panel(np.random.randn(10, 2))
-
- def test_ctor_orderedDict(self):
- keys = list(set(np.random.randint(0, 5000, 100)))[
- :50] # unique random int keys
- d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
- p = Panel(d)
- assert list(p.items) == keys
-
- p = Panel.from_dict(d)
- assert list(p.items) == keys
-
- def test_from_dict_mixed_orient(self):
- df = tm.makeDataFrame()
- df['foo'] = 'bar'
-
- data = {'k1': df, 'k2': df}
-
- panel = Panel.from_dict(data, orient='minor')
-
- assert panel['foo'].values.dtype == np.object_
- assert panel['A'].values.dtype == np.float64
-
- def test_constructor_error_msgs(self):
- msg = (r"Shape of passed values is \(3, 4, 5\), "
- r"indices imply \(4, 5, 5\)")
- with pytest.raises(ValueError, match=msg):
- Panel(np.random.randn(3, 4, 5),
- lrange(4), lrange(5), lrange(5))
-
- msg = (r"Shape of passed values is \(3, 4, 5\), "
- r"indices imply \(5, 4, 5\)")
- with pytest.raises(ValueError, match=msg):
- Panel(np.random.randn(3, 4, 5),
- lrange(5), lrange(4), lrange(5))
-
- msg = (r"Shape of passed values is \(3, 4, 5\), "
- r"indices imply \(5, 5, 4\)")
- with pytest.raises(ValueError, match=msg):
- Panel(np.random.randn(3, 4, 5),
- lrange(5), lrange(5), lrange(4))
-
- def test_apply_slabs(self):
- # with multi-indexes
- # GH7469
- index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
- 'two', 'a'), ('two', 'b')])
- dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
- 4, 3), columns=list("ABC"), index=index)
- dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
- 4, 3), columns=list("ABC"), index=index)
- p = Panel({'f': dfa, 'g': dfb})
- result = p.apply(lambda x: x.sum(), axis=0)
-
- # on windows this will be in32
- result = result.astype('int64')
- expected = p.sum(0)
- assert_frame_equal(result, expected)
-
- def test_apply_no_or_zero_ndim(self):
- # GH10332
- self.panel = Panel(np.random.rand(5, 5, 5))
-
- result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
- result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
- result_int64 = self.panel.apply(
- lambda df: np.int64(0), axis=[1, 2])
- result_float64 = self.panel.apply(lambda df: np.float64(0.0),
- axis=[1, 2])
-
- expected_int = expected_int64 = Series([0] * 5)
- expected_float = expected_float64 = Series([0.0] * 5)
-
- assert_series_equal(result_int, expected_int)
- assert_series_equal(result_int64, expected_int64)
- assert_series_equal(result_float, expected_float)
- assert_series_equal(result_float64, expected_float64)
-
- def test_fillna(self):
- # limit not implemented when only value is specified
- p = Panel(np.random.randn(3, 4, 5))
- p.iloc[0:2, 0:2, 0:2] = np.nan
- pytest.raises(NotImplementedError,
- lambda: p.fillna(999, limit=1))
-
- def test_to_frame_multi_major(self):
- idx = MultiIndex.from_tuples(
- [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
- df = DataFrame([[1, 'a', 1], [2, 'b', 1],
- [3, 'c', 1], [4, 'd', 1]],
- columns=['A', 'B', 'C'], index=idx)
- wp = Panel({'i1': df, 'i2': df})
- expected_idx = MultiIndex.from_tuples(
- [
- (1, 'one', 'A'), (1, 'one', 'B'),
- (1, 'one', 'C'), (1, 'two', 'A'),
- (1, 'two', 'B'), (1, 'two', 'C'),
- (2, 'one', 'A'), (2, 'one', 'B'),
- (2, 'one', 'C'), (2, 'two', 'A'),
- (2, 'two', 'B'), (2, 'two', 'C')
- ],
- names=[None, None, 'minor'])
- expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
- 'c', 1, 4, 'd', 1],
- 'i2': [1, 'a', 1, 2, 'b',
- 1, 3, 'c', 1, 4, 'd', 1]},
- index=expected_idx)
- result = wp.to_frame()
- assert_frame_equal(result, expected)
-
- wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
- result = wp.to_frame()
- assert_frame_equal(result, expected[1:])
-
- idx = MultiIndex.from_tuples(
- [(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
- df = DataFrame([[1, 'a', 1], [2, 'b', 1],
- [3, 'c', 1], [4, 'd', 1]],
- columns=['A', 'B', 'C'], index=idx)
- wp = Panel({'i1': df, 'i2': df})
- ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
- (1, 'two', 'C'),
- (1, 'one', 'A'),
- (1, 'one', 'B'),
- (1, 'one', 'C'),
- (2, 'one', 'A'),
- (2, 'one', 'B'),
- (2, 'one', 'C'),
- (np.nan, 'two', 'A'),
- (np.nan, 'two', 'B'),
- (np.nan, 'two', 'C')],
- names=[None, None, 'minor'])
- expected.index = ex_idx
- result = wp.to_frame()
- assert_frame_equal(result, expected)
-
- def test_to_frame_multi_major_minor(self):
- cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
- codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
- idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
- 2, 'two'), (3, 'three'), (4, 'four')])
- df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
- ['a', 'b', 'w', 'x'],
- ['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
- [-5, -6, -7, -8]], columns=cols, index=idx)
- wp = Panel({'i1': df, 'i2': df})
-
- exp_idx = MultiIndex.from_tuples(
- [(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
- (1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
- (1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
- (1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
- (2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
- (2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
- (2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
- (2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
- (3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
- (3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
- (4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
- (4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
- names=[None, None, None, None])
- exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
- [3, 3], [4, 4],
- [13, 13], [14, 14], ['a', 'a'],
- ['b', 'b'], ['w', 'w'],
- ['x', 'x'], ['c', 'c'], ['d', 'd'], [
- 'y', 'y'], ['z', 'z'],
- [-1, -1], [-2, -2], [-3, -3], [-4, -4],
- [-5, -5], [-6, -6],
- [-7, -7], [-8, -8]]
- result = wp.to_frame()
- expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
- assert_frame_equal(result, expected)
-
- def test_to_frame_multi_drop_level(self):
- idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
- df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
- wp = Panel({'i1': df, 'i2': df})
- result = wp.to_frame()
- exp_idx = MultiIndex.from_tuples(
- [(2, 'one', 'A'), (2, 'two', 'A')],
- names=[None, None, 'minor'])
- expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
- assert_frame_equal(result, expected)
-
- def test_panel_dups(self):
-
- # GH 4960
- # duplicates in an index
-
- # items
- data = np.random.randn(5, 100, 5)
- no_dup_panel = Panel(data, items=list("ABCDE"))
- panel = Panel(data, items=list("AACDE"))
-
- expected = no_dup_panel['A']
- result = panel.iloc[0]
- assert_frame_equal(result, expected)
-
- expected = no_dup_panel['E']
- result = panel.loc['E']
- assert_frame_equal(result, expected)
-
- # major
- data = np.random.randn(5, 5, 5)
- no_dup_panel = Panel(data, major_axis=list("ABCDE"))
- panel = Panel(data, major_axis=list("AACDE"))
-
- expected = no_dup_panel.loc[:, 'A']
- result = panel.iloc[:, 0]
- assert_frame_equal(result, expected)
-
- expected = no_dup_panel.loc[:, 'E']
- result = panel.loc[:, 'E']
- assert_frame_equal(result, expected)
-
- # minor
- data = np.random.randn(5, 100, 5)
- no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
- panel = Panel(data, minor_axis=list("AACDE"))
-
- expected = no_dup_panel.loc[:, :, 'A']
- result = panel.iloc[:, :, 0]
- assert_frame_equal(result, expected)
-
- expected = no_dup_panel.loc[:, :, 'E']
- result = panel.loc[:, :, 'E']
- assert_frame_equal(result, expected)
-
- def test_filter(self):
- pass
-
- def test_shift(self):
- # mixed dtypes #6959
- data = [('item ' + ch, makeMixedDataFrame())
- for ch in list('abcde')]
- data = dict(data)
- mixed_panel = Panel.from_dict(data, orient='minor')
- shifted = mixed_panel.shift(1)
- assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
-
- def test_numpy_round(self):
- values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
- [-1566.213, 88.88], [-12, 94.5]],
- [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
- [272.212, -99.99], [23, -76.5]]]
- p = Panel(values, items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B'])
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.round(p, out=p)
-
- # removing Panel before NumPy enforces, so just ignore
- @pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
- def test_multiindex_get(self):
- ind = MultiIndex.from_tuples(
- [('a', 1), ('a', 2), ('b', 1), ('b', 2)],
- names=['first', 'second'])
- wp = Panel(np.random.random((4, 5, 5)),
- items=ind,
- major_axis=np.arange(5),
- minor_axis=np.arange(5))
- f1 = wp['a']
- f2 = wp.loc['a']
-
- assert (f1.items == [1, 2]).all()
- assert (f2.items == [1, 2]).all()
-
- MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
- names=['first', 'second'])
-
- def test_repr_empty(self):
- empty = Panel()
- repr(empty)
-
- @pytest.mark.parametrize('bad_kwarg, exception, msg', [
- # errors must be 'ignore' or 'raise'
- ({'errors': 'something'}, ValueError, 'The parameter errors must.*'),
- ({'join': 'inner'}, NotImplementedError, 'Only left join is supported')
- ])
- def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
- pan = Panel([[[1.5, np.nan, 3.]]])
- with pytest.raises(exception, match=msg):
- pan.update(pan, **bad_kwarg)
-
- def test_update_raise_on_overlap(self):
- pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]],
- [[1.5, np.nan, 3.], [1.5, np.nan, 3.],
- [1.5, np.nan, 3.],
- [1.5, np.nan, 3.]]])
-
- with pytest.raises(ValueError, match='Data overlaps'):
- pan.update(pan, errors='raise')
-
- @pytest.mark.parametrize('raise_conflict', [True, False])
- def test_update_deprecation(self, raise_conflict):
- pan = Panel([[[1.5, np.nan, 3.]]])
- other = Panel([[[]]])
- with tm.assert_produces_warning(FutureWarning):
- pan.update(other, raise_conflict=raise_conflict)
-
-
-def test_panel_index():
- index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
- expected = MultiIndex.from_arrays([np.tile([1, 2, 3, 4], 3),
- np.repeat([1, 2, 3], 4)],
- names=['time', 'panel'])
- tm.assert_index_equal(index, expected)
-
-
-@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-def test_panel_np_all():
- wp = Panel({"A": DataFrame({'b': [1, 2]})})
- result = np.all(wp)
- assert result == np.bool_(True)
| cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/25550 | 2019-03-05T14:50:16Z | 2019-03-05T18:44:11Z | 2019-03-05T18:44:11Z | 2019-03-05T19:19:42Z |
ENH: to_datetime support iso week year (16607) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index ddc5e543c6165..6b31678b09a65 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -19,6 +19,7 @@ including other versions of pandas.
Other Enhancements
^^^^^^^^^^^^^^^^^^
+- Added support for ISO week year format ('%G-%V-%u') when parsing datetimes using :meth: `to_datetime` (:issue:`16607`)
- Indexing of ``DataFrame`` and ``Series`` now accepts zerodim ``np.ndarray`` (:issue:`24919`)
- :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`)
- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :meth:`datetime.time` objects with timezones (:issue:`24043`)
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 87658ae92175e..d3461dada0fa5 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -54,7 +54,10 @@ cdef dict _parse_code_table = {'y': 0,
'W': 16,
'Z': 17,
'p': 18, # an additional key, only with I
- 'z': 19}
+ 'z': 19,
+ 'G': 20,
+ 'V': 21,
+ 'u': 22}
def array_strptime(object[:] values, object fmt,
@@ -77,6 +80,7 @@ def array_strptime(object[:] values, object fmt,
object[:] result_timezone
int year, month, day, minute, hour, second, weekday, julian
int week_of_year, week_of_year_start, parse_code, ordinal
+ int iso_week, iso_year
int64_t us, ns
object val, group_key, ampm, found, timezone
dict found_key
@@ -169,13 +173,14 @@ def array_strptime(object[:] values, object fmt,
raise ValueError("time data %r does not match format "
"%r (search)" % (values[i], fmt))
+ iso_year = -1
year = 1900
month = day = 1
hour = minute = second = ns = us = 0
timezone = None
# Default to -1 to signify that values not known; not critical to have,
# though
- week_of_year = -1
+ iso_week = week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
@@ -265,13 +270,44 @@ def array_strptime(object[:] values, object fmt,
timezone = pytz.timezone(found_dict['Z'])
elif parse_code == 19:
timezone = parse_timezone_directive(found_dict['z'])
+ elif parse_code == 20:
+ iso_year = int(found_dict['G'])
+ elif parse_code == 21:
+ iso_week = int(found_dict['V'])
+ elif parse_code == 22:
+ weekday = int(found_dict['u'])
+ weekday -= 1
+
+ # don't assume default values for ISO week/year
+ if iso_year != -1:
+ if iso_week == -1 or weekday == -1:
+ raise ValueError("ISO year directive '%G' must be used with "
+ "the ISO week directive '%V' and a weekday "
+ "directive '%A', '%a', '%w', or '%u'.")
+ if julian != -1:
+ raise ValueError("Day of the year directive '%j' is not "
+ "compatible with ISO year directive '%G'. "
+ "Use '%Y' instead.")
+ elif year != -1 and week_of_year == -1 and iso_week != -1:
+ if weekday == -1:
+ raise ValueError("ISO week directive '%V' must be used with "
+ "the ISO year directive '%G' and a weekday "
+ "directive '%A', '%a', '%w', or '%u'.")
+ else:
+ raise ValueError("ISO week directive '%V' is incompatible with"
+ " the year directive '%Y'. Use the ISO year "
+ "'%G' instead.")
# If we know the wk of the year and what day of that wk, we can figure
# out the Julian day of the year.
- if julian == -1 and week_of_year != -1 and weekday != -1:
- week_starts_Mon = True if week_of_year_start == 0 else False
- julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
- week_starts_Mon)
+ if julian == -1 and weekday != -1:
+ if week_of_year != -1:
+ week_starts_Mon = week_of_year_start == 0
+ julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
+ week_starts_Mon)
+ elif iso_year != -1 and iso_week != -1:
+ year, julian = _calc_julian_from_V(iso_year, iso_week,
+ weekday + 1)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the wk
# calculation.
@@ -511,6 +547,7 @@ class TimeRE(dict):
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,9})",
+ 'G': r"(?P<G>\d\d\d\d)",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': (r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|"
@@ -518,7 +555,9 @@ class TimeRE(dict):
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
+ 'u': r"(?P<u>[1-7])",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
+ 'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
@@ -593,11 +632,27 @@ _CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
-cdef _calc_julian_from_U_or_W(int year, int week_of_year,
- int day_of_week, int week_starts_Mon):
+cdef int _calc_julian_from_U_or_W(int year, int week_of_year,
+ int day_of_week, int week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
- assumes the week starts on Sunday or Monday (6 or 0)."""
+ assumes the week starts on Sunday or Monday (6 or 0).
+
+ Parameters
+ ----------
+ year : int
+ the year
+ week_of_year : int
+ week taken from format U or W
+ week_starts_Mon : int
+ represents whether the week of the year
+ assumes the week starts on Sunday or Monday (6 or 0)
+
+ Returns
+ -------
+ int
+ converted julian day
+ """
cdef:
int first_weekday, week_0_length, days_to_week
@@ -620,6 +675,40 @@ cdef _calc_julian_from_U_or_W(int year, int week_of_year,
return 1 + days_to_week + day_of_week
+cdef object _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday):
+ """Calculate the Julian day based on the ISO 8601 year, week, and weekday.
+ ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
+ ISO week days range from 1 (Monday) to 7 (Sunday).
+
+ Parameters
+ ----------
+ iso_year : int
+ the year taken from format %G
+ iso_week : int
+ the week taken from format %V
+ iso_weekday : int
+ weekday taken from format %u
+
+ Returns
+ -------
+ (int, int)
+ the iso year and the Gregorian ordinal date / julian date
+ """
+
+ cdef:
+ int correction, ordinal
+
+ correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
+ ordinal = (iso_week * 7) + iso_weekday - correction
+ # ordinal may be negative or 0 now, which means the date is in the previous
+ # calendar year
+ if ordinal < 1:
+ ordinal += datetime_date(iso_year, 1, 1).toordinal()
+ iso_year -= 1
+ ordinal -= datetime_date(iso_year, 1, 1).toordinal()
+ return iso_year, ordinal
+
+
cdef parse_timezone_directive(object z):
"""
Parse the '%z' directive and return a pytz.FixedOffset
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 64e06787db6fe..80a7deecdffbe 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -455,6 +455,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
+ See strftime documentation for more information on choices:
+ https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
exact : boolean, True by default
- If True, require an exact format match.
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 1a1e33bd508fc..22e589beb8ba1 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -244,6 +244,63 @@ def test_to_datetime_parse_timezone_keeps_name(self):
class TestToDatetime(object):
+ @pytest.mark.parametrize("s, _format, dt", [
+ ['2015-1-1', '%G-%V-%u', datetime(2014, 12, 29, 0, 0)],
+ ['2015-1-4', '%G-%V-%u', datetime(2015, 1, 1, 0, 0)],
+ ['2015-1-7', '%G-%V-%u', datetime(2015, 1, 4, 0, 0)]
+ ])
+ def test_to_datetime_iso_week_year_format(self, s, _format, dt):
+ # See GH#16607
+ assert to_datetime(s, format=_format) == dt
+
+ @pytest.mark.parametrize("msg, s, _format", [
+ ["ISO week directive '%V' must be used with the ISO year directive "
+ "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 50",
+ "%Y %V"],
+ ["ISO year directive '%G' must be used with the ISO week directive "
+ "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 51",
+ "%G %V"],
+ ["ISO year directive '%G' must be used with the ISO week directive "
+ "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 "
+ "Monday", "%G %A"],
+ ["ISO year directive '%G' must be used with the ISO week directive "
+ "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 Mon",
+ "%G %a"],
+ ["ISO year directive '%G' must be used with the ISO week directive "
+ "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 6",
+ "%G %w"],
+ ["ISO year directive '%G' must be used with the ISO week directive "
+ "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 6",
+ "%G %u"],
+ ["ISO year directive '%G' must be used with the ISO week directive "
+ "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "2051",
+ "%G"],
+ ["Day of the year directive '%j' is not compatible with ISO year "
+ "directive '%G'. Use '%Y' instead.", "1999 51 6 256", "%G %V %u %j"],
+ ["ISO week directive '%V' is incompatible with the year directive "
+ "'%Y'. Use the ISO year '%G' instead.", "1999 51 Sunday", "%Y %V %A"],
+ ["ISO week directive '%V' is incompatible with the year directive "
+ "'%Y'. Use the ISO year '%G' instead.", "1999 51 Sun", "%Y %V %a"],
+ ["ISO week directive '%V' is incompatible with the year directive "
+ "'%Y'. Use the ISO year '%G' instead.", "1999 51 1", "%Y %V %w"],
+ ["ISO week directive '%V' is incompatible with the year directive "
+ "'%Y'. Use the ISO year '%G' instead.", "1999 51 1", "%Y %V %u"],
+ ["ISO week directive '%V' must be used with the ISO year directive "
+ "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", "20", "%V"]
+ ])
+ def test_error_iso_week_year(self, msg, s, _format):
+ # See GH#16607
+ # This test checks for errors thrown when giving the wrong format
+ # However, as discussed on PR#25541, overriding the locale
+ # causes a different error to be thrown due to the format being
+ # locale specific, but the test data is in english.
+ # Therefore, the tests only run when locale is not overwritten,
+ # as a sort of solution to this problem.
+ if (locale.getlocale() != ('zh_CN', 'UTF-8') and
+ locale.getlocale() != ('it_IT', 'UTF-8')):
+ with pytest.raises(ValueError, match=msg):
+ to_datetime(s, format=_format)
+
@pytest.mark.parametrize('tz', [None, 'US/Central'])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
| - [x] closes #16607
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I messed up the rebase of the old PR #24844 and started this again on a clean state. Reviewers please review on this PR!
#### From old PR
I found the issue stagnant, but since the fix was already in place, I manually took the code @rosygupta made and applied it onto latest master. Rebase wasn't an option that I found would work since the file in question has been split into multiple. Let me know what else needs to be updated. | https://api.github.com/repos/pandas-dev/pandas/pulls/25541 | 2019-03-05T08:03:21Z | 2019-03-18T12:35:48Z | 2019-03-18T12:35:48Z | 2019-03-19T04:03:13Z |
BUG: Fix #25481 by fixing the error message in TypeError | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 124ec8f4ab92c..9468641211720 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -121,7 +121,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
- Bug in :func:`to_datetime` which would raise an (incorrect) ``ValueError`` when called with a date far into the future and the ``format`` argument specified instead of raising ``OutOfBoundsDatetime`` (:issue:`23830`)
--
+- Bug in an error message in :meth:`DataFrame.plot`. Improved the error message if non-numerics are passed to :meth:`DataFrame.plot` (:issue:`25481`)
-
Categorical
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 48d870bfc2e03..f2802372222cc 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -358,10 +358,9 @@ def _compute_plot_data(self):
except AttributeError:
is_empty = not len(numeric_data)
- # no empty frames or series allowed
+ # no non-numeric frames or series allowed
if is_empty:
- raise TypeError('Empty {0!r}: no numeric data to '
- 'plot'.format(numeric_data.__class__.__name__))
+ raise TypeError('no numeric data to plot')
self.data = numeric_data
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 6702ad6cfb761..b9a29cc4ac27e 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -97,7 +97,7 @@ def test_nonnumeric_exclude(self):
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
- msg = "Empty 'DataFrame': no numeric data to plot"
+ msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
df['A'].plot()
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 98b241f5c8206..28806bb67c896 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -485,7 +485,9 @@ def test_subplots_timeseries_y_axis(self):
ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_all_tz"].values).all()
- with pytest.raises(TypeError):
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
testdata.plot(y="text")
@pytest.mark.xfail(reason='not support for period, categorical, '
@@ -2219,7 +2221,9 @@ def test_all_invalid_plot_data(self):
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
- with pytest.raises(TypeError):
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
df.plot(kind=kind)
@pytest.mark.slow
@@ -2230,7 +2234,9 @@ def test_partially_invalid_plot_data(self):
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
- with pytest.raises(TypeError):
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
df.plot(kind=kind)
with tm.RNGContext(42):
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 07a4b168a66f1..f5c44ed35819c 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -694,7 +694,9 @@ def test_invalid_plot_data(self):
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
- with pytest.raises(TypeError):
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
@pytest.mark.slow
@@ -711,7 +713,9 @@ def test_partially_invalid_plot_data(self):
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
- with pytest.raises(TypeError):
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
def test_invalid_kind(self):
| BUG: Fix #25481 by fixing the error message in TypeError
- [x] closes #25481
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25540 | 2019-03-05T06:50:22Z | 2019-03-10T21:59:15Z | 2019-03-10T21:59:14Z | 2019-03-10T21:59:17Z |
DOC: Polishing typos out of doc/source/user_guide/indexing.rst | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index be1745e2664a1..00d4dc9efc8cc 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -435,7 +435,7 @@ Selection By Position
This is sometimes called ``chained assignment`` and should be avoided.
See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`.
-Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
+Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bound is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
The ``.iloc`` attribute is the primary access method. The following are valid inputs:
@@ -545,7 +545,7 @@ Selection By Callable
.. versionadded:: 0.18.1
``.loc``, ``.iloc``, and also ``[]`` indexing can accept a ``callable`` as indexer.
-The ``callable`` must be a function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing.
+The ``callable`` must be a function with one argument (the calling Series, DataFrame or Panel) that returns valid output for indexing.
.. ipython:: python
@@ -569,7 +569,7 @@ You can use callable indexing in ``Series``.
df1.A.loc[lambda s: s > 0]
Using these methods / indexers, you can chain data selection operations
-without using temporary variable.
+without using a temporary variable.
.. ipython:: python
@@ -907,7 +907,7 @@ of the DataFrame):
df[df['A'] > 0]
-List comprehensions and ``map`` method of Series can also be used to produce
+List comprehensions and the ``map`` method of Series can also be used to produce
more complex criteria:
.. ipython:: python
@@ -1556,7 +1556,7 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
ind
``set_names``, ``set_levels``, and ``set_codes`` also take an optional
-`level`` argument
+``level`` argument
.. ipython:: python
| ✅ fixes typos in Indexing and Selecting Data doc:
- subject verb agreements
- extra conjunctions
- adds articles
- fixes formatting as code markup
| https://api.github.com/repos/pandas-dev/pandas/pulls/25538 | 2019-03-04T22:41:40Z | 2019-03-04T22:45:43Z | null | 2019-03-04T22:45:43Z |
fix MacPython / pandas-wheels ci failures | diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 2a64947042979..7528566e8326e 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -409,8 +409,9 @@ def test_mixed_integer_from_list(self):
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
- msg = (r"'(<|>)' not supported between instances of"
- r" 'datetime\.datetime' and 'int'|"
+ msg = (r"'(<|>)' not supported between instances of ('"
+ r"datetime\.datetime' and 'int'|'int' and 'datetime\.datetime"
+ r"')|"
r"unorderable types: int\(\) > datetime\.datetime\(\)")
if compat.PY2:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
| https://github.com/pandas-dev/pandas/pull/25483#issuecomment-469366672
@jreback : the regex is getting a bit unreadable.. this may suggest that a more (user friendly|consistent) error message should be raised by `safe_sort`? | https://api.github.com/repos/pandas-dev/pandas/pulls/25537 | 2019-03-04T19:19:00Z | 2019-03-05T10:24:47Z | 2019-03-05T10:24:47Z | 2019-03-05T12:22:14Z |
ENH: Add errors parameter to DataFrame.rename | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 124ec8f4ab92c..67474918159d5 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -25,7 +25,7 @@ Other Enhancements
- ``Series.str`` has gained :meth:`Series.str.casefold` method to removes all case distinctions present in a string (:issue:`25405`)
- :meth:`DataFrame.set_index` now works for instances of ``abc.Iterator``, provided their output is of the same length as the calling frame (:issue:`22484`, :issue:`24984`)
- :meth:`DatetimeIndex.union` now supports the ``sort`` argument. The behaviour of the sort parameter matches that of :meth:`Index.union` (:issue:`24994`)
--
+- :meth:`DataFrame.rename` now supports the ``errors`` argument to raise errors when attempting to rename nonexistent keys (:issue:`13473`)
.. _whatsnew_0250.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6b4d95055d06d..eadffb779734f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3911,7 +3911,8 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
- ('level', None)])
+ ('level', None),
+ ('errors', 'ignore')])
def rename(self, *args, **kwargs):
"""
Alter axes labels.
@@ -3924,30 +3925,49 @@ def rename(self, *args, **kwargs):
Parameters
----------
- mapper, index, columns : dict-like or function, optional
- dict-like or functions transformations to apply to
+ mapper : dict-like or function
+ Dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
- axis : int or str, optional
+ index : dict-like or function
+ Alternative to specifying axis (``mapper, axis=0``
+ is equivalent to ``index=mapper``).
+ columns : dict-like or function
+ Alternative to specifying axis (``mapper, axis=1``
+ is equivalent to ``columns=mapper``).
+ axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
- copy : boolean, default True
- Also copy underlying data
- inplace : boolean, default False
+ copy : bool, default True
+ Also copy underlying data.
+ inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
+ errors : {'ignore', 'raise'}, default 'ignore'
+ If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
+ or `columns` contains labels that are not present in the Index
+ being transformed.
+ If 'ignore', existing keys will be renamed and extra keys will be
+ ignored.
Returns
-------
DataFrame
+ DataFrame with the renamed axis labels.
+
+ Raises
+ ------
+ KeyError
+ If any of the labels is not found in the selected axis and
+ "errors='raise'".
See Also
--------
- DataFrame.rename_axis
+ DataFrame.rename_axis : Set the name of the axis.
Examples
--------
@@ -3973,6 +3993,10 @@ def rename(self, *args, **kwargs):
1 2 5
2 3 6
+ >>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise")
+ Traceback (most recent call last):
+ KeyError: ['C'] not found in axis
+
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ee8f9cba951b3..7915d98662c9e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -981,11 +981,23 @@ def rename(self, *args, **kwargs):
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
+ errors : {'ignore', 'raise'}, default 'ignore'
+ If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
+ or `columns` contains labels that are not present in the Index
+ being transformed.
+ If 'ignore', existing keys will be renamed and extra keys will be
+ ignored.
Returns
-------
renamed : %(klass)s (new object)
+ Raises
+ ------
+ KeyError
+ If any of the labels is not found in the selected axis and
+ "errors='raise'".
+
See Also
--------
NDFrame.rename_axis
@@ -1065,6 +1077,7 @@ def rename(self, *args, **kwargs):
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
axis = kwargs.pop('axis', None)
+ errors = kwargs.pop('errors', 'ignore')
if axis is not None:
# Validate the axis
self._get_axis_number(axis)
@@ -1085,10 +1098,19 @@ def rename(self, *args, **kwargs):
if v is None:
continue
f = com._get_rename_function(v)
-
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
+
+ # GH 13473
+ if not callable(v):
+ indexer = self.axes[axis].get_indexer_for(v)
+ if errors == 'raise' and len(indexer[indexer == -1]):
+ missing_labels = [label for index, label in enumerate(v)
+ if indexer[index] == -1]
+ raise KeyError('{} not found in axis'
+ .format(missing_labels))
+
result._data = result._data.rename_axis(f, axis=baxis, copy=copy,
level=level)
result._clear_item_cache()
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index a25e893e08900..f01b86f727fee 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -871,6 +871,23 @@ def test_rename_bug2(self):
columns=["a"])
tm.assert_frame_equal(df, expected)
+ def test_rename_errors_raises(self):
+ df = DataFrame(columns=['A', 'B', 'C', 'D'])
+ with pytest.raises(KeyError, match='\'E\'] not found in axis'):
+ df.rename(columns={'A': 'a', 'E': 'e'}, errors='raise')
+
+ @pytest.mark.parametrize('mapper, errors, expected_columns', [
+ ({'A': 'a', 'E': 'e'}, 'ignore', ['a', 'B', 'C', 'D']),
+ ({'A': 'a'}, 'raise', ['a', 'B', 'C', 'D']),
+ (str.lower, 'raise', ['a', 'b', 'c', 'd'])])
+ def test_rename_errors(self, mapper, errors, expected_columns):
+ # GH 13473
+ # rename now works with errors parameter
+ df = DataFrame(columns=['A', 'B', 'C', 'D'])
+ result = df.rename(columns=mapper, errors=errors)
+ expected = DataFrame(columns=expected_columns)
+ tm.assert_frame_equal(result, expected)
+
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
@@ -1328,7 +1345,7 @@ def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {"self", "mapper", "index", "columns", "axis",
- "inplace", "copy", "level"}
+ "inplace", "copy", "level", "errors"}
@pytest.mark.skipif(PY2, reason="inspect.signature")
def test_reindex_signature(self):
| - [X] closes #13473
- [x] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/25535 | 2019-03-04T14:34:08Z | 2019-03-05T22:15:21Z | 2019-03-05T22:15:19Z | 2019-03-07T10:10:05Z |
BUG: caught typeError in series.at (#25506) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 124ec8f4ab92c..e1a1c975b5ed8 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -150,7 +150,7 @@ Timezones
- Bug in :func:`to_datetime` with ``utc=True`` and datetime strings that would apply previously parsed UTC offsets to subsequent arguments (:issue:`24992`)
- Bug in :func:`Timestamp.tz_localize` and :func:`Timestamp.tz_convert` does not propagate ``freq`` (:issue:`25241`)
--
+- Bug in :func:`Series.at` where setting :class:`Timestamp` with timezone raises ``TypeError`` (:issue:`25506`)
Numeric
^^^^^^^
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cada6663ce651..3d275edc2f78b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1229,7 +1229,7 @@ def _set_value(self, label, value, takeable=False):
self._values[label] = value
else:
self.index._engine.set_value(self._values, label, value)
- except KeyError:
+ except (KeyError, TypeError):
# set using a non-recursive method
self.loc[label] = value
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 0cd41562541d1..20053264ac4f1 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -185,6 +185,14 @@ def test_at_with_tz(self):
result = df.at[0, 'date']
assert result == expected
+ def test_series_set_tz_timestamp(self, tz_naive_fixture):
+ # GH 25506
+ ts = Timestamp('2017-08-05 00:00:00+0100', tz=tz_naive_fixture)
+ result = Series(ts)
+ result.at[1] = ts
+ expected = Series([ts, ts])
+ tm.assert_series_equal(result, expected)
+
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2])
| - [x] closes #25506
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25533 | 2019-03-04T12:43:55Z | 2019-03-05T21:11:49Z | 2019-03-05T21:11:49Z | 2019-03-05T21:11:53Z |
DOC: fix docstring for pandas.read_parquet | diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index ba322f42c07c1..db90844a1715f 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -260,8 +260,8 @@ def read_parquet(path, engine='auto', columns=None, **kwargs):
Parameters
----------
- path : string
- File path
+ path : str
+ File path.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
@@ -277,6 +277,16 @@ def read_parquet(path, engine='auto', columns=None, **kwargs):
Returns
-------
DataFrame
+ DataFrame with appropriate data.
+
+ See Also
+ --------
+ DataFrame.to_parquet : Write DataFrame to a parquet file.
+ read_csv : Read a comma-separated values (csv) file into DataFrame.
+
+ Examples
+ --------
+ >>> pd.read_parquet('data.parquet') # doctest: +SKIP
"""
impl = get_engine(engine)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
```
################################################################################
####################### Docstring (pandas.read_parquet) #######################
################################################################################
Load a parquet object from the file path, returning a DataFrame.
.. versionadded 0.21.0
Parameters
----------
path : str
File path.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
columns : list, default=None
If not None, only these columns will be read from the file.
.. versionadded 0.21.1
**kwargs
Any additional kwargs are passed to the engine.
Returns
-------
DataFrame
A parquet file is returned as two-dimensional data structure with
labeled axes.
See Also
--------
to_parquet : Write DataFrame to a parquet file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_parquet('data.parquet') # doctest: +SKIP
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.read_parquet" correct. :)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/25532 | 2019-03-04T11:01:30Z | 2019-04-10T05:28:12Z | null | 2019-04-10T05:28:12Z |
Bug groupby idxmin | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 8e72ce83ac028..9405e7804a461 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -254,6 +254,7 @@ Groupby/Resample/Rolling
- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying a aggregation function to timezone aware data (:issue:`23683`)
- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`)
- Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`)
+- Bug in :func:`idxmax` and :func:`idxmin` on :meth:`DataFrame.groupby` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`)
Reshaping
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 903c898b68873..a5804586bdf11 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -89,7 +89,8 @@ def _gotitem(self, key, ndim, subset=None):
cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
-cython_cast_blacklist = frozenset(['rank', 'count', 'size'])
+cython_cast_blacklist = frozenset(['rank', 'count', 'size', 'idxmin',
+ 'idxmax'])
def whitelist_method_generator(base, klass, whitelist):
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index bdae6f36b5572..5823c39cbd8ed 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -254,8 +254,13 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
- result[item] = self._try_cast(
- colg.aggregate(func, *args, **kwargs), data)
+
+ cast = self._transform_should_cast(func)
+
+ result[item] = colg.aggregate(func, *args, **kwargs)
+ if cast:
+ result[item] = self._try_cast(result[item], data)
+
except ValueError:
cannot_agg.append(item)
continue
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index b5e328ef64424..4ea0d12656ee4 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -400,6 +400,25 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i):
assert res.iloc[0].b == data["expected"]
+@pytest.mark.parametrize("func, values", [
+ ("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
+ ("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
+])
+def test_idxmin_idxmax_returns_int_types(func, values):
+ # GH 25444
+ df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
+ 'c_int': [1, 2, 3, 4],
+ 'c_float': [4.02, 3.03, 2.04, 1.05],
+ 'c_date': ['2019', '2018', '2016', '2017']})
+ df['c_date'] = pd.to_datetime(df['c_date'])
+
+ result = getattr(df.groupby('name'), func)()
+
+ expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
+
+ tm.assert_frame_equal(result, expected)
+
+
def test_fill_consistency():
# GH9221
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index b645073fcf72a..26f39f8f41e2f 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -845,3 +845,22 @@ def test_groupby_transform_timezone_column(func):
expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id',
'max_end_time'])
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("func, values", [
+ ("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]),
+ ("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"])
+])
+def test_groupby_transform_with_datetimes(func, values):
+ # GH 15306
+ dates = pd.date_range('1/1/2011', periods=10, freq='D')
+
+ stocks = pd.DataFrame({'price': np.arange(10.0)}, index=dates)
+ stocks['week_id'] = pd.to_datetime(stocks.index).week
+
+ result = stocks.groupby(stocks['week_id'])['price'].transform(func)
+
+ expected = pd.Series(data=pd.to_datetime(values),
+ index=dates, name="price")
+
+ tm.assert_series_equal(result, expected)
| closes #25444
closes #15306
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25531 | 2019-03-04T08:59:34Z | 2019-03-30T19:17:29Z | 2019-03-30T19:17:29Z | 2019-03-30T19:17:32Z |
fix segfault when running with cython coverage enabled, xref cython#2879 | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index e38e9a1ca5df6..a5a50ea59753d 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -138,11 +138,11 @@ cdef int64_t get_daytime_conversion_factor(int from_index, int to_index) nogil:
return daytime_conversion_factor_matrix[row - 6][col - 6]
-cdef int64_t nofunc(int64_t ordinal, asfreq_info *af_info):
- return np.iinfo(np.int32).min
+cdef int64_t nofunc(int64_t ordinal, asfreq_info *af_info) nogil:
+ return INT32_MIN
-cdef int64_t no_op(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t no_op(int64_t ordinal, asfreq_info *af_info) nogil:
return ordinal
@@ -270,7 +270,8 @@ cdef int64_t DtoB_weekday(int64_t unix_date) nogil:
return ((unix_date + 4) // 7) * 5 + ((unix_date + 4) % 7) - 4
-cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, int64_t unix_date):
+cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back,
+ int64_t unix_date) nogil:
cdef:
int day_of_week = dayofweek(dts.year, dts.month, dts.day)
@@ -286,21 +287,23 @@ cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, int64_t unix_date):
return DtoB_weekday(unix_date)
-cdef inline int64_t upsample_daytime(int64_t ordinal, asfreq_info *af_info):
+cdef inline int64_t upsample_daytime(int64_t ordinal,
+ asfreq_info *af_info) nogil:
if (af_info.is_end):
return (ordinal + 1) * af_info.intraday_conversion_factor - 1
else:
return ordinal * af_info.intraday_conversion_factor
-cdef inline int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info):
+cdef inline int64_t downsample_daytime(int64_t ordinal,
+ asfreq_info *af_info) nogil:
return ordinal // (af_info.intraday_conversion_factor)
cdef inline int64_t transform_via_day(int64_t ordinal,
asfreq_info *af_info,
freq_conv_func first_func,
- freq_conv_func second_func):
+ freq_conv_func second_func) nogil:
cdef:
int64_t result
@@ -313,7 +316,7 @@ cdef inline int64_t transform_via_day(int64_t ordinal,
# Conversion _to_ Daily Freq
cdef void AtoD_ym(int64_t ordinal, int64_t *year,
- int *month, asfreq_info *af_info):
+ int *month, asfreq_info *af_info) nogil:
year[0] = ordinal + 1970
month[0] = 1
@@ -327,7 +330,7 @@ cdef void AtoD_ym(int64_t ordinal, int64_t *year,
year[0] -= 1
-cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int64_t unix_date, year
int month
@@ -341,7 +344,7 @@ cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info):
cdef void QtoD_ym(int64_t ordinal, int *year,
- int *month, asfreq_info *af_info):
+ int *month, asfreq_info *af_info) nogil:
year[0] = ordinal // 4 + 1970
month[0] = (ordinal % 4) * 3 + 1
@@ -353,7 +356,7 @@ cdef void QtoD_ym(int64_t ordinal, int *year,
year[0] -= 1
-cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int64_t unix_date
int year, month
@@ -366,12 +369,12 @@ cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info):
return upsample_daytime(unix_date, af_info)
-cdef void MtoD_ym(int64_t ordinal, int *year, int *month):
+cdef void MtoD_ym(int64_t ordinal, int *year, int *month) nogil:
year[0] = ordinal // 12 + 1970
month[0] = ordinal % 12 + 1
-cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int64_t unix_date
int year, month
@@ -384,7 +387,7 @@ cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info):
return upsample_daytime(unix_date, af_info)
-cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = (ordinal * 7 + af_info.from_end - 4 +
(7 - 1) * (af_info.is_end - 1))
return upsample_daytime(ordinal, af_info)
@@ -393,7 +396,7 @@ cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info):
# --------------------------------------------------------------------
# Conversion _to_ BusinessDay Freq
-cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int roll_back
npy_datetimestruct dts
@@ -404,7 +407,7 @@ cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info):
return DtoB(&dts, roll_back, unix_date)
-cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int roll_back
npy_datetimestruct dts
@@ -415,7 +418,7 @@ cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info):
return DtoB(&dts, roll_back, unix_date)
-cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int roll_back
npy_datetimestruct dts
@@ -426,7 +429,7 @@ cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info):
return DtoB(&dts, roll_back, unix_date)
-cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int roll_back
npy_datetimestruct dts
@@ -437,7 +440,7 @@ cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info):
return DtoB(&dts, roll_back, unix_date)
-cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int roll_back
npy_datetimestruct dts
@@ -452,7 +455,7 @@ cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info):
# ----------------------------------------------------------------------
# Conversion _from_ Daily Freq
-cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
npy_datetimestruct dts
@@ -464,7 +467,7 @@ cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info):
return <int64_t>(dts.year - 1970)
-cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year):
+cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year) nogil:
cdef:
npy_datetimestruct dts
int quarter
@@ -485,7 +488,7 @@ cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year):
return quarter
-cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int year, quarter
@@ -495,7 +498,7 @@ cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info):
return <int64_t>((year - 1970) * 4 + quarter - 1)
-cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
npy_datetimestruct dts
@@ -504,7 +507,7 @@ cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info):
return <int64_t>((dts.year - 1970) * 12 + dts.month - 1)
-cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
return (ordinal + 3 - af_info.to_end) // 7 + 1
@@ -512,30 +515,30 @@ cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info):
# --------------------------------------------------------------------
# Conversion _from_ BusinessDay Freq
-cdef int64_t asfreq_BtoDT(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_BtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = ((ordinal + 3) // 5) * 7 + (ordinal + 3) % 5 -3
return upsample_daytime(ordinal, af_info)
-cdef int64_t asfreq_BtoA(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_BtoA(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_BtoDT,
<freq_conv_func>asfreq_DTtoA)
-cdef int64_t asfreq_BtoQ(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_BtoQ(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_BtoDT,
<freq_conv_func>asfreq_DTtoQ)
-cdef int64_t asfreq_BtoM(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_BtoM(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_BtoDT,
<freq_conv_func>asfreq_DTtoM)
-cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_BtoDT,
<freq_conv_func>asfreq_DTtoW)
@@ -544,25 +547,25 @@ cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info):
# ----------------------------------------------------------------------
# Conversion _from_ Annual Freq
-cdef int64_t asfreq_AtoA(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_AtoA(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_AtoDT,
<freq_conv_func>asfreq_DTtoA)
-cdef int64_t asfreq_AtoQ(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_AtoQ(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_AtoDT,
<freq_conv_func>asfreq_DTtoQ)
-cdef int64_t asfreq_AtoM(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_AtoM(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_AtoDT,
<freq_conv_func>asfreq_DTtoM)
-cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_AtoDT,
<freq_conv_func>asfreq_DTtoW)
@@ -571,25 +574,25 @@ cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info):
# ----------------------------------------------------------------------
# Conversion _from_ Quarterly Freq
-cdef int64_t asfreq_QtoQ(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_QtoQ(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_QtoDT,
<freq_conv_func>asfreq_DTtoQ)
-cdef int64_t asfreq_QtoA(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_QtoA(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_QtoDT,
<freq_conv_func>asfreq_DTtoA)
-cdef int64_t asfreq_QtoM(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_QtoM(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_QtoDT,
<freq_conv_func>asfreq_DTtoM)
-cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_QtoDT,
<freq_conv_func>asfreq_DTtoW)
@@ -598,19 +601,19 @@ cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info):
# ----------------------------------------------------------------------
# Conversion _from_ Monthly Freq
-cdef int64_t asfreq_MtoA(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_MtoA(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_MtoDT,
<freq_conv_func>asfreq_DTtoA)
-cdef int64_t asfreq_MtoQ(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_MtoQ(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_MtoDT,
<freq_conv_func>asfreq_DTtoQ)
-cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_MtoDT,
<freq_conv_func>asfreq_DTtoW)
@@ -619,25 +622,25 @@ cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info):
# ----------------------------------------------------------------------
# Conversion _from_ Weekly Freq
-cdef int64_t asfreq_WtoA(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_WtoA(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_WtoDT,
<freq_conv_func>asfreq_DTtoA)
-cdef int64_t asfreq_WtoQ(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_WtoQ(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_WtoDT,
<freq_conv_func>asfreq_DTtoQ)
-cdef int64_t asfreq_WtoM(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_WtoM(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_WtoDT,
<freq_conv_func>asfreq_DTtoM)
-cdef int64_t asfreq_WtoW(int64_t ordinal, asfreq_info *af_info):
+cdef int64_t asfreq_WtoW(int64_t ordinal, asfreq_info *af_info) nogil:
return transform_via_day(ordinal, af_info,
<freq_conv_func>asfreq_WtoDT,
<freq_conv_func>asfreq_DTtoW)
@@ -971,7 +974,7 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year):
return qtr_freq
-cdef inline int month_to_quarter(int month):
+cdef inline int month_to_quarter(int month) nogil:
return (month - 1) // 3 + 1
@@ -1024,9 +1027,6 @@ def periodarr_to_dt64arr(int64_t[:] periodarr, int freq):
with nogil:
for i in range(l):
- if periodarr[i] == NPY_NAT:
- out[i] = NPY_NAT
- continue
out[i] = period_ordinal_to_dt64(periodarr[i], freq)
return out.base # .base to access underlying np.ndarray
| We have several functions getting called inside a `with nogil:` block that aren't currently declared as `nogil`. In the future cython should warn in this type of situation (cython#2879). | https://api.github.com/repos/pandas-dev/pandas/pulls/25529 | 2019-03-04T00:01:18Z | 2019-03-04T18:39:42Z | 2019-03-04T18:39:42Z | 2019-03-07T22:01:37Z |
DOC: Polishing typos out of doc/source/user_guide/indexing.rst | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index be1745e2664a1..00d4dc9efc8cc 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -435,7 +435,7 @@ Selection By Position
This is sometimes called ``chained assignment`` and should be avoided.
See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`.
-Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
+Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bound is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
The ``.iloc`` attribute is the primary access method. The following are valid inputs:
@@ -545,7 +545,7 @@ Selection By Callable
.. versionadded:: 0.18.1
``.loc``, ``.iloc``, and also ``[]`` indexing can accept a ``callable`` as indexer.
-The ``callable`` must be a function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing.
+The ``callable`` must be a function with one argument (the calling Series, DataFrame or Panel) that returns valid output for indexing.
.. ipython:: python
@@ -569,7 +569,7 @@ You can use callable indexing in ``Series``.
df1.A.loc[lambda s: s > 0]
Using these methods / indexers, you can chain data selection operations
-without using temporary variable.
+without using a temporary variable.
.. ipython:: python
@@ -907,7 +907,7 @@ of the DataFrame):
df[df['A'] > 0]
-List comprehensions and ``map`` method of Series can also be used to produce
+List comprehensions and the ``map`` method of Series can also be used to produce
more complex criteria:
.. ipython:: python
@@ -1556,7 +1556,7 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
ind
``set_names``, ``set_levels``, and ``set_codes`` also take an optional
-`level`` argument
+``level`` argument
.. ipython:: python
| -[x] fixes 5 small (subject verb agreement, articles, formatting, duplicate conjunction) typos in the doc's user_guide _indexing.rst_ file.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25528 | 2019-03-03T23:35:36Z | 2019-03-04T18:54:13Z | 2019-03-04T18:54:13Z | 2019-03-04T18:54:13Z |
DOC: Fixed PeriodArray api ref | diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index a129b75636536..4cf8db895f0ac 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -259,7 +259,7 @@ Every period in a ``PeriodArray`` must have the same ``freq``.
.. autosummary::
:toctree: api/
- arrays.DatetimeArray
+ arrays.PeriodArray
PeriodDtype
.. _api.arrays.interval:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 3ddceb8c2839d..12d36695f5b79 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -104,13 +104,16 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
+ dtype : PeriodDtype, optional
+ A PeriodDtype instance from which to extract a `freq`. If both
+ `freq` and `dtype` are specified, then the frequencies must match.
copy : bool, default False
Whether to copy the ordinals before storing.
See Also
--------
period_array : Create a new PeriodArray.
- pandas.PeriodIndex : Immutable Index for period data.
+ PeriodIndex : Immutable Index for period data.
Notes
-----
| [ci skip]
DatetimeArray was listed twice. PeriodArray was missed. | https://api.github.com/repos/pandas-dev/pandas/pulls/25526 | 2019-03-03T20:14:11Z | 2019-03-20T01:55:34Z | 2019-03-20T01:55:34Z | 2019-03-20T01:55:38Z |
DOC: resolve all GL03 docstring validation errors | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c4840f1e836c4..51df779341ed5 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -241,8 +241,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05
+ MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 6e40063fb925a..37aa05659b70f 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1156,7 +1156,6 @@ class Timedelta(_Timedelta):
Notes
-----
The ``.value`` attribute is always in ns.
-
"""
def __new__(cls, object value=_no_input, unit=None, **kwargs):
cdef _Timedelta td_base
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 75cf658423210..89f2b9961a4d7 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -119,7 +119,7 @@ def f(self):
return result
f.__name__ = name
- f.__doc__ = "\n{}\n".format(docstring)
+ f.__doc__ = docstring
return property(f)
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 9be2c9af169e8..fd7149edc8d7c 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -541,7 +541,6 @@ class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
-
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
diff --git a/pandas/core/base.py b/pandas/core/base.py
index f896596dd5216..c0f3df1b36c03 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -870,7 +870,6 @@ def to_numpy(self, dtype=None, copy=False):
.. versionadded:: 0.24.0
-
Parameters
----------
dtype : str or numpy.dtype, optional
diff --git a/pandas/core/config.py b/pandas/core/config.py
index 01664fffb1e27..b6264a5257dcb 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -651,7 +651,6 @@ def _build_option_description(k):
.format(rkey=d.rkey if d.rkey else ''))
s += u(')')
- s += '\n\n'
return s
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index eadffb779734f..3996728a1cc90 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2184,7 +2184,6 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True,
Convert URLs to HTML links.
.. versionadded:: 0.24.0
-
%(returns)s
See Also
--------
@@ -6027,7 +6026,6 @@ def unstack(self, level=-1, fill_value=None):
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
-
%(versionadded)s
Parameters
----------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0b81576404e2f..d2b87d79c7d52 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -948,7 +948,6 @@ def swaplevel(self, i=-2, j=-1, axis=0):
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
-
"""
axis = self._get_axis_number(axis)
result = self.copy()
@@ -4951,9 +4950,7 @@ def pipe(self, func, *args, **kwargs):
_shared_docs['aggregate'] = dedent("""
Aggregate using one or more operations over the specified axis.
-
%(versionadded)s
-
Parameters
----------
func : function, str, list or dict
@@ -4983,17 +4980,13 @@ def pipe(self, func, *args, **kwargs):
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
-
%(see_also)s
-
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
-
- %(examples)s
- """)
+ %(examples)s""")
_shared_docs['transform'] = ("""
Call ``func`` on self producing a %(klass)s with transformed values
@@ -10307,7 +10300,7 @@ def _doc_parms(cls):
Returns
-------
-%(name1)s or %(name2)s (if level specified)
+%(name1)s or %(name2)s (if level specified)\
%(see_also)s
%(examples)s\
"""
@@ -10464,8 +10457,7 @@ def _doc_parms(cls):
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
-%(examples)s
-"""
+%(examples)s"""
_cummin_examples = """\
Examples
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index ebba4a0a9395d..903c898b68873 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -126,9 +126,7 @@ class where members are defined.
property_wrapper_template = \
"""@property
def %(name)s(self) :
- \"""
- %(doc)s
- \"""
+ \"""%(doc)s\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 36dcb692bb079..3d0a6023ac29f 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -221,8 +221,7 @@ class providing the base-class of operations.
Examples
--------
-%(examples)s
-"""
+%(examples)s"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
@@ -1106,9 +1105,7 @@ def mean(self, *args, **kwargs):
Returns
-------
pandas.Series or pandas.DataFrame
-
%(see_also)s
-
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
@@ -1564,9 +1561,7 @@ def nth(self, n, dropna=None):
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
-
%(see_also)s
-
Examples
--------
@@ -2139,9 +2134,7 @@ def head(self, n=5):
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
-
%(see_also)s
-
Examples
--------
@@ -2167,9 +2160,7 @@ def tail(self, n=5):
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
-
%(see_also)s
-
Examples
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index dee181fc1c569..29b9a47a92a48 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3104,7 +3104,6 @@ def reindex(self, target, method=None, level=None, limit=None,
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
-
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index d76e6b75d3762..b2c6dff4338b6 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -110,7 +110,6 @@ def read_feather(path, columns=None, use_threads=True):
Returns
-------
type of object stored in file
-
"""
feather, pyarrow = _try_import()
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 5171ea68fd497..b8073c89892c5 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -303,7 +303,6 @@ def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
Returns
-------
class:`matplotlip.axis.Axes`
-
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
| - [X] closes #25172
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Original number of errors: 95
Remaining errors: 0
| https://api.github.com/repos/pandas-dev/pandas/pulls/25525 | 2019-03-03T18:24:01Z | 2019-03-10T21:08:26Z | 2019-03-10T21:08:25Z | 2019-03-10T21:08:35Z |
DOC:Remove hard-coded examples from _flex_doc_SERIES (#24589) | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index dbdabecafae3a..4d88ce6836ca4 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -384,57 +384,252 @@ def _get_op_name(op, special):
# -----------------------------------------------------------------------------
# Docstring Generation and Templates
+_add_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.add(b, fill_value=0)
+a 2.0
+b 1.0
+c 1.0
+d 1.0
+e NaN
+dtype: float64
+"""
+
+_sub_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.subtract(b, fill_value=0)
+a 0.0
+b 1.0
+c 1.0
+d -1.0
+e NaN
+dtype: float64
+"""
+
+_mul_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.multiply(b, fill_value=0)
+a 1.0
+b 0.0
+c 0.0
+d 0.0
+e NaN
+dtype: float64
+"""
+
+_div_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.divide(b, fill_value=0)
+a 1.0
+b inf
+c inf
+d 0.0
+e NaN
+dtype: float64
+"""
+
+_floordiv_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.floordiv(b, fill_value=0)
+a 1.0
+b NaN
+c NaN
+d 0.0
+e NaN
+dtype: float64
+"""
+
+_mod_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.mod(b, fill_value=0)
+a 0.0
+b NaN
+c NaN
+d 0.0
+e NaN
+dtype: float64
+"""
+_pow_example_SERIES = """
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.pow(b, fill_value=0)
+a 1.0
+b 1.0
+c 1.0
+d 0.0
+e NaN
+dtype: float64
+"""
+
_op_descriptions = {
# Arithmetic Operators
'add': {'op': '+',
'desc': 'Addition',
- 'reverse': 'radd'},
+ 'reverse': 'radd',
+ 'series_examples': _add_example_SERIES},
'sub': {'op': '-',
'desc': 'Subtraction',
- 'reverse': 'rsub'},
+ 'reverse': 'rsub',
+ 'series_examples': _sub_example_SERIES},
'mul': {'op': '*',
'desc': 'Multiplication',
'reverse': 'rmul',
+ 'series_examples': _mul_example_SERIES,
'df_examples': None},
'mod': {'op': '%',
'desc': 'Modulo',
- 'reverse': 'rmod'},
+ 'reverse': 'rmod',
+ 'series_examples': _mod_example_SERIES},
'pow': {'op': '**',
'desc': 'Exponential power',
'reverse': 'rpow',
+ 'series_examples': _pow_example_SERIES,
'df_examples': None},
'truediv': {'op': '/',
'desc': 'Floating division',
'reverse': 'rtruediv',
+ 'series_examples': _div_example_SERIES,
'df_examples': None},
'floordiv': {'op': '//',
'desc': 'Integer division',
'reverse': 'rfloordiv',
+ 'series_examples': _floordiv_example_SERIES,
'df_examples': None},
'divmod': {'op': 'divmod',
'desc': 'Integer division and modulo',
'reverse': 'rdivmod',
+ 'series_examples': None,
'df_examples': None},
# Comparison Operators
'eq': {'op': '==',
'desc': 'Equal to',
- 'reverse': None},
+ 'reverse': None,
+ 'series_examples': None},
'ne': {'op': '!=',
'desc': 'Not equal to',
- 'reverse': None},
+ 'reverse': None,
+ 'series_examples': None},
'lt': {'op': '<',
'desc': 'Less than',
- 'reverse': None},
+ 'reverse': None,
+ 'series_examples': None},
'le': {'op': '<=',
'desc': 'Less than or equal to',
- 'reverse': None},
+ 'reverse': None,
+ 'series_examples': None},
'gt': {'op': '>',
'desc': 'Greater than',
- 'reverse': None},
+ 'reverse': None,
+ 'series_examples': None},
'ge': {'op': '>=',
'desc': 'Greater than or equal to',
- 'reverse': None}
+ 'reverse': None,
+ 'series_examples': None}
}
_op_names = list(_op_descriptions.keys())
@@ -472,51 +667,6 @@ def _get_op_name(op, special):
See Also
--------
Series.{reverse}
-
-Examples
---------
->>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
->>> a
-a 1.0
-b 1.0
-c 1.0
-d NaN
-dtype: float64
->>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
->>> b
-a 1.0
-b NaN
-d 1.0
-e NaN
-dtype: float64
->>> a.add(b, fill_value=0)
-a 2.0
-b 1.0
-c 1.0
-d 1.0
-e NaN
-dtype: float64
->>> a.subtract(b, fill_value=0)
-a 0.0
-b 1.0
-c 1.0
-d -1.0
-e NaN
-dtype: float64
->>> a.multiply(b)
-a 1.0
-b NaN
-c NaN
-d NaN
-e NaN
-dtype: float64
->>> a.divide(b, fill_value=0)
-a 1.0
-b inf
-c inf
-d 0.0
-e NaN
-dtype: float64
"""
_arith_doc_FRAME = """
@@ -906,16 +1056,32 @@ def _make_flex_doc(op_name, typ):
if typ == 'series':
base_doc = _flex_doc_SERIES
- doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,
- equiv=equiv, reverse=op_desc['reverse'])
+ doc_no_examples = base_doc.format(
+ desc=op_desc['desc'],
+ op_name=op_name,
+ equiv=equiv,
+ reverse=op_desc['reverse']
+ )
+ if op_desc['series_examples']:
+ doc = doc_no_examples + op_desc['series_examples']
+ else:
+ doc = doc_no_examples
elif typ == 'dataframe':
base_doc = _flex_doc_FRAME
- doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,
- equiv=equiv, reverse=op_desc['reverse'])
+ doc = base_doc.format(
+ desc=op_desc['desc'],
+ op_name=op_name,
+ equiv=equiv,
+ reverse=op_desc['reverse']
+ )
elif typ == 'panel':
base_doc = _flex_doc_PANEL
- doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,
- equiv=equiv, reverse=op_desc['reverse'])
+ doc = base_doc.format(
+ desc=op_desc['desc'],
+ op_name=op_name,
+ equiv=equiv,
+ reverse=op_desc['reverse']
+ )
else:
raise AssertionError('Invalid typ argument.')
return doc
| Initial work on #24589
- Removes hard-coded examples from _flex_doc_SERIES
- Adds separate examples for each op (_*_example_SERIES). At this stage I've just copied the examples which were in the _flex_doc_SERIES template (add, sub, mul, div)
- Modifies _make_flex_doc to format the _flex_doc_SERIES template with an example string from op_desc['series_examples']
- adds references to each of the examples to _op_descriptions['series_examples'], allowing them to be picked up in _make_flex_doc
Ops outside not in [add, sub, mul, div] will return their docstring with no examples in this revision.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25524 | 2019-03-03T17:56:04Z | 2019-03-11T11:58:35Z | 2019-03-11T11:58:35Z | 2019-03-11T11:58:36Z |
BUG-24971 copying blocks also considers ndim | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 6ad299de45e2a..c3b442e2352bb 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -49,6 +49,12 @@ Bug Fixes
- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`)
- Bug where float indexes could have misaligned values when printing (:issue:`25061`)
+**Categorical**
+
+- Bug where calling :meth:`Series.replace` on categorical data could return a ``Series`` with incorrect dimensions (:issue:`24971`)
+-
+-
+
**Reshaping**
- Bug in :meth:`~pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index e2ecb00f46df8..a61bc30a126e6 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -739,7 +739,7 @@ def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
- return self.make_block_same_class(values)
+ return self.make_block_same_class(values, ndim=self.ndim)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True):
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 2e7b746f6c9f2..d59927993debb 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -281,6 +281,20 @@ def test_replace_mixed_types_with_string(self):
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
+ @pytest.mark.parametrize("categorical, numeric", [
+ (pd.Categorical('A', categories=['A', 'B']), [1]),
+ (pd.Categorical(('A', ), categories=['A', 'B']), [1]),
+ (pd.Categorical(('A', 'B'), categories=['A', 'B']), [1, 2]),
+ ])
+ def test_replace_categorical(self, categorical, numeric):
+ # GH 24971
+ # Do not check if dtypes are equal due to a known issue that
+ # Categorical.replace sometimes coerces to object (GH 23305)
+ s = pd.Series(categorical)
+ result = s.replace({'A': 1, 'B': 2})
+ expected = pd.Series(numeric)
+ tm.assert_series_equal(expected, result, check_dtype=False)
+
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
| - [X] closes #24971
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
The following gives a series containing [1] instead of 1
```
>>> pd.Series(pd.Categorical('A', categories=['A', 'B'])).replace({'A': 1, 'B': 2})
0 [1]
dtype: object
```
This bug occurs because in the process of copying the original categorical block (which is needed as the operation is not inplace), the constructor class for the new object defaults to `ObjectBlock`, whose constructor has a default `ndim` of 2. This PR alters the block `copy` function to specify that the newly constructed block should have the same `ndim` as the block being copied. | https://api.github.com/repos/pandas-dev/pandas/pulls/25521 | 2019-03-03T03:00:18Z | 2019-03-20T12:26:56Z | 2019-03-20T12:26:55Z | 2019-03-20T12:27:02Z |
Backport PR #25368 on branch 0.24.x (BUG: Fix potential segfault after pd.Categorical(pd.Series(...), categories=...)) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 4fcde7769b362..3d72b62275dab 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -31,6 +31,8 @@ Fixed Regressions
- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
+- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
+
.. _whatsnew_0242.enhancements:
Enhancements
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 3f38785e6619e..73a03b4f71b6f 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -323,14 +323,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None,
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step futher below
- if is_categorical(values):
- # GH23814, for perf, if values._values already an instance of
- # Categorical, set values to codes, and run fastpath
- if (isinstance(values, (ABCSeries, ABCIndexClass)) and
- isinstance(values._values, type(self))):
- values = values._values.codes.copy()
- fastpath = True
-
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
@@ -382,7 +374,7 @@ def __init__(self, values, categories=None, ordered=None, dtype=None,
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
- old_codes = (values.cat.codes if isinstance(values, ABCSeries)
+ old_codes = (values._values.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
@@ -2627,6 +2619,9 @@ def _recode_for_categories(codes, old_categories, new_categories):
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
+ elif new_categories.equals(old_categories):
+ # Same categories, so no need to actually recode
+ return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 25c299692ceca..f07e3aba53cd4 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -212,6 +212,18 @@ def test_constructor(self):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
+ def test_constructor_with_existing_categories(self):
+ # GH25318: constructing with pd.Series used to bogusly skip recoding
+ # categories
+ c0 = Categorical(["a", "b", "c", "a"])
+ c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
+
+ c2 = Categorical(c0, categories=c1.categories)
+ tm.assert_categorical_equal(c1, c2)
+
+ c3 = Categorical(Series(c0), categories=c1.categories)
+ tm.assert_categorical_equal(c1, c3)
+
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
| Backport PR #25368: BUG: Fix potential segfault after pd.Categorical(pd.Series(...), categories=...) | https://api.github.com/repos/pandas-dev/pandas/pulls/25520 | 2019-03-03T02:36:46Z | 2019-03-03T03:29:58Z | 2019-03-03T03:29:58Z | 2019-03-03T03:44:43Z |
Backport PR #25498 on branch 0.24.x (BUG: Fix RecursionError during IntervalTree construction) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 4fcde7769b362..926239e7e5dc5 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -98,6 +98,7 @@ Bug Fixes
- Bug in :meth:`Series.is_unique` where single occurrences of ``NaN`` were not considered unique (:issue:`25180`)
- Bug in :func:`merge` when merging an empty ``DataFrame`` with an ``Int64`` column or a non-empty ``DataFrame`` with an ``Int64`` column that is all ``NaN`` (:issue:`25183`)
+- Bug in ``IntervalTree`` where a ``RecursionError`` occurs upon construction due to an overflow when adding endpoints, which also causes :class:`IntervalIndex` to crash during indexing operations (:issue:`25485`)
-
.. _whatsnew_0.242.contributors:
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index fb6f30c030f11..196841f35ed8d 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -284,7 +284,7 @@ cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode:
else:
# calculate a pivot so we can create child nodes
self.is_leaf_node = False
- self.pivot = np.median(left + right) / 2
+ self.pivot = np.median(left / 2 + right / 2)
left_set, right_set, center_set = self.classify_intervals(
left, right)
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 90722e66d8d8c..46b2d12015a22 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -171,3 +171,13 @@ def test_is_overlapping_trivial(self, closed, left, right):
# GH 23309
tree = IntervalTree(left, right, closed=closed)
assert tree.is_overlapping is False
+
+ def test_construction_overflow(self):
+ # GH 25485
+ left, right = np.arange(101), [np.iinfo(np.int64).max] * 101
+ tree = IntervalTree(left, right)
+
+ # pivot should be average of left/right medians
+ result = tree.root.pivot
+ expected = (50 + np.iinfo(np.int64).max) / 2
+ assert result == expected
| Backport PR #25498: BUG: Fix RecursionError during IntervalTree construction | https://api.github.com/repos/pandas-dev/pandas/pulls/25519 | 2019-03-03T01:44:51Z | 2019-03-03T02:57:33Z | 2019-03-03T02:57:33Z | 2019-03-03T02:57:34Z |
Backport PR #25517: TST: xfail non-writeable pytables tests with numpy 1.16x | diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 5e67cf2ee2837..6e9f768d8bd68 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -12,6 +12,8 @@
_np_version_under1p13 = _nlv < LooseVersion('1.13')
_np_version_under1p14 = _nlv < LooseVersion('1.14')
_np_version_under1p15 = _nlv < LooseVersion('1.15')
+_np_version_under1p16 = _nlv < LooseVersion('1.16')
+_np_version_under1p17 = _nlv < LooseVersion('1.17')
if _nlv < '1.12':
@@ -64,5 +66,7 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
__all__ = ['np',
'_np_version_under1p13',
'_np_version_under1p14',
- '_np_version_under1p15'
+ '_np_version_under1p15',
+ '_np_version_under1p16',
+ '_np_version_under1p17'
]
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index dca6180f39664..632ab7aa7be7a 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -4,6 +4,7 @@
import pytest
from pandas.compat import lrange
+from pandas.compat.numpy import _np_version_under1p17
import pandas as pd
from pandas import Index, MultiIndex, date_range, period_range
@@ -292,9 +293,15 @@ def test_numpy_ufuncs(func):
verify_integrity=False
)
- with pytest.raises(Exception):
- with np.errstate(all='ignore'):
- func(idx)
+ if _np_version_under1p17:
+ expected_exception = AttributeError
+ msg = "'tuple' object has no attribute '{}'".format(func.__name__)
+ else:
+ expected_exception = TypeError
+ msg = ("loop of ufunc does not support argument 0 of type tuple which"
+ " has no callable {} method").format(func.__name__)
+ with pytest.raises(expected_exception, match=msg):
+ func(idx)
@pytest.mark.parametrize('func', [
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 9430011288f27..73e632d538fde 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -34,6 +34,15 @@
tables = pytest.importorskip('tables')
+# TODO:
+# remove when gh-24839 is fixed; this affects numpy 1.16
+# and pytables 3.4.4
+xfail_non_writeable = pytest.mark.xfail(
+ LooseVersion(np.__version__) >= LooseVersion('1.16'),
+ reason=('gh-25511, gh-24839. pytables needs a '
+ 'release beyong 3.4.4 to support numpy 1.16x'))
+
+
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
@@ -875,6 +884,7 @@ def test_put_integer(self):
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
+ @xfail_non_writeable
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
@@ -1511,7 +1521,10 @@ def test_to_hdf_with_min_itemsize(self):
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
- @pytest.mark.parametrize("format", ['fixed', 'table'])
+ @pytest.mark.parametrize(
+ "format",
+ [pytest.param('fixed', marks=xfail_non_writeable),
+ 'table'])
def test_to_hdf_errors(self, format):
data = ['\ud800foo']
@@ -1958,6 +1971,7 @@ def test_pass_spec_to_storer(self):
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
+ @xfail_non_writeable
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
@@ -2189,6 +2203,7 @@ def test_unimplemented_dtypes_table_columns(self):
# this fails because we have a date in the object block......
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
+ @xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion('1.15.0'),
reason=("Skipping pytables test when numpy version is "
@@ -2747,6 +2762,7 @@ def test_float_index(self):
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
+ @xfail_non_writeable
def test_tuple_index(self):
# GH #492
@@ -2759,6 +2775,7 @@ def test_tuple_index(self):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal)
+ @xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self):
@@ -2822,6 +2839,7 @@ def test_timeseries_preepoch(self):
except OverflowError:
pytest.skip('known failer on some windows platforms')
+ @xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
@@ -2852,6 +2870,7 @@ def test_frame(self, compression):
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
+ @xfail_non_writeable
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
@@ -2865,8 +2884,10 @@ def test_empty_series_frame(self):
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
- def test_empty_series(self):
- for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
+ @xfail_non_writeable
+ @pytest.mark.parametrize(
+ 'dtype', [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]'])
+ def test_empty_series(self, dtype):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
@@ -2947,6 +2968,7 @@ def test_store_series_name(self):
recons = store['series']
tm.assert_series_equal(recons, series)
+ @xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
@@ -4538,6 +4560,7 @@ def test_pytables_native2_read(self, datapath):
d1 = store['detector']
assert isinstance(d1, DataFrame)
+ @xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath):
# GH 24510
# legacy table with fixed format written in Python 2
@@ -4725,6 +4748,7 @@ def test_unicode_longer_encoded(self):
result = store.get('df')
tm.assert_frame_equal(result, df)
+ @xfail_non_writeable
def test_store_datetime_mixed(self):
df = DataFrame(
@@ -5285,6 +5309,7 @@ def test_complex_table(self):
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
+ @xfail_non_writeable
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
| https://api.github.com/repos/pandas-dev/pandas/pulls/25518 | 2019-03-03T01:26:20Z | 2019-03-03T02:09:01Z | 2019-03-03T02:09:01Z | 2019-03-03T02:09:02Z | |
TST: xfail non-writeable pytables tests with numpy 1.16x | diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index bc9af01a97467..6e9f768d8bd68 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -13,6 +13,7 @@
_np_version_under1p14 = _nlv < LooseVersion('1.14')
_np_version_under1p15 = _nlv < LooseVersion('1.15')
_np_version_under1p16 = _nlv < LooseVersion('1.16')
+_np_version_under1p17 = _nlv < LooseVersion('1.17')
if _nlv < '1.12':
@@ -66,5 +67,6 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
'_np_version_under1p13',
'_np_version_under1p14',
'_np_version_under1p15',
- '_np_version_under1p16'
+ '_np_version_under1p16',
+ '_np_version_under1p17'
]
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index 27a5ba9e5434a..d5a6e9acaa5f3 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -4,7 +4,7 @@
import pytest
from pandas.compat import PY2, lrange
-from pandas.compat.numpy import _np_version_under1p16
+from pandas.compat.numpy import _np_version_under1p17
import pandas as pd
from pandas import Index, MultiIndex, date_range, period_range
@@ -287,7 +287,7 @@ def test_numpy_ufuncs(idx, func):
# test ufuncs of numpy. see:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
- if _np_version_under1p16:
+ if _np_version_under1p17:
expected_exception = AttributeError
msg = "'tuple' object has no attribute '{}'".format(func.__name__)
else:
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index b464903d8b4e0..69ff32d1b728b 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -34,6 +34,15 @@
tables = pytest.importorskip('tables')
+# TODO:
+# remove when gh-24839 is fixed; this affects numpy 1.16
+# and pytables 3.4.4
+xfail_non_writeable = pytest.mark.xfail(
+ LooseVersion(np.__version__) >= LooseVersion('1.16'),
+ reason=('gh-25511, gh-24839. pytables needs a '
+ 'release beyong 3.4.4 to support numpy 1.16x'))
+
+
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
@@ -862,6 +871,7 @@ def test_put_integer(self):
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
+ @xfail_non_writeable
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
@@ -1438,7 +1448,10 @@ def test_to_hdf_with_min_itemsize(self):
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
- @pytest.mark.parametrize("format", ['fixed', 'table'])
+ @pytest.mark.parametrize(
+ "format",
+ [pytest.param('fixed', marks=xfail_non_writeable),
+ 'table'])
def test_to_hdf_errors(self, format):
data = ['\ud800foo']
@@ -1815,6 +1828,7 @@ def test_pass_spec_to_storer(self):
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
+ @xfail_non_writeable
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
@@ -2006,6 +2020,7 @@ def test_unimplemented_dtypes_table_columns(self):
# this fails because we have a date in the object block......
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
+ @xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion('1.15.0'),
reason=("Skipping pytables test when numpy version is "
@@ -2245,6 +2260,7 @@ def test_float_index(self):
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
+ @xfail_non_writeable
def test_tuple_index(self):
# GH #492
@@ -2257,6 +2273,7 @@ def test_tuple_index(self):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal)
+ @xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self):
@@ -2320,6 +2337,7 @@ def test_timeseries_preepoch(self):
except OverflowError:
pytest.skip('known failer on some windows platforms')
+ @xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
@@ -2350,6 +2368,7 @@ def test_frame(self, compression):
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
+ @xfail_non_writeable
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
@@ -2363,8 +2382,10 @@ def test_empty_series_frame(self):
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
- def test_empty_series(self):
- for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
+ @xfail_non_writeable
+ @pytest.mark.parametrize(
+ 'dtype', [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]'])
+ def test_empty_series(self, dtype):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
@@ -2445,6 +2466,7 @@ def test_store_series_name(self):
recons = store['series']
tm.assert_series_equal(recons, series)
+ @xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
@@ -3954,6 +3976,7 @@ def test_pytables_native2_read(self, datapath):
d1 = store['detector']
assert isinstance(d1, DataFrame)
+ @xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath):
# GH 24510
# legacy table with fixed format written in Python 2
@@ -4117,6 +4140,7 @@ def test_unicode_longer_encoded(self):
result = store.get('df')
tm.assert_frame_equal(result, df)
+ @xfail_non_writeable
def test_store_datetime_mixed(self):
df = DataFrame(
@@ -4677,6 +4701,7 @@ def test_complex_table(self):
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
+ @xfail_non_writeable
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
| closes #25511
| https://api.github.com/repos/pandas-dev/pandas/pulls/25517 | 2019-03-02T17:02:48Z | 2019-03-02T21:08:09Z | 2019-03-02T21:08:09Z | 2019-03-11T19:18:19Z |
STY: use pytest.raises context manager (frame) | diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index a25e893e08900..f4a2a5f8032a0 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -633,7 +633,8 @@ def test_rename(self, float_frame):
tm.assert_index_equal(renamed.index, Index(['BAR', 'FOO']))
# have to pass something
- pytest.raises(TypeError, float_frame.rename)
+ with pytest.raises(TypeError, match="must pass an index to rename"):
+ float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={'C': 'foo', 'D': 'bar'})
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 43a45bb915819..69d3ea6176ca3 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -898,6 +898,7 @@ def test_var_std(self, datetime_frame):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
@@ -919,10 +920,12 @@ def test_numeric_only_flag(self, meth):
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
- pytest.raises(TypeError, lambda: getattr(df1, meth)(
- axis=1, numeric_only=False))
- pytest.raises(TypeError, lambda: getattr(df2, meth)(
- axis=1, numeric_only=False))
+ msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
+ with pytest.raises(TypeError, match=msg):
+ getattr(df1, meth)(axis=1, numeric_only=False)
+ msg = "could not convert string to float: 'a'"
+ with pytest.raises(TypeError, match=msg):
+ getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
@@ -1367,6 +1370,7 @@ def test_pct_change(self):
# ----------------------------------------------------------------------
# Index of max / min
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
@@ -1379,8 +1383,11 @@ def test_idxmin(self, float_frame, int_frame):
skipna=skipna)
tm.assert_series_equal(result, expected)
- pytest.raises(ValueError, frame.idxmin, axis=2)
+ msg = "No axis named 2 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ frame.idxmin(axis=2)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
@@ -1393,7 +1400,9 @@ def test_idxmax(self, float_frame, int_frame):
skipna=skipna)
tm.assert_series_equal(result, expected)
- pytest.raises(ValueError, frame.idxmax, axis=2)
+ msg = "No axis named 2 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@@ -1879,7 +1888,9 @@ def test_round_issue(self):
tm.assert_index_equal(rounded.index, dfs.index)
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
- pytest.raises(ValueError, df.round, decimals)
+ msg = "Index of decimals must be unique"
+ with pytest.raises(ValueError, match=msg):
+ df.round(decimals)
def test_built_in_round(self):
if not compat.PY3:
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 0934dd20638e4..f66e4e94c3f97 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -9,7 +9,7 @@
import numpy as np
import pytest
-from pandas.compat import long, lrange, range
+from pandas.compat import PY2, long, lrange, range
import pandas as pd
from pandas import (
@@ -144,8 +144,12 @@ def test_tab_completion(self):
def test_not_hashable(self, empty_frame):
df = self.klass([1])
- pytest.raises(TypeError, hash, df)
- pytest.raises(TypeError, hash, empty_frame)
+ msg = ("'(Sparse)?DataFrame' objects are mutable, thus they cannot be"
+ " hashed")
+ with pytest.raises(TypeError, match=msg):
+ hash(df)
+ with pytest.raises(TypeError, match=msg):
+ hash(empty_frame)
def test_new_empty_index(self):
df1 = self.klass(np.random.randn(0, 3))
@@ -169,7 +173,9 @@ def test_get_agg_axis(self, float_frame):
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
- pytest.raises(ValueError, float_frame._get_agg_axis, 2)
+ msg = r"Axis must be 0 or 1 \(got 2\)"
+ with pytest.raises(ValueError, match=msg):
+ float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame, empty_frame):
assert empty_frame.empty
@@ -351,12 +357,15 @@ def test_transpose(self, float_frame):
for col, s in compat.iteritems(mixed_T):
assert s.dtype == np.object_
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_swapaxes(self):
df = self.klass(np.random.randn(10, 5))
self._assert_frame_equal(df.T, df.swapaxes(0, 1))
self._assert_frame_equal(df.T, df.swapaxes(1, 0))
self._assert_frame_equal(df, df.swapaxes(0, 0))
- pytest.raises(ValueError, df.swapaxes, 2, 5)
+ msg = "No axis named 2 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ df.swapaxes(2, 5)
def test_axis_aliases(self, float_frame):
f = float_frame
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index dea925dcde676..fb00776b33cbb 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import lrange, lzip, u
+from pandas.compat import PY2, lrange, lzip, u
from pandas.errors import PerformanceWarning
import pandas as pd
@@ -38,8 +38,11 @@ def test_drop_names(self):
assert obj.columns.name == 'second'
assert list(df.columns) == ['d', 'e', 'f']
- pytest.raises(KeyError, df.drop, ['g'])
- pytest.raises(KeyError, df.drop, ['g'], 1)
+ msg = r"\['g'\] not found in axis"
+ with pytest.raises(KeyError, match=msg):
+ df.drop(['g'])
+ with pytest.raises(KeyError, match=msg):
+ df.drop(['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
@@ -84,10 +87,14 @@ def test_drop(self):
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.loc[[1, 2], :])
- pytest.raises(KeyError, simple.drop, 5)
- pytest.raises(KeyError, simple.drop, 'C', 1)
- pytest.raises(KeyError, simple.drop, [1, 5])
- pytest.raises(KeyError, simple.drop, ['A', 'C'], 1)
+ with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
+ simple.drop(5)
+ with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
+ simple.drop('C', 1)
+ with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
+ simple.drop([1, 5])
+ with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
+ simple.drop(['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
@@ -444,7 +451,9 @@ def test_reindex_dups(self):
assert_frame_equal(result, expected)
# reindex fails
- pytest.raises(ValueError, df.reindex, index=list(range(len(df))))
+ msg = "cannot reindex from a duplicate axis"
+ with pytest.raises(ValueError, match=msg):
+ df.reindex(index=list(range(len(df))))
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
@@ -963,10 +972,15 @@ def test_take(self):
assert_frame_equal(result, expected, check_names=False)
# illegal indices
- pytest.raises(IndexError, df.take, [3, 1, 2, 30], axis=0)
- pytest.raises(IndexError, df.take, [3, 1, 2, -31], axis=0)
- pytest.raises(IndexError, df.take, [3, 1, 2, 5], axis=1)
- pytest.raises(IndexError, df.take, [3, 1, 2, -5], axis=1)
+ msg = "indices are out-of-bounds"
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, 30], axis=0)
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, -31], axis=0)
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, 5], axis=1)
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
@@ -1037,6 +1051,7 @@ def test_reindex_corner(self):
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
@@ -1052,7 +1067,9 @@ def test_reindex_axis(self):
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
- pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2)
+ msg = "No axis named 2 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ self.intframe.reindex_axis(rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 5419f4d5127f6..aa2d68b8c63c8 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -274,10 +274,12 @@ def f(dtype):
columns=["A", "B", "C"],
dtype=dtype)
- pytest.raises(NotImplementedError, f,
- [("A", "datetime64[h]"),
- ("B", "str"),
- ("C", "int32")])
+ msg = ("compound dtypes are not implemented in the DataFrame"
+ " constructor")
+ with pytest.raises(NotImplementedError, match=msg):
+ f([("A", "datetime64[h]"),
+ ("B", "str"),
+ ("C", "int32")])
# these work (though results may be unexpected)
f('int64')
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a8a78b26e317c..3fa1a2d918071 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -12,7 +12,8 @@
import pytest
from pandas.compat import (
- PY3, PY36, is_platform_little_endian, lmap, long, lrange, lzip, range, zip)
+ PY2, PY3, PY36, is_platform_little_endian, lmap, long, lrange, lzip, range,
+ zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
@@ -58,8 +59,9 @@ def test_constructor_cast_failure(self):
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
- pytest.raises(ValueError, df.__setitem__, tuple(['test']),
- np.ones((4, 2)))
+ msg = "Wrong number of items passed 2, placement implies 1"
+ with pytest.raises(ValueError, match=msg):
+ df['test'] = np.ones((4, 2))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
@@ -1259,7 +1261,9 @@ def test_constructor_Series_named(self):
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
- pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
+ msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame(s, columns=[1, 2])
# #2234
a = Series([], name='x')
@@ -1433,8 +1437,10 @@ def test_constructor_column_duplicates(self):
tm.assert_frame_equal(idf, edf)
- pytest.raises(ValueError, DataFrame.from_dict,
- OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
+ msg = "If using all scalar values, you must pass an index"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_dict(
+ OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
@@ -1465,8 +1471,11 @@ def test_constructor_single_value(self):
dtype=object),
index=[1, 2], columns=['a', 'c']))
- pytest.raises(ValueError, DataFrame, 'a', [1, 2])
- pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
+ msg = "DataFrame constructor not properly called!"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame('a', [1, 2])
+ with pytest.raises(ValueError, match=msg):
+ DataFrame('a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
@@ -1692,6 +1701,7 @@ def test_constructor_series_copy(self):
assert not (series['A'] == 5).all()
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
@@ -1704,9 +1714,11 @@ def check(df):
# No NaN found -> error
if len(indexer) == 0:
- def f():
+ msg = ("cannot do label indexing on"
+ r" <class 'pandas\.core\.indexes\.range\.RangeIndex'>"
+ r" with these indexers \[nan\] of <class 'float'>")
+ with pytest.raises(TypeError, match=msg):
df.loc[:, np.nan]
- pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
@@ -1782,13 +1794,15 @@ def test_constructor_categorical(self):
tm.assert_frame_equal(df, expected)
# invalid (shape)
- pytest.raises(ValueError,
- lambda: DataFrame([Categorical(list('abc')),
- Categorical(list('abdefg'))]))
+ msg = r"Shape of passed values is \(6, 2\), indices imply \(3, 2\)"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame([Categorical(list('abc')),
+ Categorical(list('abdefg'))])
# ndim > 1
- pytest.raises(NotImplementedError,
- lambda: Categorical(np.array([list('abcd')])))
+ msg = "> 1 ndim Categorical are not supported at this time"
+ with pytest.raises(NotImplementedError, match=msg):
+ Categorical(np.array([list('abcd')]))
def test_constructor_categorical_series(self):
@@ -2164,8 +2178,11 @@ def test_from_records_bad_index_column(self):
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
- pytest.raises(ValueError, DataFrame.from_records, df, index=[2])
- pytest.raises(KeyError, DataFrame.from_records, df, index=2)
+ msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_records(df, index=[2])
+ with pytest.raises(KeyError, match=r"^2$"):
+ DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 601a4c6b72fe3..db60fbf0f8563 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -75,11 +75,15 @@ def test_to_dict_index_not_unique_with_index_orient(self):
# GH22801
# Data loss when indexes are not unique. Raise ValueError.
df = DataFrame({'a': [1, 2], 'b': [0.5, 0.75]}, index=['A', 'A'])
- pytest.raises(ValueError, df.to_dict, orient='index')
+ msg = "DataFrame index must be unique for orient='index'"
+ with pytest.raises(ValueError, match=msg):
+ df.to_dict(orient='index')
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
- pytest.raises(ValueError, df.to_dict, orient='xinvalid')
+ msg = "orient 'xinvalid' not understood"
+ with pytest.raises(ValueError, match=msg):
+ df.to_dict(orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index a8776c84b98ca..b37bf02a6b8e7 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -154,8 +154,8 @@ def test_select_dtypes_include_using_list_like(self):
ei = df[['h', 'i']]
assert_frame_equal(ri, ei)
- pytest.raises(NotImplementedError,
- lambda: df.select_dtypes(include=['period']))
+ with pytest.raises(NotImplementedError, match=r"^$"):
+ df.select_dtypes(include=['period'])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame({'a': list('abc'),
@@ -218,8 +218,8 @@ def test_select_dtypes_include_using_scalars(self):
ei = df[['f']]
assert_frame_equal(ri, ei)
- pytest.raises(NotImplementedError,
- lambda: df.select_dtypes(include='period'))
+ with pytest.raises(NotImplementedError, match=r"^$"):
+ df.select_dtypes(include='period')
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame({'a': list('abc'),
@@ -245,8 +245,8 @@ def test_select_dtypes_exclude_using_scalars(self):
ei = df[['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j', 'k']]
assert_frame_equal(ri, ei)
- pytest.raises(NotImplementedError,
- lambda: df.select_dtypes(exclude='period'))
+ with pytest.raises(NotImplementedError, match=r"^$"):
+ df.select_dtypes(exclude='period')
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame({'a': list('abc'),
@@ -601,8 +601,12 @@ def test_astype_dict_like(self, dtype_class):
# in the keys of the dtype dict
dt4 = dtype_class({'b': str, 2: str})
dt5 = dtype_class({'e': str})
- pytest.raises(KeyError, df.astype, dt4)
- pytest.raises(KeyError, df.astype, dt5)
+ msg = ("Only a column name can be used for the key in a dtype mappings"
+ " argument")
+ with pytest.raises(KeyError, match=msg):
+ df.astype(dt4)
+ with pytest.raises(KeyError, match=msg):
+ df.astype(dt5)
assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 19b8ae4eb6e0f..ffe54f7a94307 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -9,7 +9,7 @@
import pytest
from pandas._libs.tslib import iNaT
-from pandas.compat import long, lrange, lzip, map, range, zip
+from pandas.compat import PY2, long, lrange, lzip, map, range, zip
from pandas.core.dtypes.common import is_float_dtype, is_integer, is_scalar
from pandas.core.dtypes.dtypes import CategoricalDtype
@@ -431,8 +431,9 @@ def test_getitem_setitem_ix_negative_integers(self):
def test_getattr(self):
assert_series_equal(self.frame.A, self.frame['A'])
- pytest.raises(AttributeError, getattr, self.frame,
- 'NONEXISTENT_NAME')
+ msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
+ with pytest.raises(AttributeError, match=msg):
+ self.frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
@@ -793,7 +794,8 @@ def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
assert len(f.columns) == 3
- pytest.raises(KeyError, f.__delitem__, 'D')
+ with pytest.raises(KeyError, match=r"^'D'$"):
+ del f['D']
del f['B']
assert len(f.columns) == 2
@@ -842,7 +844,9 @@ def test_getitem_fancy_2d(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- pytest.raises(ValueError, f.ix.__getitem__, f > 0.5)
+ msg = "Cannot index with multidimensional key"
+ with pytest.raises(ValueError, match=msg):
+ f.ix[f > 0.5]
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
@@ -865,6 +869,7 @@ def test_getitem_fancy_slice_integers_step(self):
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
@@ -887,8 +892,10 @@ def test_getitem_setitem_integer_slice_keyerrors(self):
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
- pytest.raises(KeyError, df2.loc.__getitem__, slice(3, 11))
- pytest.raises(KeyError, df2.loc.__setitem__, slice(3, 11), 0)
+ with pytest.raises(KeyError, match=r"^3$"):
+ df2.loc[3:11]
+ with pytest.raises(KeyError, match=r"^3$"):
+ df2.loc[3:11] = 0
def test_setitem_fancy_2d(self):
@@ -1077,6 +1084,7 @@ def test_fancy_getitem_int_labels(self):
expected = df[3]
assert_series_equal(result, expected)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
@@ -1084,14 +1092,18 @@ def test_fancy_index_int_labels_exceptions(self):
simplefilter("ignore", DeprecationWarning)
# labels that aren't contained
- pytest.raises(KeyError, df.ix.__setitem__,
- ([0, 1, 2], [2, 3, 4]), 5)
+ with pytest.raises(KeyError, match=r"\[1\] not in index"):
+ df.ix[[0, 1, 2], [2, 3, 4]] = 5
# try to set indices not contained in frame
- pytest.raises(KeyError, self.frame.ix.__setitem__,
- ['foo', 'bar', 'baz'], 1)
- pytest.raises(KeyError, self.frame.ix.__setitem__,
- (slice(None, None), ['E']), 1)
+ msg = (r"None of \[Index\(\['foo', 'bar', 'baz'\],"
+ r" dtype='object'\)\] are in the \[index\]")
+ with pytest.raises(KeyError, match=msg):
+ self.frame.ix[['foo', 'bar', 'baz']] = 1
+ msg = (r"None of \[Index\(\['E'\], dtype='object'\)\] are in the"
+ r" \[columns\]")
+ with pytest.raises(KeyError, match=msg):
+ self.frame.ix[:, ['E']] = 1
# partial setting now allows this GH2578
# pytest.raises(KeyError, self.frame.ix.__setitem__,
@@ -1504,6 +1516,7 @@ def test_getitem_setitem_boolean_multi(self):
expected.loc[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
@@ -1537,7 +1550,11 @@ def test_getitem_setitem_float_labels(self):
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
- pytest.raises(TypeError, lambda: df.iloc[1.0:5])
+ msg = ("cannot do slice indexing on"
+ r" <class 'pandas\.core\.indexes\.numeric\.Float64Index'> with"
+ r" these indexers \[1.0\] of <class 'float'>")
+ with pytest.raises(TypeError, match=msg):
+ df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
@@ -1744,11 +1761,16 @@ def test_getitem_setitem_ix_bool_keyerror(self):
# #2199
df = DataFrame({'a': [1, 2, 3]})
- pytest.raises(KeyError, df.loc.__getitem__, False)
- pytest.raises(KeyError, df.loc.__getitem__, True)
+ with pytest.raises(KeyError, match=r"^False$"):
+ df.loc[False]
+ with pytest.raises(KeyError, match=r"^True$"):
+ df.loc[True]
- pytest.raises(KeyError, df.loc.__setitem__, False, 0)
- pytest.raises(KeyError, df.loc.__setitem__, True, 0)
+ msg = "cannot use a single bool to index into setitem"
+ with pytest.raises(KeyError, match=msg):
+ df.loc[False] = 0
+ with pytest.raises(KeyError, match=msg):
+ df.loc[True] = 0
def test_getitem_list_duplicates(self):
# #1943
@@ -1813,6 +1835,7 @@ def test_set_value(self):
self.frame.set_value(idx, col, 1)
assert self.frame[col][idx] == 1
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_set_value_resize(self):
with tm.assert_produces_warning(FutureWarning,
@@ -1849,7 +1872,9 @@ def test_set_value_resize(self):
assert isna(res3['baz'].drop(['foobar'])).all()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
+ msg = "could not convert string to float: 'sam'"
+ with pytest.raises(ValueError, match=msg):
+ res3.set_value('foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(np.random.randn(3, 3),
@@ -1888,7 +1913,8 @@ def test_get_set_value_no_partial_indexing(self):
df = DataFrame(index=index, columns=lrange(4))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- pytest.raises(KeyError, df.get_value, 0, 1)
+ with pytest.raises(KeyError, match=r"^0$"):
+ df.get_value(0, 1)
def test_single_element_ix_dont_upcast(self):
self.frame['E'] = 1
@@ -2158,10 +2184,15 @@ def test_non_monotonic_reindex_methods(self):
df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
columns=list('A'))
# index is not monotonic increasing or decreasing
- pytest.raises(ValueError, df_rev.reindex, df.index, method='pad')
- pytest.raises(ValueError, df_rev.reindex, df.index, method='ffill')
- pytest.raises(ValueError, df_rev.reindex, df.index, method='bfill')
- pytest.raises(ValueError, df_rev.reindex, df.index, method='nearest')
+ msg = "index must be monotonic increasing or decreasing"
+ with pytest.raises(ValueError, match=msg):
+ df_rev.reindex(df.index, method='pad')
+ with pytest.raises(ValueError, match=msg):
+ df_rev.reindex(df.index, method='ffill')
+ with pytest.raises(ValueError, match=msg):
+ df_rev.reindex(df.index, method='bfill')
+ with pytest.raises(ValueError, match=msg):
+ df_rev.reindex(df.index, method='nearest')
def test_reindex_level(self):
from itertools import permutations
@@ -2669,14 +2700,20 @@ def _check_align(df, cond, other, check_dtypes=True):
# invalid conditions
df = default_frame
err1 = (df + 1).values[0:2, :]
- pytest.raises(ValueError, df.where, cond, err1)
+ msg = "other must be the same shape as self when an ndarray"
+ with pytest.raises(ValueError, match=msg):
+ df.where(cond, err1)
err2 = cond.iloc[:2, :].values
other1 = _safe_add(df)
- pytest.raises(ValueError, df.where, err2, other1)
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ df.where(err2, other1)
- pytest.raises(ValueError, df.mask, True)
- pytest.raises(ValueError, df.mask, 0)
+ with pytest.raises(ValueError, match=msg):
+ df.mask(True)
+ with pytest.raises(ValueError, match=msg):
+ df.mask(0)
# where inplace
def _check_set(df, cond, check_dtypes=True):
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 77a3d4785d295..2f3b0a9f76de9 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -9,7 +9,7 @@
import numpy as np
import pytest
-from pandas.compat import lrange
+from pandas.compat import PY2, lrange
import pandas.util._test_decorators as td
import pandas as pd
@@ -83,6 +83,7 @@ def test_dropIncompleteRows(self):
tm.assert_index_equal(samesize_frame.index, self.frame.index)
tm.assert_index_equal(inp_frame2.index, self.frame.index)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = np.nan
@@ -139,7 +140,9 @@ def test_dropna(self):
assert_frame_equal(dropped, expected)
# bad input
- pytest.raises(ValueError, df.dropna, axis=3)
+ msg = "No axis named 3 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ df.dropna(axis=3)
def test_drop_and_dropna_caching(self):
# tst that cacher updates
@@ -158,10 +161,15 @@ def test_drop_and_dropna_caching(self):
def test_dropna_corner(self):
# bad input
- pytest.raises(ValueError, self.frame.dropna, how='foo')
- pytest.raises(TypeError, self.frame.dropna, how=None)
+ msg = "invalid how option: foo"
+ with pytest.raises(ValueError, match=msg):
+ self.frame.dropna(how='foo')
+ msg = "must specify how or thresh"
+ with pytest.raises(TypeError, match=msg):
+ self.frame.dropna(how=None)
# non-existent column - 8303
- pytest.raises(KeyError, self.frame.dropna, subset=['A', 'X'])
+ with pytest.raises(KeyError, match=r"^\['X'\]$"):
+ self.frame.dropna(subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
@@ -226,8 +234,12 @@ def test_fillna(self):
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
- pytest.raises(ValueError, self.tsframe.fillna)
- pytest.raises(ValueError, self.tsframe.fillna, 5, method='ffill')
+ msg = "Must specify a fill 'value' or 'method'"
+ with pytest.raises(ValueError, match=msg):
+ self.tsframe.fillna()
+ msg = "Cannot specify both 'value' and 'method'"
+ with pytest.raises(ValueError, match=msg):
+ self.tsframe.fillna(5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
@@ -595,11 +607,18 @@ def test_fillna_invalid_method(self):
def test_fillna_invalid_value(self):
# list
- pytest.raises(TypeError, self.frame.fillna, [1, 2])
+ msg = ("\"value\" parameter must be a scalar or dict, but you passed"
+ " a \"{}\"")
+ with pytest.raises(TypeError, match=msg.format('list')):
+ self.frame.fillna([1, 2])
# tuple
- pytest.raises(TypeError, self.frame.fillna, (1, 2))
+ with pytest.raises(TypeError, match=msg.format('tuple')):
+ self.frame.fillna((1, 2))
# frame with series
- pytest.raises(TypeError, self.frame.iloc[:, 0].fillna, self.frame)
+ msg = ("\"value\" parameter must be a scalar, dict or Series, but you"
+ " passed a \"DataFrame\"")
+ with pytest.raises(TypeError, match=msg):
+ self.frame.iloc[:, 0].fillna(self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 1f4da1bbb0470..6bef7e3f65b21 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -177,7 +177,9 @@ def test_insert(self):
with pytest.raises(ValueError, match='already exists'):
df.insert(1, 'a', df['b'])
- pytest.raises(ValueError, df.insert, 1, 'c', df['b'])
+ msg = "cannot insert c, already exists"
+ with pytest.raises(ValueError, match=msg):
+ df.insert(1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index a5bed14cf06d2..799d548100b5e 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -187,8 +187,11 @@ def check(result, expected=None):
# reindex is invalid!
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
- pytest.raises(ValueError, df.reindex, columns=['bar'])
- pytest.raises(ValueError, df.reindex, columns=['bar', 'foo'])
+ msg = "cannot reindex from a duplicate axis"
+ with pytest.raises(ValueError, match=msg):
+ df.reindex(columns=['bar'])
+ with pytest.raises(ValueError, match=msg):
+ df.reindex(columns=['bar', 'foo'])
# drop
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
@@ -306,7 +309,9 @@ def check(result, expected=None):
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
- pytest.raises(ValueError, lambda: df[df.A > 6])
+ msg = "cannot reindex from a duplicate axis"
+ with pytest.raises(ValueError, match=msg):
+ df[df.A > 6]
# dup aligining operations should work
# GH 5185
@@ -323,7 +328,9 @@ def check(result, expected=None):
columns=['A', 'A'])
# not-comparing like-labelled
- pytest.raises(ValueError, lambda: df1 == df2)
+ msg = "Can only compare identically-labeled DataFrame objects"
+ with pytest.raises(ValueError, match=msg):
+ df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index d1f1299a5202e..19b6636978643 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas.compat import PY2
+
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.tests.frame.common import TestData
@@ -71,6 +73,7 @@ def test_quantile_axis_mixed(self):
with pytest.raises(TypeError):
df.quantile(.5, axis=1, numeric_only=False)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_quantile_axis_parameter(self):
# GH 9543/9544
@@ -92,8 +95,12 @@ def test_quantile_axis_parameter(self):
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
- pytest.raises(ValueError, df.quantile, 0.1, axis=-1)
- pytest.raises(ValueError, df.quantile, 0.1, axis="column")
+ msg = "No axis named -1 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ df.quantile(0.1, axis=-1)
+ msg = "No axis named column for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ df.quantile(0.1, axis="column")
def test_quantile_interpolation(self):
# see gh-10174
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 0d06d0006a9e2..ba02cb54bcea1 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -78,10 +78,10 @@ def test_query_numexpr(self):
result = df.eval('A+1', engine='numexpr')
assert_series_equal(result, self.expected2, check_names=False)
else:
- pytest.raises(ImportError,
- lambda: df.query('A>0', engine='numexpr'))
- pytest.raises(ImportError,
- lambda: df.eval('A+1', engine='numexpr'))
+ with pytest.raises(ImportError):
+ df.query('A>0', engine='numexpr')
+ with pytest.raises(ImportError):
+ df.eval('A+1', engine='numexpr')
class TestDataFrameEval(TestData):
@@ -852,9 +852,10 @@ def test_str_query_method(self, parser, engine):
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
- pytest.raises(NotImplementedError, df.query, ex,
- engine=engine, parser=parser,
- local_dict={'strings': df.strings})
+ msg = r"'(Not)?In' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
+ df.query(ex, engine=engine, parser=parser,
+ local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 127a64da38ba3..50c66d3f8db00 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -837,7 +837,9 @@ def test_replace_input_formats_listlike(self):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
- pytest.raises(ValueError, df.replace, to_rep, values[1:])
+ msg = r"Replacement lists must match in length\. Expecting 3 got 2"
+ with pytest.raises(ValueError, match=msg):
+ df.replace(to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
@@ -850,7 +852,9 @@ def test_replace_input_formats_scalar(self):
for k, v in compat.iteritems(df)}
assert_frame_equal(filled, DataFrame(expected))
- pytest.raises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
+ msg = "value argument must be scalar, dict, or Series"
+ with pytest.raises(TypeError, match=msg):
+ df.replace(to_rep, [np.nan, 0, ''])
# list to scalar
to_rep = [np.nan, 0, '']
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index daac084f657af..3cac61977a486 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -394,7 +394,10 @@ def test_stack_mixed_levels(self):
# When mixed types are passed and the ints are not level
# names, raise
- pytest.raises(ValueError, df2.stack, level=['animal', 0])
+ msg = ("level should contain all level names or all level numbers, not"
+ " a mixture of the two")
+ with pytest.raises(ValueError, match=msg):
+ df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index 85e6373b384e4..8b29394bcab84 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import lrange
+from pandas.compat import PY2, lrange
import pandas as pd
from pandas import (
@@ -21,6 +21,7 @@
class TestDataFrameSorting(TestData):
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
@@ -54,8 +55,9 @@ def test_sort_values(self):
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
- pytest.raises(ValueError, lambda: frame.sort_values(
- by=['A', 'B'], axis=2, inplace=True))
+ msg = "No axis named 2 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ frame.sort_values(by=['A', 'B'], axis=2, inplace=True)
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index 31e81a9ca77c2..716a9e30e4cc3 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -8,7 +8,7 @@
import pytest
import pytz
-from pandas.compat import product
+from pandas.compat import PY2, product
import pandas as pd
from pandas import (
@@ -395,7 +395,9 @@ def test_tshift(self):
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
- pytest.raises(ValueError, no_freq.tshift)
+ msg = "Freq was not given and was not set in the index"
+ with pytest.raises(ValueError, match=msg):
+ no_freq.tshift()
def test_truncate(self):
ts = self.tsframe[::3]
@@ -436,9 +438,10 @@ def test_truncate(self):
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
- pytest.raises(ValueError, ts.truncate,
- before=ts.index[-1] - ts.index.freq,
- after=ts.index[0] + ts.index.freq)
+ msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
+ with pytest.raises(ValueError, match=msg):
+ ts.truncate(before=ts.index[-1] - ts.index.freq,
+ after=ts.index[0] + ts.index.freq)
def test_truncate_copy(self):
index = self.tsframe.index
@@ -781,14 +784,18 @@ def test_between_time_axis_raises(self, axis):
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ('08:00:00', '09:00:00')
+ msg = "Index must be DatetimeIndex"
if axis in ['columns', 1]:
ts.index = mask
- pytest.raises(TypeError, ts.between_time, stime, etime)
- pytest.raises(TypeError, ts.between_time, stime, etime, axis=0)
+ with pytest.raises(TypeError, match=msg):
+ ts.between_time(stime, etime)
+ with pytest.raises(TypeError, match=msg):
+ ts.between_time(stime, etime, axis=0)
if axis in ['index', 0]:
ts.columns = mask
- pytest.raises(TypeError, ts.between_time, stime, etime, axis=1)
+ with pytest.raises(TypeError, match=msg):
+ ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
@@ -829,6 +836,7 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self):
'new': [1e9, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_frame_to_period(self):
K = 5
@@ -854,7 +862,9 @@ def test_frame_to_period(self):
pts = df.to_period('M', axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
- pytest.raises(ValueError, df.to_period, axis=2)
+ msg = "No axis named 2 for object type <class 'type'>"
+ with pytest.raises(ValueError, match=msg):
+ df.to_period(axis=2)
@pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert'])
def test_tz_convert_and_localize(self, fn):
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 61eefccede5dd..54a8712a9c645 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -109,8 +109,9 @@ def test_to_csv_from_csv2(self):
xp.columns = col_aliases
assert_frame_equal(xp, rs)
- pytest.raises(ValueError, self.frame2.to_csv, path,
- header=['AA', 'X'])
+ msg = "Writing 4 cols but got 2 aliases"
+ with pytest.raises(ValueError, match=msg):
+ self.frame2.to_csv(path, header=['AA', 'X'])
def test_to_csv_from_csv3(self):
| xref #24332 | https://api.github.com/repos/pandas-dev/pandas/pulls/25516 | 2019-03-02T16:22:39Z | 2019-03-04T18:56:49Z | 2019-03-04T18:56:49Z | 2019-03-05T12:22:36Z |
fix MacPython / pandas-wheels ci failures | diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c56bf944699e2..3f75c508d22f9 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -229,7 +229,7 @@ def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
- msg = ("'<' not supported between instances of 'complex' and"
+ msg = (r"'(<|>)' not supported between instances of 'complex' and"
r" 'complex'|"
r"unorderable types: complex\(\) > complex\(\)")
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index e83bdb1af9121..2a64947042979 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -409,9 +409,9 @@ def test_mixed_integer_from_list(self):
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
- msg = ("'<' not supported between instances of 'datetime.datetime'"
- r" and 'int'|"
- r"unorderable types: int\(\) > datetime.datetime\(\)")
+ msg = (r"'(<|>)' not supported between instances of"
+ r" 'datetime\.datetime' and 'int'|"
+ r"unorderable types: int\(\) > datetime\.datetime\(\)")
if compat.PY2:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
| xref https://github.com/pandas-dev/pandas/pull/25452#issuecomment-468654194
| https://api.github.com/repos/pandas-dev/pandas/pulls/25505 | 2019-03-01T14:43:42Z | 2019-03-01T16:52:07Z | 2019-03-01T16:52:07Z | 2019-03-01T17:47:46Z |
STY: use pytest.raises context manager (arithmetic, arrays, computati… | diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index c31d7acad3111..0faed74d4a021 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -205,10 +205,20 @@ def test_subtraction_ops(self):
td = Timedelta('1 days')
dt = Timestamp('20130101')
- pytest.raises(TypeError, lambda: tdi - dt)
- pytest.raises(TypeError, lambda: tdi - dti)
- pytest.raises(TypeError, lambda: td - dt)
- pytest.raises(TypeError, lambda: td - dti)
+ msg = "cannot subtract a datelike from a TimedeltaArray"
+ with pytest.raises(TypeError, match=msg):
+ tdi - dt
+ with pytest.raises(TypeError, match=msg):
+ tdi - dti
+
+ msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
+ " but received a 'Timedelta'")
+ with pytest.raises(TypeError, match=msg):
+ td - dt
+
+ msg = "bad operand type for unary -: 'DatetimeArray'"
+ with pytest.raises(TypeError, match=msg):
+ td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
@@ -265,19 +275,38 @@ def _check(result, expected):
_check(result, expected)
# tz mismatches
- pytest.raises(TypeError, lambda: dt_tz - ts)
- pytest.raises(TypeError, lambda: dt_tz - dt)
- pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
- pytest.raises(TypeError, lambda: dt - dt_tz)
- pytest.raises(TypeError, lambda: ts - dt_tz)
- pytest.raises(TypeError, lambda: ts_tz2 - ts)
- pytest.raises(TypeError, lambda: ts_tz2 - dt)
- pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
+ msg = ("Timestamp subtraction must have the same timezones or no"
+ " timezones")
+ with pytest.raises(TypeError, match=msg):
+ dt_tz - ts
+ msg = "can't subtract offset-naive and offset-aware datetimes"
+ with pytest.raises(TypeError, match=msg):
+ dt_tz - dt
+ msg = ("Timestamp subtraction must have the same timezones or no"
+ " timezones")
+ with pytest.raises(TypeError, match=msg):
+ dt_tz - ts_tz2
+ msg = "can't subtract offset-naive and offset-aware datetimes"
+ with pytest.raises(TypeError, match=msg):
+ dt - dt_tz
+ msg = ("Timestamp subtraction must have the same timezones or no"
+ " timezones")
+ with pytest.raises(TypeError, match=msg):
+ ts - dt_tz
+ with pytest.raises(TypeError, match=msg):
+ ts_tz2 - ts
+ with pytest.raises(TypeError, match=msg):
+ ts_tz2 - dt
+ with pytest.raises(TypeError, match=msg):
+ ts_tz - ts_tz2
# with dti
- pytest.raises(TypeError, lambda: dti - ts_tz)
- pytest.raises(TypeError, lambda: dti_tz - ts)
- pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
+ with pytest.raises(TypeError, match=msg):
+ dti - ts_tz
+ with pytest.raises(TypeError, match=msg):
+ dti_tz - ts
+ with pytest.raises(TypeError, match=msg):
+ dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
@@ -349,8 +378,11 @@ def test_addition_ops(self):
tm.assert_index_equal(result, expected)
# unequal length
- pytest.raises(ValueError, lambda: tdi + dti[0:1])
- pytest.raises(ValueError, lambda: tdi[0:1] + dti)
+ msg = "cannot add indices of unequal length"
+ with pytest.raises(ValueError, match=msg):
+ tdi + dti[0:1]
+ with pytest.raises(ValueError, match=msg):
+ tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 5efcd527de8d8..7ce82d5bcdded 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -18,8 +18,11 @@ def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
- pytest.raises(TypeError, lambda: cat.min())
- pytest.raises(TypeError, lambda: cat.max())
+ msg = "Categorical is not ordered for operation {}"
+ with pytest.raises(TypeError, match=msg.format('min')):
+ cat.min()
+ with pytest.raises(TypeError, match=msg.format('max')):
+ cat.max()
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
@@ -108,18 +111,24 @@ def test_searchsorted(self):
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
- pytest.raises(KeyError, lambda: c1.searchsorted('cucumber'))
- pytest.raises(KeyError, lambda: s1.searchsorted('cucumber'))
+ msg = r"Value\(s\) to be inserted must be in categories"
+ with pytest.raises(KeyError, match=msg):
+ c1.searchsorted('cucumber')
+ with pytest.raises(KeyError, match=msg):
+ s1.searchsorted('cucumber')
# Searching for multiple values one of each is not from the Categorical
- pytest.raises(KeyError,
- lambda: c1.searchsorted(['bread', 'cucumber']))
- pytest.raises(KeyError,
- lambda: s1.searchsorted(['bread', 'cucumber']))
+ with pytest.raises(KeyError, match=msg):
+ c1.searchsorted(['bread', 'cucumber'])
+ with pytest.raises(KeyError, match=msg):
+ s1.searchsorted(['bread', 'cucumber'])
# searchsorted call for unordered Categorical
- pytest.raises(ValueError, lambda: c2.searchsorted('apple'))
- pytest.raises(ValueError, lambda: s2.searchsorted('apple'))
+ msg = "Categorical not ordered"
+ with pytest.raises(ValueError, match=msg):
+ c2.searchsorted('apple')
+ with pytest.raises(ValueError, match=msg):
+ s2.searchsorted('apple')
def test_unique(self):
# categories are reordered based on value when ordered=False
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index b2965bbcc456a..e1264722aedcd 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -4,6 +4,8 @@
import numpy as np
import pytest
+from pandas.compat import PY2
+
import pandas as pd
from pandas import Categorical, DataFrame, Series, date_range
from pandas.tests.arrays.categorical.common import TestCategorical
@@ -17,6 +19,7 @@ def test_categories_none_comparisons(self):
'a', 'c', 'c', 'c'], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_comparisons(self):
result = self.factor[self.factor == 'a']
@@ -95,16 +98,24 @@ def test_comparisons(self):
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
- pytest.raises(TypeError, lambda: cat > s)
- pytest.raises(TypeError, lambda: cat_rev > s)
- pytest.raises(TypeError, lambda: s < cat)
- pytest.raises(TypeError, lambda: s < cat_rev)
+ msg = ("Cannot compare a Categorical for op __gt__ with type"
+ r" <class 'numpy\.ndarray'>")
+ with pytest.raises(TypeError, match=msg):
+ cat > s
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > s
+ with pytest.raises(TypeError, match=msg):
+ s < cat
+ with pytest.raises(TypeError, match=msg):
+ s < cat_rev
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
- pytest.raises(TypeError, lambda: cat > a)
- pytest.raises(TypeError, lambda: cat_rev > a)
+ with pytest.raises(TypeError, match=msg):
+ cat > a
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > a
# Make sure that unequal comparison take the categories order in
# account
@@ -163,16 +174,23 @@ def test_comparison_with_unknown_scalars(self):
# for unequal comps, but not for equal/not equal
cat = Categorical([1, 2, 3], ordered=True)
- pytest.raises(TypeError, lambda: cat < 4)
- pytest.raises(TypeError, lambda: cat > 4)
- pytest.raises(TypeError, lambda: 4 < cat)
- pytest.raises(TypeError, lambda: 4 > cat)
+ msg = ("Cannot compare a Categorical for op __{}__ with a scalar,"
+ " which is not a category")
+ with pytest.raises(TypeError, match=msg.format('lt')):
+ cat < 4
+ with pytest.raises(TypeError, match=msg.format('gt')):
+ cat > 4
+ with pytest.raises(TypeError, match=msg.format('gt')):
+ 4 < cat
+ with pytest.raises(TypeError, match=msg.format('lt')):
+ 4 > cat
tm.assert_numpy_array_equal(cat == 4,
np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4,
np.array([True, True, True]))
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
@pytest.mark.parametrize('data,reverse,base', [
(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
@@ -219,16 +237,26 @@ def test_comparisons(self, data, reverse, base):
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
- pytest.raises(TypeError, lambda: cat > s)
- pytest.raises(TypeError, lambda: cat_rev > s)
- pytest.raises(TypeError, lambda: cat > a)
- pytest.raises(TypeError, lambda: cat_rev > a)
+ msg = ("Cannot compare a Categorical for op __gt__ with type"
+ r" <class 'numpy\.ndarray'>")
+ with pytest.raises(TypeError, match=msg):
+ cat > s
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > s
+ with pytest.raises(TypeError, match=msg):
+ cat > a
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > a
- pytest.raises(TypeError, lambda: s < cat)
- pytest.raises(TypeError, lambda: s < cat_rev)
+ with pytest.raises(TypeError, match=msg):
+ s < cat
+ with pytest.raises(TypeError, match=msg):
+ s < cat_rev
- pytest.raises(TypeError, lambda: a < cat)
- pytest.raises(TypeError, lambda: a < cat_rev)
+ with pytest.raises(TypeError, match=msg):
+ a < cat
+ with pytest.raises(TypeError, match=msg):
+ a < cat_rev
@pytest.mark.parametrize('ctor', [
lambda *args, **kwargs: Categorical(*args, **kwargs),
@@ -287,16 +315,21 @@ def test_numeric_like_ops(self):
right=False, labels=cat_labels)
# numeric ops should not succeed
- for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
- pytest.raises(TypeError,
- lambda: getattr(df, op)(df))
+ for op, str_rep in [('__add__', r'\+'),
+ ('__sub__', '-'),
+ ('__mul__', r'\*'),
+ ('__truediv__', '/')]:
+ msg = r"Series cannot perform the operation {}".format(str_rep)
+ with pytest.raises(TypeError, match=msg):
+ getattr(df, op)(df)
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = df['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
- pytest.raises(TypeError,
- lambda: getattr(s, op)(numeric_only=False))
+ msg = "Categorical cannot perform the operation {}".format(op)
+ with pytest.raises(TypeError, match=msg):
+ getattr(s, op)(numeric_only=False)
# mad technically works because it takes always the numeric data
@@ -306,8 +339,13 @@ def test_numeric_like_ops(self):
np.sum(s)
# numeric ops on a Series
- for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
- pytest.raises(TypeError, lambda: getattr(s, op)(2))
+ for op, str_rep in [('__add__', r'\+'),
+ ('__sub__', '-'),
+ ('__mul__', r'\*'),
+ ('__truediv__', '/')]:
+ msg = r"Series cannot perform the operation {}".format(str_rep)
+ with pytest.raises(TypeError, match=msg):
+ getattr(s, op)(2)
# invalid ufunc
with pytest.raises(TypeError):
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index 6e9d790bf85f3..2cbe7d9ea084c 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -449,11 +449,13 @@ def test_check_integrity(self):
# also OK even though empty
index = BlockIndex(1, locs, lengths) # noqa
- # block extend beyond end
- pytest.raises(Exception, BlockIndex, 10, [5], [10])
+ msg = "Block 0 extends beyond end"
+ with pytest.raises(ValueError, match=msg):
+ BlockIndex(10, [5], [10])
- # block overlap
- pytest.raises(Exception, BlockIndex, 10, [2, 5], [5, 3])
+ msg = "Block 0 overlaps"
+ with pytest.raises(ValueError, match=msg):
+ BlockIndex(10, [2, 5], [5, 3])
def test_to_int_index(self):
locs = [0, 10]
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index c1ba15f428eb7..a14d8e4471c23 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -285,10 +285,14 @@ def check_operands(left, right, cmp_op):
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = 'lhs {0} rhs'.format(cmp1)
+ msg = (r"only list-like( or dict-like)? objects are allowed to be"
+ r" passed to (DataFrame\.)?isin\(\), you passed a"
+ r" (\[|')bool(\]|')|"
+ "argument of type 'bool' is not iterable")
if cmp1 in ('in', 'not in') and not is_list_like(rhs):
- pytest.raises(TypeError, pd.eval, ex, engine=self.engine,
- parser=self.parser, local_dict={'lhs': lhs,
- 'rhs': rhs})
+ with pytest.raises(TypeError, match=msg):
+ pd.eval(ex, engine=self.engine, parser=self.parser,
+ local_dict={'lhs': lhs, 'rhs': rhs})
else:
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
@@ -341,9 +345,11 @@ def check_floor_division(self, lhs, arith1, rhs):
expected = lhs // rhs
self.check_equal(res, expected)
else:
- pytest.raises(TypeError, pd.eval, ex,
- local_dict={'lhs': lhs, 'rhs': rhs},
- engine=self.engine, parser=self.parser)
+ msg = (r"unsupported operand type\(s\) for //: 'VariableNode' and"
+ " 'VariableNode'")
+ with pytest.raises(TypeError, match=msg):
+ pd.eval(ex, local_dict={'lhs': lhs, 'rhs': rhs},
+ engine=self.engine, parser=self.parser)
def get_expected_pow_result(self, lhs, rhs):
try:
@@ -396,10 +402,14 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
skip_these = 'in', 'not in'
ex = '~(lhs {0} rhs)'.format(cmp1)
+ msg = (r"only list-like( or dict-like)? objects are allowed to be"
+ r" passed to (DataFrame\.)?isin\(\), you passed a"
+ r" (\[|')float(\]|')|"
+ "argument of type 'float' is not iterable")
if is_scalar(rhs) and cmp1 in skip_these:
- pytest.raises(TypeError, pd.eval, ex, engine=self.engine,
- parser=self.parser, local_dict={'lhs': lhs,
- 'rhs': rhs})
+ with pytest.raises(TypeError, match=msg):
+ pd.eval(ex, engine=self.engine, parser=self.parser,
+ local_dict={'lhs': lhs, 'rhs': rhs})
else:
# compound
if is_scalar(lhs) and is_scalar(rhs):
@@ -1101,8 +1111,9 @@ def test_simple_arith_ops(self):
ex3 = '1 {0} (x + 1)'.format(op)
if op in ('in', 'not in'):
- pytest.raises(TypeError, pd.eval, ex,
- engine=self.engine, parser=self.parser)
+ msg = "argument of type 'int' is not iterable"
+ with pytest.raises(TypeError, match=msg):
+ pd.eval(ex, engine=self.engine, parser=self.parser)
else:
expec = _eval_single_bin(1, op, 1, self.engine)
x = self.eval(ex, engine=self.engine, parser=self.parser)
@@ -1236,19 +1247,25 @@ def test_assignment_fails(self):
df = DataFrame(np.random.randn(5, 3), columns=list('abc'))
df2 = DataFrame(np.random.randn(5, 3))
expr1 = 'df = df2'
- pytest.raises(ValueError, self.eval, expr1,
- local_dict={'df': df, 'df2': df2})
+ msg = "cannot assign without a target object"
+ with pytest.raises(ValueError, match=msg):
+ self.eval(expr1, local_dict={'df': df, 'df2': df2})
def test_assignment_column(self):
df = DataFrame(np.random.randn(5, 2), columns=list('ab'))
orig_df = df.copy()
# multiple assignees
- pytest.raises(SyntaxError, df.eval, 'd c = a + b')
+ with pytest.raises(SyntaxError, match="invalid syntax"):
+ df.eval('d c = a + b')
# invalid assignees
- pytest.raises(SyntaxError, df.eval, 'd,c = a + b')
- pytest.raises(SyntaxError, df.eval, 'Timestamp("20131001") = a + b')
+ msg = "left hand side of an assignment must be a single name"
+ with pytest.raises(SyntaxError, match=msg):
+ df.eval('d,c = a + b')
+ msg = "can't assign to function call"
+ with pytest.raises(SyntaxError, match=msg):
+ df.eval('Timestamp("20131001") = a + b')
# single assignment - existing variable
expected = orig_df.copy()
@@ -1291,7 +1308,9 @@ def f():
# multiple assignment
df = orig_df.copy()
df.eval('c = a + b', inplace=True)
- pytest.raises(SyntaxError, df.eval, 'c = a = b')
+ msg = "can only assign a single expression"
+ with pytest.raises(SyntaxError, match=msg):
+ df.eval('c = a = b')
# explicit targets
df = orig_df.copy()
@@ -1545,21 +1564,24 @@ def test_check_many_exprs(self):
def test_fails_and(self):
df = DataFrame(np.random.randn(5, 3))
- pytest.raises(NotImplementedError, pd.eval, 'df > 2 and df > 3',
- local_dict={'df': df}, parser=self.parser,
- engine=self.engine)
+ msg = "'BoolOp' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
+ pd.eval('df > 2 and df > 3', local_dict={'df': df},
+ parser=self.parser, engine=self.engine)
def test_fails_or(self):
df = DataFrame(np.random.randn(5, 3))
- pytest.raises(NotImplementedError, pd.eval, 'df > 2 or df > 3',
- local_dict={'df': df}, parser=self.parser,
- engine=self.engine)
+ msg = "'BoolOp' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
+ pd.eval('df > 2 or df > 3', local_dict={'df': df},
+ parser=self.parser, engine=self.engine)
def test_fails_not(self):
df = DataFrame(np.random.randn(5, 3))
- pytest.raises(NotImplementedError, pd.eval, 'not df > 2',
- local_dict={'df': df}, parser=self.parser,
- engine=self.engine)
+ msg = "'Not' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
+ pd.eval('not df > 2', local_dict={'df': df}, parser=self.parser,
+ engine=self.engine)
def test_fails_ampersand(self):
df = DataFrame(np.random.randn(5, 3)) # noqa
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 62e96fd39a759..5c1f6ff405b3b 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -607,13 +607,16 @@ def test__get_dtype(input_param, result):
assert com._get_dtype(input_param) == result
-@pytest.mark.parametrize('input_param', [None,
- 1, 1.2,
- 'random string',
- pd.DataFrame([1, 2])])
-def test__get_dtype_fails(input_param):
+@pytest.mark.parametrize('input_param,expected_error_message', [
+ (None, "Cannot deduce dtype from null object"),
+ (1, "data type not understood"),
+ (1.2, "data type not understood"),
+ ('random string', "data type 'random string' not understood"),
+ (pd.DataFrame([1, 2]), "data type not understood")])
+def test__get_dtype_fails(input_param, expected_error_message):
# python objects
- pytest.raises(TypeError, com._get_dtype, input_param)
+ with pytest.raises(TypeError, match=expected_error_message):
+ com._get_dtype(input_param)
@pytest.mark.parametrize('input_param,result', [
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 1c1442d6f2f23..4366f610871ff 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -38,7 +38,8 @@ def test_equality_invalid(self):
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
- pytest.raises(TypeError, np.dtype, self.dtype)
+ with pytest.raises(TypeError, match="data type not understood"):
+ np.dtype(self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
@@ -87,8 +88,9 @@ def test_equality(self):
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
- pytest.raises(
- TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
+ msg = "cannot construct a CategoricalDtype"
+ with pytest.raises(TypeError, match=msg):
+ CategoricalDtype.construct_from_string('foo')
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
@@ -202,8 +204,9 @@ def test_hash_vs_equality(self):
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
- pytest.raises(ValueError,
- lambda: DatetimeTZDtype('ms', 'US/Eastern'))
+ msg = "DatetimeTZDtype only supports ns units"
+ with pytest.raises(ValueError, match=msg):
+ DatetimeTZDtype('ms', 'US/Eastern')
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
@@ -226,8 +229,9 @@ def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
- pytest.raises(TypeError,
- lambda: DatetimeTZDtype.construct_from_string('foo'))
+ msg = "Could not construct DatetimeTZDtype from 'foo'"
+ with pytest.raises(TypeError, match=msg):
+ DatetimeTZDtype.construct_from_string('foo')
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
| …on, dtypes)
xref #24332 | https://api.github.com/repos/pandas-dev/pandas/pulls/25504 | 2019-03-01T14:30:43Z | 2019-03-03T01:41:53Z | 2019-03-03T01:41:53Z | 2019-03-05T12:23:36Z |
BUG: User-facing AssertionError with add column to SparseDataFrame | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index e2ae7cfc0cc34..7b87f6a7f8d3c 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -402,7 +402,7 @@ Sparse
- Significant speedup in `SparseArray` initialization that benefits most operations, fixing performance regression introduced in v0.20.0 (:issue:`24985`)
- Bug in :class:`SparseFrame` constructor where passing ``None`` as the data would cause ``default_fill_value`` to be ignored (:issue:`16807`)
--
+- Bug in `SparseDataFrame` when adding a column in which the length of values does not match length of index, ``AssertionError`` is raised instead of raising ``ValueError`` (:issue:`25484`)
Other
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index d7fe6d837f04d..08729442e701f 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -430,8 +430,8 @@ def sp_maker(x, index=None):
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
- raise AssertionError('Length of values does not match '
- 'length of index')
+ raise ValueError('Length of values does not match '
+ 'length of index')
clean = value
elif hasattr(value, '__iter__'):
@@ -441,8 +441,8 @@ def sp_maker(x, index=None):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
- raise AssertionError('Length of values does not match '
- 'length of index')
+ raise ValueError('Length of values does not match '
+ 'length of index')
clean = sp_maker(value)
# Scalar
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 954dd85f16c28..45df08ccfeb48 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -568,8 +568,9 @@ def _check_frame(frame, orig):
assert len(frame['I'].sp_values) == N // 2
# insert ndarray wrong size
- msg = "Length of values does not match length of index"
- with pytest.raises(AssertionError, match=msg):
+ # GH 25484
+ msg = 'Length of values does not match length of index'
+ with pytest.raises(ValueError, match=msg):
frame['foo'] = np.random.randn(N - 1)
# scalar value
| - [ ] closes #25484
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25503 | 2019-03-01T13:42:30Z | 2019-04-12T13:12:04Z | 2019-04-12T13:12:04Z | 2019-04-12T13:13:07Z |
Update documentation of read_csv to explain that index_col can be a string containg a column name | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index b23a0f10e9e2b..ee0b156027f5e 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -110,11 +110,14 @@ names : array-like, default ``None``
List of column names to use. If file contains no header row, then you should
explicitly pass ``header=None``. Duplicates in this list will cause
a ``UserWarning`` to be issued.
-index_col : int or sequence or ``False``, default ``None``
- Column to use as the row labels of the ``DataFrame``. If a sequence is given, a
- MultiIndex is used. If you have a malformed file with delimiters at the end of
- each line, you might consider ``index_col=False`` to force pandas to *not* use
- the first column as the index (row names).
+index_col : int, str, sequence of int / str, or False, default ``None``
+ Column(s) to use as the row labels of the ``DataFrame``, either given as
+ string name or column index. If a sequence of int / str is given, a
+ MultiIndex is used.
+
+ Note: ``index_col=False`` can be used to force pandas to *not* use the first
+ column as the index, e.g. when you have a malformed file with delimiters at
+ the end of each line.
usecols : list-like or callable, default ``None``
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 4163a571df800..4e465f39aa3e5 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -102,11 +102,14 @@
List of column names to use. If file contains no header row, then you
should explicitly pass ``header=None``. Duplicates in this list will cause
a ``UserWarning`` to be issued.
-index_col : int, sequence or bool, optional
- Column to use as the row labels of the DataFrame. If a sequence is given, a
- MultiIndex is used. If you have a malformed file with delimiters at the end
- of each line, you might consider ``index_col=False`` to force pandas to
- not use the first column as the index (row names).
+index_col : int, str, sequence of int / str, or False, default ``None``
+ Column(s) to use as the row labels of the ``DataFrame``, either given as
+ string name or column index. If a sequence of int / str is given, a
+ MultiIndex is used.
+
+ Note: ``index_col=False`` can be used to force pandas to *not* use the first
+ column as the index, e.g. when you have a malformed file with delimiters at
+ the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
| Uses the suggestion from #22276 and closes #22276. | https://api.github.com/repos/pandas-dev/pandas/pulls/25502 | 2019-03-01T13:02:21Z | 2019-03-15T21:29:05Z | 2019-03-15T21:29:05Z | 2019-03-15T21:52:15Z |
BUG: Fix RecursionError during IntervalTree construction | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 4fcde7769b362..926239e7e5dc5 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -98,6 +98,7 @@ Bug Fixes
- Bug in :meth:`Series.is_unique` where single occurrences of ``NaN`` were not considered unique (:issue:`25180`)
- Bug in :func:`merge` when merging an empty ``DataFrame`` with an ``Int64`` column or a non-empty ``DataFrame`` with an ``Int64`` column that is all ``NaN`` (:issue:`25183`)
+- Bug in ``IntervalTree`` where a ``RecursionError`` occurs upon construction due to an overflow when adding endpoints, which also causes :class:`IntervalIndex` to crash during indexing operations (:issue:`25485`)
-
.. _whatsnew_0.242.contributors:
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index fb6f30c030f11..196841f35ed8d 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -284,7 +284,7 @@ cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode:
else:
# calculate a pivot so we can create child nodes
self.is_leaf_node = False
- self.pivot = np.median(left + right) / 2
+ self.pivot = np.median(left / 2 + right / 2)
left_set, right_set, center_set = self.classify_intervals(
left, right)
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 90722e66d8d8c..46b2d12015a22 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -171,3 +171,13 @@ def test_is_overlapping_trivial(self, closed, left, right):
# GH 23309
tree = IntervalTree(left, right, closed=closed)
assert tree.is_overlapping is False
+
+ def test_construction_overflow(self):
+ # GH 25485
+ left, right = np.arange(101), [np.iinfo(np.int64).max] * 101
+ tree = IntervalTree(left, right)
+
+ # pivot should be average of left/right medians
+ result = tree.root.pivot
+ expected = (50 + np.iinfo(np.int64).max) / 2
+ assert result == expected
| - [X] closes #25485
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Credit to @kingsykes for identifying the issue and determining the fix. | https://api.github.com/repos/pandas-dev/pandas/pulls/25498 | 2019-03-01T08:53:55Z | 2019-03-03T01:44:23Z | 2019-03-03T01:44:22Z | 2019-03-06T01:44:31Z |
TST: Fix test_stringified_slice_with_tz failure | diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index a3ee5fe39769f..c7147e6fe7063 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -100,8 +100,7 @@ def test_hash_error(self):
def test_stringified_slice_with_tz(self):
# GH#2658
- import datetime
- start = datetime.datetime.now()
+ start = '2013-01-07'
idx = date_range(start=start, freq="1d", periods=10, tz='US/Eastern')
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
| - [x] closes #25492
- [x] tests added / passed
This test shouldn't need to depend on `datetime.now`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25496 | 2019-03-01T06:11:51Z | 2019-03-01T12:47:31Z | 2019-03-01T12:47:30Z | 2019-03-01T16:10:39Z |
DOC: Reword Series.interpolate docstring for clarity | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index eb427a42a249b..ee8f9cba951b3 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6618,10 +6618,10 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
- `scipy.interpolate.interp1d`. Both 'polynomial' and 'spline'
- require that you also specify an `order` (int),
- e.g. ``df.interpolate(method='polynomial', order=5)``.
- These use the numerical values of the index.
+ `scipy.interpolate.interp1d`. These methods use the numerical
+ values of the index. Both 'polynomial' and 'spline' require that
+ you also specify an `order` (int), e.g.
+ ``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
| - [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The description of one group of methods in the `Series.interpolate` docstring currently reads:
> ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘spline’, ‘barycentric’, ‘polynomial’: Passed to scipy.interpolate.interp1d. Both ‘polynomial’ and ‘spline’ require that you also specify an order (int), e.g. df.interpolate(method='polynomial', order=4). These use the numerical values of the index.
In this item, it is unclear whether "These" in the final sentence refers to the 'polynomial' and 'spline' methods, or all eight methods in the group. This ambiguity was the cause of confusion in issue #21662, for example. This PR reorders the sentences for improved clarity. | https://api.github.com/repos/pandas-dev/pandas/pulls/25491 | 2019-03-01T01:44:41Z | 2019-03-01T17:21:03Z | 2019-03-01T17:21:03Z | 2019-03-01T17:21:11Z |
DOC: Add conda uninstall pandas to contributing guide | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 027f2d90bbb73..dc7e2c7058fbc 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -178,6 +178,7 @@ We'll now kick off a three-step process:
# Create and activate the build environment
conda env create -f environment.yml
conda activate pandas-dev
+ conda uninstall --force pandas
# or with older versions of Anaconda:
source activate pandas-dev
| closes https://github.com/pandas-dev/pandas/issues/25487
add command
`conda uninstall --force pandas` | https://api.github.com/repos/pandas-dev/pandas/pulls/25490 | 2019-02-28T23:58:46Z | 2019-03-03T20:30:19Z | 2019-03-03T20:30:19Z | 2019-03-03T20:30:19Z |
Fix JSON orient='table' issues with numeric column names | diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index dbf7f4f49ce86..e34c8ea1fe899 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -162,14 +162,14 @@ def _write(self, obj, orient, double_precision, ensure_ascii,
class JSONTableWriter(FrameWriter):
- _default_orient = 'records'
+ _default_orient = 'values'
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, index, default_handler=None):
"""
Adds a `schema` attribute with the Table Schema, resets
the index (can't do in caller, because the schema inference needs
- to know what the index is, forces orient to records, and forces
+ to know what the index is, forces orient to values, and forces
date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
@@ -177,9 +177,9 @@ def __init__(self, obj, orient, date_format, double_precision,
date_unit, index, default_handler=default_handler)
if date_format != 'iso':
- msg = ("Trying to write with `orient='table'` and "
- "`date_format='{fmt}'`. Table Schema requires dates "
- "to be formatted with `date_format='iso'`"
+ msg = ("Trying to write with orient='table' and "
+ "date_format='{fmt}'. Table Schema requires dates "
+ "to be formatted with date_format='iso'"
.format(fmt=date_format))
raise ValueError(msg)
@@ -211,7 +211,7 @@ def __init__(self, obj, orient, date_format, double_precision,
else:
self.obj = obj.reset_index(drop=False)
self.date_format = 'iso'
- self.orient = 'records'
+ self.orient = 'values'
self.index = index
def _write(self, obj, orient, double_precision, ensure_ascii,
@@ -221,7 +221,12 @@ def _write(self, obj, orient, double_precision, ensure_ascii,
ensure_ascii, date_unit,
iso_dates,
default_handler)
- serialized = '{{"schema": {schema}, "data": {data}}}'.format(
+ # add column names
+ column_names = dumps(obj.columns)
+ if len(data) > 2:
+ column_names = column_names + ','
+ data = data[0] + column_names + data[1:]
+ serialized = '{{"schema":{schema},"data":{data}}}'.format(
schema=dumps(self.schema), data=data)
return serialized
diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py
index 971386c91944e..d22f85372accb 100644
--- a/pandas/io/json/table_schema.py
+++ b/pandas/io/json/table_schema.py
@@ -255,7 +255,7 @@ def build_table_schema(data, index=True, primary_key=None, version=True):
schema['primaryKey'] = primary_key
if version:
- schema['pandas_version'] = '0.20.0'
+ schema['pandas_version'] = '0.25.0'
return schema
@@ -296,21 +296,28 @@ def parse_table_schema(json, precise_float):
pandas.read_json
"""
table = loads(json, precise_float=precise_float)
- col_order = [field['name'] for field in table['schema']['fields']]
- df = DataFrame(table['data'], columns=col_order)[col_order]
+ version = table['schema']['pandas_version']
+ if version == '0.20.0':
+ # Each table row is represented by a dict
+ col_order = [field['name'] for field in table['schema']['fields']]
+ df = DataFrame(table['data'], columns=col_order)[col_order]
+ elif version == '0.25.0':
+ # Each table row is represented by a list
+ col_order = table['data'][0]
+ df = DataFrame(table['data'][1:], columns=col_order)[col_order]
dtypes = {field['name']: convert_json_field_to_pandas_type(field)
for field in table['schema']['fields']}
# Cannot directly use as_type with timezone data on object; raise for now
if any(str(x).startswith('datetime64[ns, ') for x in dtypes.values()):
- raise NotImplementedError('table="orient" can not yet read timezone '
- 'data')
+ raise NotImplementedError("orient='table' can not yet read timezone "
+ "data")
# No ISO constructor for Timedelta as of yet, so need to raise
if 'timedelta64' in dtypes.values():
- raise NotImplementedError('table="orient" can not yet read '
- 'ISO-formatted Timedelta data')
+ raise NotImplementedError("orient='table' can not yet read "
+ "ISO-formatted Timedelta data")
df = df.astype(dtypes)
@@ -322,5 +329,7 @@ def parse_table_schema(json, precise_float):
else:
df.index.names = [None if x.startswith('level_') else x for x in
df.index.names]
+ # Reset columns dtype
+ df.columns = df.columns.values.tolist()
return df
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 4cc62d3db124f..74a60d70cd194 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -208,8 +208,8 @@ def test_build_series(self):
expected = OrderedDict([
('schema', schema),
- ('data', [OrderedDict([('id', 0), ('a', 1)]),
- OrderedDict([('id', 1), ('a', 2)])])])
+ ('data', [['id', 'a'], [0, 1], [1, 2]])
+ ])
assert result == expected
def test_to_json(self):
@@ -243,32 +243,15 @@ def test_to_json(self):
'fields': fields,
'primaryKey': ['idx'],
}
- data = [
- OrderedDict([('idx', 0), ('A', 1), ('B', 'a'),
- ('C', '2016-01-01T00:00:00.000Z'),
- ('D', 'P0DT1H0M0S'),
- ('E', 'a'), ('F', 'a'), ('G', 1.),
- ('H', '2016-01-01T06:00:00.000Z')
- ]),
- OrderedDict([('idx', 1), ('A', 2), ('B', 'b'),
- ('C', '2016-01-02T00:00:00.000Z'),
- ('D', 'P0DT1H1M0S'),
- ('E', 'b'), ('F', 'b'), ('G', 2.),
- ('H', '2016-01-02T06:00:00.000Z')
- ]),
- OrderedDict([('idx', 2), ('A', 3), ('B', 'c'),
- ('C', '2016-01-03T00:00:00.000Z'),
- ('D', 'P0DT1H2M0S'),
- ('E', 'c'), ('F', 'c'), ('G', 3.),
- ('H', '2016-01-03T06:00:00.000Z')
- ]),
- OrderedDict([('idx', 3), ('A', 4), ('B', 'c'),
- ('C', '2016-01-04T00:00:00.000Z'),
- ('D', 'P0DT1H3M0S'),
- ('E', 'c'), ('F', 'c'), ('G', 4.),
- ('H', '2016-01-04T06:00:00.000Z')
- ]),
- ]
+ data = [['idx', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'],
+ [0, 1, 'a', '2016-01-01T00:00:00.000Z', 'P0DT1H0M0S', 'a', 'a',
+ 1., '2016-01-01T06:00:00.000Z'],
+ [1, 2, 'b', '2016-01-02T00:00:00.000Z', 'P0DT1H1M0S', 'b', 'b',
+ 2., '2016-01-02T06:00:00.000Z'],
+ [2, 3, 'c', '2016-01-03T00:00:00.000Z', 'P0DT1H2M0S', 'c', 'c',
+ 3., '2016-01-03T06:00:00.000Z'],
+ [3, 4, 'c', '2016-01-04T00:00:00.000Z', 'P0DT1H3M0S', 'c', 'c',
+ 4., '2016-01-04T06:00:00.000Z']]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
@@ -277,16 +260,14 @@ def test_to_json_float_index(self):
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
-
- expected = (
- OrderedDict([('schema', {
+ expected = (OrderedDict([
+ ('schema', {
'fields': [{'name': 'index', 'type': 'number'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']
}),
- ('data', [OrderedDict([('index', 1.0), ('values', 1)]),
- OrderedDict([('index', 2.0), ('values', 1)])])])
- )
+ ('data', [['index', 'values'], [1.0, 1], [2.0, 1]])
+ ]))
assert result == expected
def test_to_json_period_index(self):
@@ -300,10 +281,9 @@ def test_to_json_period_index(self):
{'name': 'values', 'type': 'integer'}]
schema = {'fields': fields, 'primaryKey': ['index']}
- data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'),
- ('values', 1)]),
- OrderedDict([('index', '2016-02-01T00:00:00.000Z'),
- ('values', 1)])]
+ data = [['index', 'values'],
+ ['2015-11-01T00:00:00.000Z', 1],
+ ['2016-02-01T00:00:00.000Z', 1]]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
@@ -320,10 +300,7 @@ def test_to_json_categorical_index(self):
'ordered': False},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}),
- ('data', [
- OrderedDict([('index', 'a'),
- ('values', 1)]),
- OrderedDict([('index', 'b'), ('values', 1)])])])
+ ('data', [['index', 'values'], ['a', 1], ['b', 1]])])
)
assert result == expected
@@ -428,9 +405,7 @@ def test_categorical(self):
expected = OrderedDict([
('schema', {'fields': fields,
'primaryKey': ['idx']}),
- ('data', [OrderedDict([('idx', 0), ('values', 'a')]),
- OrderedDict([('idx', 1), ('values', 'b')]),
- OrderedDict([('idx', 2), ('values', 'a')])])])
+ ('data', [['idx', 'values'], [0, 'a'], [1, 'b'], [2, 'a']])])
assert result == expected
@pytest.mark.parametrize('idx,nm,prop', [
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index b222d679a6012..9b3e170ac1f5a 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1197,9 +1197,10 @@ def test_data_frame_size_after_to_json(self):
@pytest.mark.parametrize('index', [None, [1, 2], [1., 2.], ['a', 'b'],
['1', '2'], ['1.', '2.']])
- @pytest.mark.parametrize('columns', [['a', 'b'], ['1', '2'], ['1.', '2.']])
+ @pytest.mark.parametrize('columns', [None, [1, 2], [1., 2.], ['a', 'b'],
+ ['1', '2'], ['1.', '2.']])
def test_from_json_to_json_table_index_and_columns(self, index, columns):
- # GH25433 GH25435
+ # GH19129 GH25433 GH25435
expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
dfjson = expected.to_json(orient='table')
result = pd.read_json(dfjson, orient='table')
@@ -1229,6 +1230,50 @@ def test_read_json_table_convert_axes_raises(self):
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient='table', convert_axes=True)
+ @pytest.mark.parametrize('index, dfjson', [
+ (None,
+ '{"schema":{"fields":[{"name":"index","type":"integer"},{"name":"a",'
+ '"type":"integer"},{"name":"b","type":"number"},{"name":"c",'
+ '"type":"string"}],"primaryKey":["index"],"pandas_version":"0.20.0"},'
+ '"data":[{"index":0,"a":1,"b":3.0,"c":"5"},{"index":1,"a":2,"b":4.0,'
+ '"c":"6"}]}'),
+ ([1, 2],
+ '{"schema":{"fields":[{"name":"index","type":"integer"},{"name":"a",'
+ '"type":"integer"},{"name":"b","type":"number"},{"name":"c",'
+ '"type":"string"}],"primaryKey":["index"],"pandas_version":"0.20.0"},'
+ '"data":[{"index":1,"a":1,"b":3.0,"c":"5"},{"index":2,"a":2,"b":4.0,'
+ '"c":"6"}]}'),
+ ([1., 2.],
+ '{"schema":{"fields":[{"name":"index","type":"number"},{"name":"a",'
+ '"type":"integer"},{"name":"b","type":"number"},{"name":"c",'
+ '"type":"string"}],"primaryKey":["index"],"pandas_version":"0.20.0"},'
+ '"data":[{"index":1.0,"a":1,"b":3.0,"c":"5"},{"index":2.0,"a":2,'
+ '"b":4.0,"c":"6"}]}'),
+ (['a', 'b'],
+ '{"schema":{"fields":[{"name":"index","type":"string"},{"name":"a",'
+ '"type":"integer"},{"name":"b","type":"number"},{"name":"c",'
+ '"type":"string"}],"primaryKey":["index"],"pandas_version":"0.20.0"},'
+ '"data":[{"index":"a","a":1,"b":3.0,"c":"5"},{"index":"b","a":2,'
+ '"b":4.0,"c":"6"}]}'),
+ (['1', '2'],
+ '{"schema":{"fields":[{"name":"index","type":"string"},{"name":"a",'
+ '"type":"integer"},{"name":"b","type":"number"},{"name":"c",'
+ '"type":"string"}],"primaryKey":["index"],"pandas_version":"0.20.0"},'
+ '"data":[{"index":"1","a":1,"b":3.0,"c":"5"},{"index":"2","a":2,'
+ '"b":4.0,"c":"6"}]}'),
+ (['1.', '2.'],
+ '{"schema":{"fields":[{"name":"index","type":"string"},{"name":"a",'
+ '"type":"integer"},{"name":"b","type":"number"},{"name":"c",'
+ '"type":"string"}],"primaryKey":["index"],"pandas_version":"0.20.0"},'
+ '"data":[{"index":"1.","a":1,"b":3.0,"c":"5"},{"index":"2.","a":2,'
+ '"b":4.0,"c":"6"}]}')
+ ])
+ def test_read_json_table_version_0_20_0(self, index, dfjson):
+ expected = pd.DataFrame([[1, 3., '5'], [2, 4., '6']],
+ index=index, columns=['a', 'b', 'c'])
+ result = pd.read_json(dfjson, orient='table')
+ assert_frame_equal(result, expected)
+
@pytest.mark.parametrize('data, expected', [
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
@@ -1265,16 +1310,9 @@ def test_index_false_to_json_split(self, data, expected):
def test_index_false_to_json_table(self, data):
# GH 17394
# Testing index=False in to_json with orient='table'
-
result = data.to_json(orient='table', index=False)
result = json.loads(result)
-
- expected = {
- 'schema': pd.io.json.build_table_schema(data, index=False),
- 'data': DataFrame(data).to_dict(orient='records')
- }
-
- assert result == expected
+ assert 'primaryKey' not in result['schema']
@pytest.mark.parametrize('orient', [
'records', 'index', 'columns', 'values'
| closes #19129
closes #22525
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25488 | 2019-02-28T23:31:01Z | 2019-05-03T05:30:44Z | null | 2019-05-03T05:30:44Z |
DOC: Use correct pandas when building documentation | diff --git a/doc/make.py b/doc/make.py
index 438c4a04a3f08..75890658d7b30 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -323,7 +323,7 @@ def main():
# the import of `python_path` correctly. The latter is used to resolve
# the import within the module, injecting it into the global namespace
os.environ['PYTHONPATH'] = args.python_path
- sys.path.append(args.python_path)
+ sys.path.insert(0, args.python_path)
globals()['pandas'] = importlib.import_module('pandas')
# Set the matplotlib backend to the non-interactive Agg backend for all
| This manifested itself in #25419 - when trying to build documentation the version of pandas that gets inserted into the globals of this module may have inadvertently been picked up from another install on the system. This prioritizes the version of pandas dictated by command line args | https://api.github.com/repos/pandas-dev/pandas/pulls/25486 | 2019-02-28T22:00:57Z | 2019-03-01T17:57:39Z | 2019-03-01T17:57:39Z | 2019-03-01T17:57:42Z |
STY: use pytest.raises context manager (plotting, reductions, scalar...) | diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 7d721c7de3398..e6b9795aebe7c 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -267,13 +267,20 @@ def test_grouped_box_return_type(self):
def test_grouped_box_layout(self):
df = self.hist_df
- pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
- by=df.gender, layout=(1, 1))
- pytest.raises(ValueError, df.boxplot,
- column=['height', 'weight', 'category'],
- layout=(2, 1), return_type='dict')
- pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
- by=df.gender, layout=(-1, -1))
+ msg = "Layout of 1x1 must be larger than required size 2"
+ with pytest.raises(ValueError, match=msg):
+ df.boxplot(column=['weight', 'height'], by=df.gender,
+ layout=(1, 1))
+
+ msg = "The 'layout' keyword is not supported when 'by' is None"
+ with pytest.raises(ValueError, match=msg):
+ df.boxplot(column=['height', 'weight', 'category'],
+ layout=(2, 1), return_type='dict')
+
+ msg = "At least one dimension of layout must be positive"
+ with pytest.raises(ValueError, match=msg):
+ df.boxplot(column=['weight', 'height'], by=df.gender,
+ layout=(-1, -1))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index ad79cc97f8b77..6702ad6cfb761 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -97,7 +97,9 @@ def test_nonnumeric_exclude(self):
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
- pytest.raises(TypeError, df['A'].plot)
+ msg = "Empty 'DataFrame': no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
+ df['A'].plot()
def test_tsplot_deprecated(self):
from pandas.tseries.plotting import tsplot
@@ -140,10 +142,15 @@ def f(*args, **kwds):
def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
- pytest.raises(ValueError, ts.plot, style='b-', color='#000099')
+ msg = ("Cannot pass 'style' string with a color symbol and 'color' "
+ "keyword argument. Please use one or the other or pass 'style'"
+ " without a color symbol")
+ with pytest.raises(ValueError, match=msg):
+ ts.plot(style='b-', color='#000099')
s = ts.reset_index(drop=True)
- pytest.raises(ValueError, s.plot, style='b-', color='#000099')
+ with pytest.raises(ValueError, match=msg):
+ s.plot(style='b-', color='#000099')
@pytest.mark.slow
def test_high_freq(self):
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 7bdbdac54f7a6..4f0bef52b5e15 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -332,12 +332,17 @@ def test_grouped_hist_legacy2(self):
@pytest.mark.slow
def test_grouped_hist_layout(self):
df = self.hist_df
- pytest.raises(ValueError, df.hist, column='weight', by=df.gender,
- layout=(1, 1))
- pytest.raises(ValueError, df.hist, column='height', by=df.category,
- layout=(1, 3))
- pytest.raises(ValueError, df.hist, column='height', by=df.category,
- layout=(-1, -1))
+ msg = "Layout of 1x1 must be larger than required size 2"
+ with pytest.raises(ValueError, match=msg):
+ df.hist(column='weight', by=df.gender, layout=(1, 1))
+
+ msg = "Layout of 1x3 must be larger than required size 4"
+ with pytest.raises(ValueError, match=msg):
+ df.hist(column='height', by=df.category, layout=(1, 3))
+
+ msg = "At least one dimension of layout must be positive"
+ with pytest.raises(ValueError, match=msg):
+ df.hist(column='height', by=df.category, layout=(-1, -1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 44b95f7d1b00b..98248586f3d27 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -278,14 +278,20 @@ def test_subplot_titles(self, iris):
assert [p.get_title() for p in plot] == title
# Case len(title) > len(df)
- pytest.raises(ValueError, df.plot, subplots=True,
- title=title + ["kittens > puppies"])
+ msg = ("The length of `title` must equal the number of columns if"
+ " using `title` of type `list` and `subplots=True`")
+ with pytest.raises(ValueError, match=msg):
+ df.plot(subplots=True, title=title + ["kittens > puppies"])
# Case len(title) < len(df)
- pytest.raises(ValueError, df.plot, subplots=True, title=title[:2])
+ with pytest.raises(ValueError, match=msg):
+ df.plot(subplots=True, title=title[:2])
# Case subplots=False and title is of type list
- pytest.raises(ValueError, df.plot, subplots=False, title=title)
+ msg = ("Using `title` of type `list` is not supported unless"
+ " `subplots=True` is passed")
+ with pytest.raises(ValueError, match=msg):
+ df.plot(subplots=False, title=title)
# Case df with 3 numeric columns but layout of (2,2)
plot = df.drop('SepalWidth', axis=1).plot(subplots=True, layout=(2, 2),
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 8520855d14918..fbf7f610688ba 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -276,7 +276,9 @@ def test_timedelta_ops(self):
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
- pytest.raises(TypeError, getattr(td, op))
+ msg = "reduction operation '{}' not allowed for this dtype"
+ with pytest.raises(TypeError, match=msg.format(op)):
+ getattr(td, op)()
# GH#10040
# make sure NaT is properly handled by median()
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index d0f87618ad3af..8ca19745055a3 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -8,6 +8,7 @@
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
from pandas._libs.tslibs.parsing import DateParseError
+from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat import iteritems, text_type
from pandas.compat.numpy import np_datetime64_compat
@@ -35,7 +36,9 @@ def test_construction(self):
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
- pytest.raises(ValueError, i1.__ne__, i4)
+ msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
+ i1 != i4
assert i4 == i5
i1 = Period.now('Q')
@@ -74,9 +77,12 @@ def test_construction(self):
freq='U')
assert i1 == expected
- pytest.raises(ValueError, Period, ordinal=200701)
+ msg = "Must supply freq for ordinal value"
+ with pytest.raises(ValueError, match=msg):
+ Period(ordinal=200701)
- pytest.raises(ValueError, Period, '2007-1-1', freq='X')
+ with pytest.raises(ValueError, match="Invalid frequency: X"):
+ Period('2007-1-1', freq='X')
def test_construction_bday(self):
@@ -233,10 +239,6 @@ def test_period_constructor_offsets(self):
freq='U')
assert i1 == expected
- pytest.raises(ValueError, Period, ordinal=200701)
-
- pytest.raises(ValueError, Period, '2007-1-1', freq='X')
-
def test_invalid_arguments(self):
with pytest.raises(ValueError):
Period(datetime.now())
@@ -925,8 +927,9 @@ def test_properties_secondly(self):
class TestPeriodField(object):
def test_get_period_field_array_raises_on_out_of_range(self):
- pytest.raises(ValueError, libperiod.get_period_field_arr, -1,
- np.empty(1), 0)
+ msg = "Buffer dtype mismatch, expected 'int64_t' but got 'double'"
+ with pytest.raises(ValueError, match=msg):
+ libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestComparisons(object):
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index bf71c37aa9c3d..ee2c2e9e1959c 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -250,9 +250,13 @@ def check(value):
assert rng.microseconds == 0
assert rng.nanoseconds == 0
- pytest.raises(AttributeError, lambda: rng.hours)
- pytest.raises(AttributeError, lambda: rng.minutes)
- pytest.raises(AttributeError, lambda: rng.milliseconds)
+ msg = "'Timedelta' object has no attribute '{}'"
+ with pytest.raises(AttributeError, match=msg.format('hours')):
+ rng.hours
+ with pytest.raises(AttributeError, match=msg.format('minutes')):
+ rng.minutes
+ with pytest.raises(AttributeError, match=msg.format('milliseconds')):
+ rng.milliseconds
# GH 10050
check(rng.days)
@@ -272,9 +276,13 @@ def check(value):
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
- pytest.raises(AttributeError, lambda: rng.hours)
- pytest.raises(AttributeError, lambda: rng.minutes)
- pytest.raises(AttributeError, lambda: rng.milliseconds)
+ msg = "'Timedelta' object has no attribute '{}'"
+ with pytest.raises(AttributeError, match=msg.format('hours')):
+ rng.hours
+ with pytest.raises(AttributeError, match=msg.format('minutes')):
+ rng.minutes
+ with pytest.raises(AttributeError, match=msg.format('milliseconds')):
+ rng.milliseconds
# components
tup = pd.to_timedelta(-1, 'us').components
@@ -449,8 +457,12 @@ def test_round(self):
assert r2 == s2
# invalid
- for freq in ['Y', 'M', 'foobar']:
- pytest.raises(ValueError, lambda: t1.round(freq))
+ for freq, msg in [
+ ('Y', '<YearEnd: month=12> is a non-fixed frequency'),
+ ('M', '<MonthEnd> is a non-fixed frequency'),
+ ('foobar', 'Invalid frequency: foobar')]:
+ with pytest.raises(ValueError, match=msg):
+ t1.round(freq)
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
@@ -495,11 +507,15 @@ def test_round(self):
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
- tm.assert_index_equal(r2, s2)
+ tm.assert_index_equal(r2, s2)
# invalid
- for freq in ['Y', 'M', 'foobar']:
- pytest.raises(ValueError, lambda: t1.round(freq))
+ for freq, msg in [
+ ('Y', '<YearEnd: month=12> is a non-fixed frequency'),
+ ('M', '<MonthEnd> is a non-fixed frequency'),
+ ('foobar', 'Invalid frequency: foobar')]:
+ with pytest.raises(ValueError, match=msg):
+ t1.round(freq)
def test_contains(self):
# Checking for any NaT-like objects
@@ -609,9 +625,12 @@ def test_overflow(self):
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
- pytest.raises(ValueError, lambda: (s - s.min()).sum())
+ msg = "overflow in timedelta operation"
+ with pytest.raises(ValueError, match=msg):
+ (s - s.min()).sum()
s1 = s[0:10000]
- pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())
+ with pytest.raises(ValueError, match=msg):
+ (s1 - s1.min()).sum()
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 7d81d905eac4f..b55d00b44fd67 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -60,7 +60,9 @@ def check(value, equal):
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
- pytest.raises(AttributeError, lambda: ts.millisecond)
+ msg = "'Timestamp' object has no attribute 'millisecond'"
+ with pytest.raises(AttributeError, match=msg):
+ ts.millisecond
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
@@ -78,7 +80,9 @@ def check(value, equal):
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
- pytest.raises(AttributeError, lambda: ts.millisecond)
+ msg = "'Timestamp' object has no attribute 'millisecond'"
+ with pytest.raises(AttributeError, match=msg):
+ ts.millisecond
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index bfb5103c97adc..b31738794c854 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -7,7 +7,7 @@
import pytest
from pandas._libs.sparse import BlockIndex, IntIndex
-from pandas.compat import lrange
+from pandas.compat import PY2, lrange
from pandas.errors import PerformanceWarning
import pandas as pd
@@ -145,8 +145,9 @@ def test_constructor_ndarray(self, float_frame):
tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=['A']))
# raise on level argument
- pytest.raises(TypeError, float_frame.reindex, columns=['A'],
- level=1)
+ msg = "Reindex by level not supported for sparse"
+ with pytest.raises(TypeError, match=msg):
+ float_frame.reindex(columns=['A'], level=1)
# wrong length index / columns
with pytest.raises(ValueError, match="^Index length"):
@@ -433,7 +434,8 @@ def test_getitem(self):
exp = sdf.reindex(columns=['a', 'b'])
tm.assert_sp_frame_equal(result, exp)
- pytest.raises(Exception, sdf.__getitem__, ['a', 'd'])
+ with pytest.raises(KeyError, match=r"\['d'\] not in index"):
+ sdf[['a', 'd']]
def test_iloc(self, float_frame):
@@ -504,7 +506,9 @@ def test_getitem_overload(self, float_frame):
subframe = float_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
- pytest.raises(Exception, float_frame.__getitem__, indexer[:-1])
+ msg = "Item wrong length 9 instead of 10"
+ with pytest.raises(ValueError, match=msg):
+ float_frame[indexer[:-1]]
def test_setitem(self, float_frame, float_frame_int_kind,
float_frame_dense,
@@ -551,8 +555,9 @@ def _check_frame(frame, orig):
assert len(frame['I'].sp_values) == N // 2
# insert ndarray wrong size
- pytest.raises(Exception, frame.__setitem__, 'foo',
- np.random.randn(N - 1))
+ msg = "Length of values does not match length of index"
+ with pytest.raises(AssertionError, match=msg):
+ frame['foo'] = np.random.randn(N - 1)
# scalar value
frame['J'] = 5
@@ -625,17 +630,22 @@ def test_delitem(self, float_frame):
def test_set_columns(self, float_frame):
float_frame.columns = float_frame.columns
- pytest.raises(Exception, setattr, float_frame, 'columns',
- float_frame.columns[:-1])
+ msg = ("Length mismatch: Expected axis has 4 elements, new values have"
+ " 3 elements")
+ with pytest.raises(ValueError, match=msg):
+ float_frame.columns = float_frame.columns[:-1]
def test_set_index(self, float_frame):
float_frame.index = float_frame.index
- pytest.raises(Exception, setattr, float_frame, 'index',
- float_frame.index[:-1])
+ msg = ("Length mismatch: Expected axis has 10 elements, new values"
+ " have 9 elements")
+ with pytest.raises(ValueError, match=msg):
+ float_frame.index = float_frame.index[:-1]
def test_ctor_reindex(self):
idx = pd.Index([0, 1, 2, 3])
- with pytest.raises(ValueError, match=''):
+ msg = "Length of passed values is 2, index implies 4"
+ with pytest.raises(ValueError, match=msg):
pd.SparseDataFrame({"A": [1, 2]}, index=idx)
def test_append(self, float_frame):
@@ -858,6 +868,7 @@ def test_describe(self, float_frame):
str(float_frame)
desc = float_frame.describe() # noqa
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_join(self, float_frame):
left = float_frame.loc[:, ['A', 'B']]
right = float_frame.loc[:, ['C', 'D']]
@@ -865,7 +876,10 @@ def test_join(self, float_frame):
tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False)
right = float_frame.loc[:, ['B', 'D']]
- pytest.raises(Exception, left.join, right)
+ msg = (r"columns overlap but no suffix specified: Index\(\['B'\],"
+ r" dtype='object'\)")
+ with pytest.raises(ValueError, match=msg):
+ left.join(right)
with pytest.raises(ValueError, match='Other Series must have a name'):
float_frame.join(Series(
@@ -1046,8 +1060,11 @@ def _check(frame):
_check(float_frame_int_kind)
# for now
- pytest.raises(Exception, _check, float_frame_fill0)
- pytest.raises(Exception, _check, float_frame_fill2)
+ msg = "This routine assumes NaN fill value"
+ with pytest.raises(TypeError, match=msg):
+ _check(float_frame_fill0)
+ with pytest.raises(TypeError, match=msg):
+ _check(float_frame_fill2)
def test_transpose(self, float_frame, float_frame_int_kind,
float_frame_dense,
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index 7eed47d0de888..93cf629f20957 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -452,12 +452,13 @@ def _check_getitem(sp, dense):
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
- pytest.raises(Exception, self.bseries.__getitem__,
- len(self.bseries) + 1)
+ with pytest.raises(IndexError, match="Out of bounds access"):
+ self.bseries[len(self.bseries) + 1]
# index not contained
- pytest.raises(Exception, self.btseries.__getitem__,
- self.btseries.index[-1] + BDay())
+ msg = r"Timestamp\('2011-01-31 00:00:00', freq='B'\)"
+ with pytest.raises(KeyError, match=msg):
+ self.btseries[self.btseries.index[-1] + BDay()]
def test_get_get_value(self):
tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])
@@ -523,8 +524,9 @@ def _compare(idx):
self._check_all(_compare_with_dense)
- pytest.raises(Exception, self.bseries.take,
- [0, len(self.bseries) + 1])
+ msg = "index 21 is out of bounds for size 20"
+ with pytest.raises(IndexError, match=msg):
+ self.bseries.take([0, len(self.bseries) + 1])
# Corner case
# XXX: changed test. Why wsa this considered a corner case?
@@ -1138,25 +1140,35 @@ def test_to_coo_text_names_text_row_levels_nosort(self):
def test_to_coo_bad_partition_nonnull_intersection(self):
ss = self.sparse_series[0]
- pytest.raises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])
+ msg = "Is not a partition because intersection is not null"
+ with pytest.raises(ValueError, match=msg):
+ ss.to_coo(['A', 'B', 'C'], ['C', 'D'])
def test_to_coo_bad_partition_small_union(self):
ss = self.sparse_series[0]
- pytest.raises(ValueError, ss.to_coo, ['A'], ['C', 'D'])
+ msg = "Is not a partition because union is not the whole"
+ with pytest.raises(ValueError, match=msg):
+ ss.to_coo(['A'], ['C', 'D'])
def test_to_coo_nlevels_less_than_two(self):
ss = self.sparse_series[0]
ss.index = np.arange(len(ss.index))
- pytest.raises(ValueError, ss.to_coo)
+ msg = "to_coo requires MultiIndex with nlevels > 2"
+ with pytest.raises(ValueError, match=msg):
+ ss.to_coo()
def test_to_coo_bad_ilevel(self):
ss = self.sparse_series[0]
- pytest.raises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])
+ with pytest.raises(KeyError, match="Level E not found"):
+ ss.to_coo(['A', 'B'], ['C', 'D', 'E'])
def test_to_coo_duplicate_index_entries(self):
ss = pd.concat([self.sparse_series[0],
self.sparse_series[0]]).to_sparse()
- pytest.raises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])
+ msg = ("Duplicate index entries are not allowed in to_coo"
+ " transformation")
+ with pytest.raises(ValueError, match=msg):
+ ss.to_coo(['A', 'B'], ['C', 'D'])
def test_from_coo_dense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 621572da57541..e6f21a7b47c3b 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -9,6 +9,7 @@
from pandas._libs.tslibs.frequencies import (
INVALID_FREQ_ERR_MSG, get_freq_code, get_freq_str)
import pandas._libs.tslibs.offsets as liboffsets
+from pandas._libs.tslibs.offsets import ApplyTypeError
import pandas.compat as compat
from pandas.compat import range
from pandas.compat.numpy import np_datetime64_compat
@@ -150,7 +151,8 @@ def test_sub(self):
# offset2 attr
return
off = self.offset2
- with pytest.raises(Exception):
+ msg = "Cannot subtract datetime from offset"
+ with pytest.raises(TypeError, match=msg):
off - self.d
assert 2 * off - off == off
@@ -736,7 +738,10 @@ def test_apply_large_n(self):
assert rs == xp
def test_apply_corner(self):
- pytest.raises(TypeError, BDay().apply, BMonthEnd())
+ msg = ("Only know how to combine business day with datetime or"
+ " timedelta")
+ with pytest.raises(ApplyTypeError, match=msg):
+ BDay().apply(BMonthEnd())
class TestBusinessHour(Base):
@@ -812,7 +817,8 @@ def test_sub(self):
# we have to override test_sub here becasue self.offset2 is not
# defined as self._offset(2)
off = self.offset2
- with pytest.raises(Exception):
+ msg = "Cannot subtract datetime from offset"
+ with pytest.raises(TypeError, match=msg):
off - self.d
assert 2 * off - off == off
@@ -1796,7 +1802,10 @@ def test_apply_large_n(self):
assert rs == xp
def test_apply_corner(self):
- pytest.raises(Exception, CDay().apply, BMonthEnd())
+ msg = ("Only know how to combine trading day with datetime, datetime64"
+ " or timedelta")
+ with pytest.raises(ApplyTypeError, match=msg):
+ CDay().apply(BMonthEnd())
def test_holidays(self):
# Define a TradingDay offset
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
index 8023ee3139dd5..9ee03d2e886f3 100644
--- a/pandas/tests/tseries/offsets/test_yqm_offsets.py
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -713,7 +713,8 @@ class TestYearBegin(Base):
_offset = YearBegin
def test_misspecified(self):
- pytest.raises(ValueError, YearBegin, month=13)
+ with pytest.raises(ValueError, match="Month must go from 1 to 12"):
+ YearBegin(month=13)
offset_cases = []
offset_cases.append((YearBegin(), {
@@ -804,7 +805,8 @@ class TestYearEnd(Base):
_offset = YearEnd
def test_misspecified(self):
- pytest.raises(ValueError, YearEnd, month=13)
+ with pytest.raises(ValueError, match="Month must go from 1 to 12"):
+ YearEnd(month=13)
offset_cases = []
offset_cases.append((YearEnd(), {
@@ -900,8 +902,11 @@ class TestBYearBegin(Base):
_offset = BYearBegin
def test_misspecified(self):
- pytest.raises(ValueError, BYearBegin, month=13)
- pytest.raises(ValueError, BYearEnd, month=13)
+ msg = "Month must go from 1 to 12"
+ with pytest.raises(ValueError, match=msg):
+ BYearBegin(month=13)
+ with pytest.raises(ValueError, match=msg):
+ BYearEnd(month=13)
offset_cases = []
offset_cases.append((BYearBegin(), {
@@ -993,8 +998,11 @@ class TestBYearEndLagged(Base):
_offset = BYearEnd
def test_bad_month_fail(self):
- pytest.raises(Exception, BYearEnd, month=13)
- pytest.raises(Exception, BYearEnd, month=0)
+ msg = "Month must go from 1 to 12"
+ with pytest.raises(ValueError, match=msg):
+ BYearEnd(month=13)
+ with pytest.raises(ValueError, match=msg):
+ BYearEnd(month=0)
offset_cases = []
offset_cases.append((BYearEnd(month=6), {
| xref #24332
| https://api.github.com/repos/pandas-dev/pandas/pulls/25483 | 2019-02-28T20:01:17Z | 2019-03-03T01:46:08Z | 2019-03-03T01:46:08Z | 2019-03-05T12:24:07Z |
Correct documentation on make.py --single parameter | diff --git a/doc/make.py b/doc/make.py
index 438c4a04a3f08..8b2a77987e663 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -294,14 +294,16 @@ def main():
help='number of jobs used by sphinx-build')
argparser.add_argument('--no-api',
default=False,
- help='ommit api and autosummary',
+ help='omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=None,
- help=('filename of section or method name to '
- 'compile, e.g. "indexing", "DataFrame.join"'))
+ help=('filename (relative to the "source" folder)'
+ ' of section or method name to compile, e.g. '
+ '"development/contributing.rst",'
+ ' "ecosystem.rst", "pandas.DataFrame.join"'))
argparser.add_argument('--python-path',
type=str,
default=os.path.dirname(DOC_PATH),
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 027f2d90bbb73..a87a66cd08ad1 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -428,10 +428,10 @@ reducing the turn-around time for checking your changes.
python make.py clean
python make.py --no-api
- # compile the docs with only a single
- # section, that which is in indexing.rst
+ # compile the docs with only a single section, relative to the "source" folder.
+ # For example, compiling only this guide (docs/source/development/contributing.rst)
python make.py clean
- python make.py --single indexing
+ python make.py --single development/contributing.rst
# compile the reference docs for a single function
python make.py clean
| - [X] closes #25480
- [X] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25482 | 2019-02-28T19:47:48Z | 2019-03-01T12:57:50Z | 2019-03-01T12:57:50Z | 2019-03-04T08:42:58Z |
DOC: Fix documentation build command in contribution guide. | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 1270bfec098e8..027f2d90bbb73 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -435,7 +435,7 @@ reducing the turn-around time for checking your changes.
# compile the reference docs for a single function
python make.py clean
- python make.py --single DataFrame.join
+ python make.py --single pandas.DataFrame.join
For comparison, a full documentation build may take 15 minutes, but a single
section may take 15 seconds. Subsequent builds, which only process portions
| - [X] closes #25478
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25479 | 2019-02-28T14:29:07Z | 2019-02-28T15:17:30Z | 2019-02-28T15:17:29Z | 2019-02-28T15:17:33Z |
Backport PR #25266 on branch 0.24.x (BUG: Fix regression on DataFrame.replace for regex) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 8f4beb3f484a4..4fcde7769b362 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -23,6 +23,7 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.all` and :meth:`DataFrame.any` where ``bool_only=True`` was ignored (:issue:`25101`)
- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`)
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
+- Fixed regression in :meth:`DataFrame.replace` where ``regex=True`` was only replacing patterns matching the start of the string (:issue:`25259`)
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 050c3d3e87fc6..5725b80990239 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -552,9 +552,9 @@ def comp(s, regex=False):
if isna(s):
return isna(values)
if hasattr(s, 'asm8'):
- return _compare_or_regex_match(maybe_convert_objects(values),
- getattr(s, 'asm8'), regex)
- return _compare_or_regex_match(values, s, regex)
+ return _compare_or_regex_search(maybe_convert_objects(values),
+ getattr(s, 'asm8'), regex)
+ return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
@@ -1901,11 +1901,11 @@ def _consolidate(blocks):
return new_blocks
-def _compare_or_regex_match(a, b, regex=False):
+def _compare_or_regex_search(a, b, regex=False):
"""
Compare two array_like inputs of the same shape or two scalar values
- Calls operator.eq or re.match, depending on regex argument. If regex is
+ Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
@@ -1921,7 +1921,7 @@ def _compare_or_regex_match(a, b, regex=False):
if not regex:
op = lambda x: operator.eq(x, b)
else:
- op = np.vectorize(lambda x: bool(re.match(b, x)) if isinstance(x, str)
+ op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str)
else False)
is_a_array = isinstance(a, np.ndarray)
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 219f7a1585fc2..127a64da38ba3 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -466,6 +466,13 @@ def test_regex_replace_dict_nested(self):
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
+ def test_regex_replace_dict_nested_non_first_character(self):
+ # GH 25259
+ df = pd.DataFrame({'first': ['abc', 'bca', 'cab']})
+ expected = pd.DataFrame({'first': ['.bc', 'bc.', 'c.b']})
+ result = df.replace({'a': '.'}, regex=True)
+ assert_frame_equal(result, expected)
+
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
| Backport PR #25266: BUG: Fix regression on DataFrame.replace for regex | https://api.github.com/repos/pandas-dev/pandas/pulls/25477 | 2019-02-28T13:55:59Z | 2019-02-28T14:42:30Z | 2019-02-28T14:42:30Z | 2019-02-28T14:43:49Z |
DataFrame.drop Raises KeyError definition | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 608e5c53ec094..a40733b7076b0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3797,7 +3797,12 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
- index, columns : single label or list-like
+ index : single label or list-like
+ Alternative to specifying axis (``labels, axis=0``
+ is equivalent to ``index=labels``).
+
+ .. versionadded:: 0.21.0
+ columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
@@ -3813,11 +3818,12 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
Returns
-------
DataFrame
+ DataFrame without the removed index or column labels.
Raises
------
KeyError
- If none of the labels are found in the selected axis
+ If any of the labels is not found in the selected axis.
See Also
--------
@@ -3830,7 +3836,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
Examples
--------
- >>> df = pd.DataFrame(np.arange(12).reshape(3,4),
+ >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
@@ -3867,7 +3873,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
- ... [1, 0.8], [0.3,0.2]])
+ ... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
| - [X] closes #25473
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25474 | 2019-02-28T11:41:56Z | 2019-02-28T12:41:08Z | 2019-02-28T12:41:08Z | 2019-02-28T14:11:26Z |
BUG: Keep column level name in resample nunique | diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst
index 6ed85ff2fac43..c7f9113b53c22 100644
--- a/doc/source/reference/groupby.rst
+++ b/doc/source/reference/groupby.rst
@@ -99,6 +99,7 @@ application to columns of a specific data type.
DataFrameGroupBy.idxmax
DataFrameGroupBy.idxmin
DataFrameGroupBy.mad
+ DataFrameGroupBy.nunique
DataFrameGroupBy.pct_change
DataFrameGroupBy.plot
DataFrameGroupBy.quantile
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..ee16246a1421d 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -210,6 +210,7 @@ Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`)
+- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`)
-
-
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 52056a6842ed9..683c21f7bd47a 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1579,6 +1579,7 @@ def groupby_series(obj, col=None):
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
+ results.columns.names = obj.columns.names
if not self.as_index:
results.index = ibase.default_index(len(results))
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index a884a37840f8a..1788b29a11082 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -897,6 +897,15 @@ def test_nunique_with_timegrouper():
tm.assert_series_equal(result, expected)
+def test_nunique_preserves_column_level_names():
+ # GH 23222
+ test = pd.DataFrame([1, 2, 2],
+ columns=pd.Index(['A'], name="level_0"))
+ result = test.groupby([0, 0, 0]).nunique()
+ expected = pd.DataFrame([2], columns=test.columns)
+ tm.assert_frame_equal(result, expected)
+
+
# count
# --------------------------------
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 71b100401ec21..ce675893d9907 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1135,6 +1135,15 @@ def test_resample_nunique():
assert_series_equal(result, expected)
+def test_resample_nunique_preserves_column_level_names():
+ # see gh-23222
+ df = tm.makeTimeDataFrame(freq="1D").abs()
+ df.columns = pd.MultiIndex.from_arrays([df.columns.tolist()] * 2,
+ names=["lev0", "lev1"])
+ result = df.resample("1h").nunique()
+ tm.assert_index_equal(df.columns, result.columns)
+
+
def test_resample_nunique_with_date_gap():
# GH 13453
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
| Closes #23222
xref #23645 | https://api.github.com/repos/pandas-dev/pandas/pulls/25469 | 2019-02-28T04:25:05Z | 2019-02-28T12:42:55Z | 2019-02-28T12:42:55Z | 2019-02-28T18:17:16Z |
ERR: Correct error message in to_datetime | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..f8b57f668c44d 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -104,7 +104,8 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
-
+- Bug in :func:`to_datetime` which would raise an (incorrect) ``ValueError`` when called with a date far into the future and the ``format`` argument specified instead of raising ``OutOfBoundsDatetime`` (:issue:`23830`)
+-
-
Categorical
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index f932e236b5218..624872c1c56c6 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -670,9 +670,11 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise',
# dateutil parser will return incorrect result because
# it will ignore nanoseconds
if is_raise:
- raise ValueError("time data {val} doesn't "
- "match format specified"
- .format(val=val))
+
+ # Still raise OutOfBoundsDatetime,
+ # as error message is informative.
+ raise
+
assert is_ignore
return values, tz_out
raise
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index b94935d2521eb..dd914d8a79837 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1868,6 +1868,15 @@ def test_invalid_origins_tzinfo(self):
pd.to_datetime(1, unit='D',
origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
+ @pytest.mark.parametrize("format", [
+ None, "%Y-%m-%d %H:%M:%S"
+ ])
+ def test_to_datetime_out_of_bounds_with_format_arg(self, format):
+ # see gh-23830
+ msg = "Out of bounds nanosecond timestamp"
+ with pytest.raises(OutOfBoundsDatetime, match=msg):
+ to_datetime("2417-10-27 00:00:00", format=format)
+
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
| Closes #23830
xref #23969 | https://api.github.com/repos/pandas-dev/pandas/pulls/25467 | 2019-02-28T04:02:31Z | 2019-02-28T12:44:44Z | 2019-02-28T12:44:44Z | 2019-02-28T18:17:17Z |
DOC: Fix encoding of docstring validation for Windows | diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 14172a790887d..34395435bd8c5 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -1052,6 +1052,14 @@ def test_raises_for_invalid_attribute_name(self, invalid_name):
with pytest.raises(AttributeError, match=msg):
validate_docstrings.Docstring(invalid_name)
+ @pytest.mark.parametrize('name', ['pandas.Series.str.isdecimal',
+ 'pandas.Series.str.islower'])
+ def test_encode_content_write_to_file(self, name):
+ # GH25466
+ docstr = validate_docstrings.Docstring(name).validate_pep8()
+ # the list of pep8 errors should be empty
+ assert not list(docstr)
+
class TestMainFunction:
def test_exit_status_for_validate_one(self, monkeypatch):
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index ebb09e8f311ee..63db50db45a7c 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -583,7 +583,7 @@ def validate_pep8(self):
application = flake8.main.application.Application()
application.initialize(["--quiet"])
- with tempfile.NamedTemporaryFile(mode='w') as file:
+ with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as file:
file.write(content)
file.flush()
application.run_checks([file.name])
| In Windows, the `validate_docstrings.py` script fails because an encoding error. It has been fixed here.
PR done in the London python sprints meetup.
CC: @datapythonista | https://api.github.com/repos/pandas-dev/pandas/pulls/25466 | 2019-02-27T21:04:20Z | 2019-05-07T01:26:51Z | 2019-05-07T01:26:51Z | 2019-05-07T04:19:07Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.