title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST: Fix broken test cases where Timedelta/Timestamp raise | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 08176af2b326d..5801384bf8db9 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -350,14 +350,21 @@ def maybe_promote(dtype, fill_value=np.nan):
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
- fill_value = tslibs.Timestamp(fill_value).to_datetime64()
+ try:
+ fill_value = tslibs.Timestamp(fill_value).to_datetime64()
+ except (TypeError, ValueError):
+ dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
- fv = tslibs.Timedelta(fill_value)
- if fv is NaT:
- # NaT has no `to_timedelta6` method
- fill_value = np.timedelta64("NaT", "ns")
+ try:
+ fv = tslibs.Timedelta(fill_value)
+ except ValueError:
+ dtype = np.dtype(np.object_)
else:
- fill_value = fv.to_timedelta64()
+ if fv is NaT:
+ # NaT has no `to_timedelta64` method
+ fill_value = np.timedelta64("NaT", "ns")
+ else:
+ fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index cf7a168074e9e..1b7de9b20f42f 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -272,10 +272,6 @@ def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced, box):
pytest.xfail("falsely upcasts to object")
if boxed and dtype not in (str, object) and box_dtype is None:
pytest.xfail("falsely upcasts to object")
- if not boxed and dtype.kind == "M":
- pytest.xfail("raises error")
- if not boxed and dtype.kind == "m":
- pytest.xfail("raises error")
# filling anything but bool with bool casts to object
expected_dtype = np.dtype(object) if dtype != bool else dtype
@@ -348,8 +344,6 @@ def test_maybe_promote_any_with_datetime64(
or (box_dtype is None and is_datetime64_dtype(type(fill_value)))
):
pytest.xfail("mix of lack of upcasting, resp. wrong missing value")
- if not boxed and is_timedelta64_dtype(dtype):
- pytest.xfail("raises error")
# special case for box_dtype
box_dtype = np.dtype(datetime64_dtype) if box_dtype == "dt_dtype" else box_dtype
@@ -490,9 +484,7 @@ def test_maybe_promote_any_numpy_dtype_with_datetimetz(
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
boxed, box_dtype = box # read from parametrized fixture
- if dtype.kind == "m" and not boxed:
- pytest.xfail("raises error")
- elif dtype.kind == "M" and not boxed:
+ if dtype.kind == "M" and not boxed:
pytest.xfail("Comes back as M8 instead of object")
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
@@ -549,8 +541,6 @@ def test_maybe_promote_any_with_timedelta64(
else:
if boxed and box_dtype is None and is_timedelta64_dtype(type(fill_value)):
pytest.xfail("does not upcast correctly")
- if not boxed and is_datetime64_dtype(dtype):
- pytest.xfail("raises error")
# special case for box_dtype
box_dtype = np.dtype(timedelta64_dtype) if box_dtype == "td_dtype" else box_dtype
@@ -622,9 +612,6 @@ def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype, bo
fill_dtype = np.dtype(string_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if is_datetime_or_timedelta_dtype(dtype) and box_dtype != object:
- pytest.xfail("does not upcast or raises")
-
# create array of given dtype
fill_value = "abc"
@@ -678,9 +665,6 @@ def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype, bo
dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if not boxed and is_datetime_or_timedelta_dtype(dtype):
- pytest.xfail("raises error")
-
# create array of object dtype from a scalar value (i.e. passing
# dtypes.common.is_scalar), which can however not be cast to int/float etc.
fill_value = pd.DateOffset(1)
| Orthogonal to #28725, but will need to be rebased after that is merged. | https://api.github.com/repos/pandas-dev/pandas/pulls/28729 | 2019-10-01T15:30:13Z | 2019-10-02T11:55:36Z | 2019-10-02T11:55:36Z | 2019-10-02T13:27:34Z |
DOC: fix pr09,pr08 errors in frame.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9467978f13d30..0cfeb61871a85 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -304,7 +304,7 @@ class DataFrame(NDFrame):
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
- Dict can contain Series, arrays, constants, or list-like objects
+ Dict can contain Series, arrays, constants, or list-like objects.
.. versionchanged:: 0.23.0
If data is a dict, column order follows insertion-order for
@@ -316,14 +316,14 @@ class DataFrame(NDFrame):
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
- no indexing information part of input data and no index provided
+ no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
- RangeIndex (0, 1, 2, ..., n) if no column labels are provided
+ RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
- Data type to force. Only a single dtype is allowed. If None, infer
+ Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
- Copy data from inputs. Only affects DataFrame / 2d ndarray input
+ Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
@@ -1544,20 +1544,20 @@ def from_records(
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
- input labels to use
+ input labels to use.
exclude : sequence, default None
- Columns or fields to exclude
+ Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
- columns)
+ columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
- decimal.Decimal) to floating point, useful for SQL result sets
+ decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
- Number of rows to read if data is an iterator
+ Number of rows to read if data is an iterator.
Returns
-------
@@ -2118,8 +2118,8 @@ def to_parquet(
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
- Column names by which to partition the dataset
- Columns are partitioned in the order they are given
+ Column names by which to partition the dataset.
+ Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
@@ -3460,9 +3460,9 @@ def insert(self, loc, column, value, allow_duplicates=False):
Parameters
----------
loc : int
- Insertion index. Must verify 0 <= loc <= len(columns)
+ Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
- label of the inserted column
+ Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
@@ -3681,9 +3681,9 @@ def lookup(self, row_labels, col_labels):
Parameters
----------
row_labels : sequence
- The row labels to use for lookup
+ The row labels to use for lookup.
col_labels : sequence
- The column labels to use for lookup
+ The column labels to use for lookup.
Returns
-------
@@ -4770,14 +4770,14 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
- default use all of the columns
+ default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
- Whether to drop duplicates in place or to return a copy
+ Whether to drop duplicates in place or to return a copy.
Returns
-------
@@ -4805,7 +4805,7 @@ def duplicated(self, subset=None, keep="first"):
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
- default use all of the columns
+ default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
@@ -6233,9 +6233,9 @@ def unstack(self, level=-1, fill_value=None):
Parameters
----------
level : int, str, or list of these, default -1 (last level)
- Level(s) of index to unstack, can pass level name
+ Level(s) of index to unstack, can pass level name.
fill_value : int, string or dict
- Replace NaN with this value if the unstack produces missing values
+ Replace NaN with this value if the unstack produces missing values.
Returns
-------
@@ -7368,7 +7368,8 @@ def corr(self, method="pearson", min_periods=1):
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
- regardless of the callable's behavior
+ regardless of the callable's behavior.
+
.. versionadded:: 0.24.0
min_periods : int, optional
@@ -7572,7 +7573,7 @@ def corrwith(self, other, axis=0, drop=False, method="pearson"):
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
- and returning a float
+ and returning a float.
.. versionadded:: 0.24.0
@@ -8037,7 +8038,8 @@ def mode(self, axis=0, numeric_only=False, dropna=True):
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
- * 1 or 'columns' : get mode of each row
+ * 1 or 'columns' : get mode of each row.
+
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
| fixing methods defined in this file
this is part of a larger clean up of
the dataframe class
this fixes a subset of #28702
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28728 | 2019-10-01T15:22:44Z | 2019-10-01T18:46:32Z | 2019-10-01T18:46:32Z | 2019-10-01T18:46:38Z |
Added doctstring to fixture | diff --git a/pandas/tests/io/excel/conftest.py b/pandas/tests/io/excel/conftest.py
index 54acd2128369d..843b3c08421b3 100644
--- a/pandas/tests/io/excel/conftest.py
+++ b/pandas/tests/io/excel/conftest.py
@@ -7,6 +7,9 @@
@pytest.fixture
def frame(float_frame):
+ """
+ Returns the first ten items in fixture "float_frame".
+ """
return float_frame[:10]
| - xref https://github.com/pandas-dev/pandas/pull/26579#pullrequestreview-245987920
- 0 tests added
- passes `black pandas`
- passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28727 | 2019-10-01T15:17:33Z | 2019-10-02T10:04:31Z | 2019-10-02T10:04:31Z | 2019-10-02T10:09:38Z |
Change maybe_promote fill_value to dt64/td64 NaT instead of iNaT | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index a3ad84ff89a66..08176af2b326d 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -339,7 +339,7 @@ def maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
- fill_value = iNaT
+ fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
@@ -350,9 +350,14 @@ def maybe_promote(dtype, fill_value=np.nan):
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
- fill_value = tslibs.Timestamp(fill_value).value
+ fill_value = tslibs.Timestamp(fill_value).to_datetime64()
elif issubclass(dtype.type, np.timedelta64):
- fill_value = tslibs.Timedelta(fill_value).value
+ fv = tslibs.Timedelta(fill_value)
+ if fv is NaT:
+ # NaT has no `to_timedelta6` method
+ fill_value = np.timedelta64("NaT", "ns")
+ else:
+ fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
@@ -393,7 +398,7 @@ def maybe_promote(dtype, fill_value=np.nan):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
- fill_value = iNaT
+ fill_value = dtype.type("NaT", "ns")
else:
dtype = np.object_
fill_value = np.nan
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index 211c550100018..cf7a168074e9e 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -22,7 +22,7 @@
is_string_dtype,
is_timedelta64_dtype,
)
-from pandas.core.dtypes.dtypes import DatetimeTZDtype, PandasExtensionDtype
+from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
import pandas as pd
@@ -92,20 +92,6 @@ def box(request):
return request.param
-def _safe_dtype_assert(left_dtype, right_dtype):
- """
- Compare two dtypes without raising TypeError.
- """
- __tracebackhide__ = True
- if isinstance(right_dtype, PandasExtensionDtype):
- # switch order of equality check because numpy dtypes (e.g. if
- # left_dtype is np.object_) do not know some expected dtypes (e.g.
- # DatetimeTZDtype) and would raise a TypeError in their __eq__-method.
- assert right_dtype == left_dtype
- else:
- assert left_dtype == right_dtype
-
-
def _check_promote(
dtype,
fill_value,
@@ -157,8 +143,11 @@ def _check_promote(
result_dtype, result_fill_value = maybe_promote(dtype, fill_value)
expected_fill_value = exp_val_for_scalar
- _safe_dtype_assert(result_dtype, expected_dtype)
+ assert result_dtype == expected_dtype
+ _assert_match(result_fill_value, expected_fill_value)
+
+def _assert_match(result_fill_value, expected_fill_value):
# GH#23982/25425 require the same type in addition to equality/NA-ness
res_type = type(result_fill_value)
ex_type = type(expected_fill_value)
@@ -369,8 +358,8 @@ def test_maybe_promote_any_with_datetime64(
if is_datetime64_dtype(dtype):
expected_dtype = dtype
# for datetime dtypes, scalar values get cast to pd.Timestamp.value
- exp_val_for_scalar = pd.Timestamp(fill_value).value
- exp_val_for_array = iNaT
+ exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
+ exp_val_for_array = np.datetime64("NaT", "ns")
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
@@ -454,9 +443,7 @@ def test_maybe_promote_datetimetz_with_datetimetz(
)
-@pytest.mark.parametrize(
- "fill_value", [None, np.nan, NaT, iNaT], ids=["None", "np.nan", "pd.NaT", "iNaT"]
-)
+@pytest.mark.parametrize("fill_value", [None, np.nan, NaT, iNaT])
# override parametrization due to to many xfails; see GH 23982 / 25425
@pytest.mark.parametrize("box", [(False, None)])
def test_maybe_promote_datetimetz_with_na(tz_aware_fixture, fill_value, box):
@@ -572,8 +559,8 @@ def test_maybe_promote_any_with_timedelta64(
if is_timedelta64_dtype(dtype):
expected_dtype = dtype
# for timedelta dtypes, scalar values get cast to pd.Timedelta.value
- exp_val_for_scalar = pd.Timedelta(fill_value).value
- exp_val_for_array = iNaT
+ exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
+ exp_val_for_array = np.timedelta64("NaT", "ns")
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
@@ -714,9 +701,7 @@ def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype, bo
)
-@pytest.mark.parametrize(
- "fill_value", [None, np.nan, NaT, iNaT], ids=["None", "np.nan", "pd.NaT", "iNaT"]
-)
+@pytest.mark.parametrize("fill_value", [None, np.nan, NaT, iNaT])
# override parametrization due to to many xfails; see GH 23982 / 25425
@pytest.mark.parametrize("box", [(False, None)])
def test_maybe_promote_any_numpy_dtype_with_na(
@@ -764,7 +749,7 @@ def test_maybe_promote_any_numpy_dtype_with_na(
elif is_datetime_or_timedelta_dtype(dtype):
# datetime / timedelta cast all missing values to iNaT
expected_dtype = dtype
- exp_val_for_scalar = iNaT
+ exp_val_for_scalar = dtype.type("NaT", "ns")
elif fill_value is NaT:
# NaT upcasts everything that's not datetime/timedelta to object
expected_dtype = np.dtype(object)
@@ -783,7 +768,7 @@ def test_maybe_promote_any_numpy_dtype_with_na(
# integers cannot hold NaNs; maybe_promote_with_array returns None
exp_val_for_array = None
elif is_datetime_or_timedelta_dtype(expected_dtype):
- exp_val_for_array = iNaT
+ exp_val_for_array = expected_dtype.type("NaT", "ns")
else: # expected_dtype = float / complex / object
exp_val_for_array = np.nan
@@ -817,7 +802,4 @@ def test_maybe_promote_dimensions(any_numpy_dtype_reduced, dim):
result_dtype, result_missing_value = maybe_promote(dtype, fill_array)
assert result_dtype == expected_dtype
- # None == None, iNaT == iNaT, but np.nan != np.nan
- assert (result_missing_value == expected_missing_value) or (
- result_missing_value is np.nan and expected_missing_value is np.nan
- )
+ _assert_match(result_missing_value, expected_missing_value)
| Unambiguous, preliminary for fixing more xfails. | https://api.github.com/repos/pandas-dev/pandas/pulls/28725 | 2019-10-01T14:51:15Z | 2019-10-01T16:59:10Z | 2019-10-01T16:59:10Z | 2019-10-01T17:02:45Z |
DOC: Fixed PR06 docstrings errors in pandas.ExcelWriter | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 6dba5e042562b..039a0560af627 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -532,15 +532,15 @@ class ExcelWriter(metaclass=abc.ABCMeta):
Parameters
----------
- path : string
+ path : str
Path to xls or xlsx file.
- engine : string (optional)
+ engine : str (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
- date_format : string, default None
+ date_format : str, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
- datetime_format : string, default None
+ datetime_format : str, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
mode : {'w', 'a'}, default 'w'
@@ -658,11 +658,11 @@ def write_cells(
----------
cells : generator
cell of formatted data to save to Excel sheet
- sheet_name : string, default None
+ sheet_name : str, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
- freeze_panes: integer tuple of length 2
+ freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@@ -782,10 +782,10 @@ class ExcelFile:
Parameters
----------
- io : string, path object (pathlib.Path or py._path.local.LocalPath),
+ io : str, path object (pathlib.Path or py._path.local.LocalPath),
a file-like object, xlrd workbook or openpypl workbook.
If a string or path object, expected to be a path to xls, xlsx or odf file.
- engine : string, default None
+ engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None, ``xlrd``, ``openpyxl`` or ``odf``.
Note that ``odf`` reads tables out of OpenDocument formatted files.
| - [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28723 | 2019-10-01T14:21:52Z | 2019-10-01T16:33:25Z | 2019-10-01T16:33:25Z | 2019-10-01T16:33:40Z |
API: Change matplotlib formatter registration | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 0b9aae6676710..f4761c5663c9f 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -208,7 +208,7 @@ import sys
import pandas
blacklist = {'bs4', 'gcsfs', 'html5lib', 'http', 'ipython', 'jinja2', 'hypothesis',
- 'lxml', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
+ 'lxml', 'matplotlib', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
'tables', 'urllib.request', 'xlrd', 'xlsxwriter', 'xlwt'}
# GH#28227 for some of these check for top-level modules, while others are
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index fa16b2f216610..342e87289e993 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -1190,6 +1190,21 @@ with "(right)" in the legend. To turn off the automatic marking, use the
plt.close('all')
+.. _plotting.formatters:
+
+Custom formatters for timeseries plots
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionchanged:: 1.0.0
+
+Pandas provides custom formatters for timeseries plots. These change the
+formatting of the axis labels for dates and times. By default,
+the custom formatters are applied only to plots created by pandas with
+:meth:`DataFrame.plot` or :meth:`Series.plot`. To have them apply to all
+plots, including those made by matplotlib, set the option
+``pd.options.plotting.matplotlib.register_converters = True`` or use
+:meth:`pandas.plotting.register_matplotlib_converters`.
+
Suppressing tick resolution adjustment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 48c1173a372a7..9262e8c325be3 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -174,7 +174,6 @@ Backwards incompatible API changes
pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)])
-
.. _whatsnew_1000.api.other:
Other API changes
@@ -186,8 +185,13 @@ Other API changes
- In order to improve tab-completion, Pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
To see which attributes are excluded, see an object's ``_deprecations`` attribute, for example ``pd.DataFrame._deprecations`` (:issue:`28805`).
- The returned dtype of ::func:`pd.unique` now matches the input dtype. (:issue:`27874`)
+- Changed the default configuration value for ``options.matplotlib.register_converters`` from ``True`` to ``"auto"`` (:issue:`18720`).
+ Now, pandas custom formatters will only be applied to plots created by pandas, through :meth:`~DataFrame.plot`.
+ Previously, pandas' formatters would be applied to all plots created *after* a :meth:`~DataFrame.plot`.
+ See :ref:`units registration <whatsnew_1000.matplotlib_units>` for more.
-
+
.. _whatsnew_1000.api.documentation:
Documentation Improvements
@@ -220,6 +224,27 @@ with migrating existing code.
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. _whatsnew_1000.matplotlib_units:
+
+**Matplotlib unit registration**
+
+Previously, pandas would register converters with matplotlib as a side effect of importing pandas (:issue:`18720`).
+This changed the output of plots made via matplotlib plots after pandas was imported, even if you were using
+matplotlib directly rather than rather than :meth:`~DataFrame.plot`.
+
+To use pandas formatters with a matplotlib plot, specify
+
+.. code-block:: python
+
+ >>> import pandas as pd
+ >>> pd.options.plotting.matplotlib.register_converters = True
+
+Note that plots created by :meth:`DataFrame.plot` and :meth:`Series.plot` *do* register the converters
+automatically. The only behavior change is when plotting a date-like object via ``matplotlib.pyplot.plot``
+or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
+
+**Other removals**
+
- Removed the previously deprecated :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`)
- Changed the the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to False (:issue:`27600`)
- :meth:`pandas.Series.str.cat` now defaults to aligning ``others``, using ``join='left'`` (:issue:`27611`)
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index bc2eb3511629d..ba0a4d81a88d3 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -599,7 +599,7 @@ def register_plotting_backend_cb(key):
register_converter_doc = """
-: bool
+: bool or 'auto'.
Whether to register converters with matplotlib's units registry for
dates, times, datetimes, and Periods. Toggling to False will remove
the converters, restoring any converters that pandas overwrote.
@@ -619,8 +619,8 @@ def register_converter_cb(key):
with cf.config_prefix("plotting.matplotlib"):
cf.register_option(
"register_converters",
- True,
+ "auto",
register_converter_doc,
- validator=bool,
+ validator=is_one_of_factory(["auto", True, False]),
cb=register_converter_cb,
)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index eaf5b336bb8f6..127fdffafcf36 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -3,7 +3,6 @@
from pandas._config import get_option
-from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_integer, is_list_like
@@ -11,11 +10,6 @@
from pandas.core.base import PandasObject
-# Trigger matplotlib import, which implicitly registers our
-# converts. Implicit registration is deprecated, and when enforced
-# we can lazily import matplotlib.
-import_optional_dependency("pandas.plotting._matplotlib", raise_on_missing=False)
-
def hist_series(
self,
diff --git a/pandas/plotting/_matplotlib/__init__.py b/pandas/plotting/_matplotlib/__init__.py
index d3b7a34b6c923..206600ad37acc 100644
--- a/pandas/plotting/_matplotlib/__init__.py
+++ b/pandas/plotting/_matplotlib/__init__.py
@@ -1,5 +1,3 @@
-from pandas._config import get_option
-
from pandas.plotting._matplotlib.boxplot import (
BoxPlot,
boxplot,
@@ -42,9 +40,6 @@
"hexbin": HexBinPlot,
}
-if get_option("plotting.matplotlib.register_converters"):
- register(explicit=False)
-
def plot(data, kind, **kwargs):
# Importing pyplot at the top of the file (before the converters are
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py
index eed328131da92..cfd6c3519d82c 100644
--- a/pandas/plotting/_matplotlib/boxplot.py
+++ b/pandas/plotting/_matplotlib/boxplot.py
@@ -11,7 +11,6 @@
import pandas as pd
from pandas.io.formats.printing import pprint_thing
-from pandas.plotting._matplotlib import converter
from pandas.plotting._matplotlib.core import LinePlot, MPLPlot
from pandas.plotting._matplotlib.style import _get_standard_colors
from pandas.plotting._matplotlib.tools import _flatten, _subplots
@@ -364,7 +363,6 @@ def boxplot_frame(
):
import matplotlib.pyplot as plt
- converter._WARN = False # no warning for pandas plots
ax = boxplot(
self,
column=column,
@@ -396,7 +394,6 @@ def boxplot_frame_groupby(
sharey=True,
**kwds
):
- converter._WARN = False # no warning for pandas plots
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 446350cb5d915..946ce8bcec97f 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -1,6 +1,7 @@
+import contextlib
import datetime as pydt
from datetime import datetime, timedelta
-import warnings
+import functools
from dateutil.relativedelta import relativedelta
import matplotlib.dates as dates
@@ -23,6 +24,7 @@
)
from pandas.core.dtypes.generic import ABCSeries
+from pandas import get_option
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.indexes.datetimes import date_range
@@ -39,7 +41,6 @@
MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
-_WARN = True # Global for whether pandas has registered the units explicitly
_mpl_units = {} # Cache for units overwritten by us
@@ -55,13 +56,42 @@ def get_pairs():
return pairs
-def register(explicit=True):
- # Renamed in pandas.plotting.__init__
- global _WARN
+def register_pandas_matplotlib_converters(func):
+ """
+ Decorator applying pandas_converters.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ with pandas_converters():
+ return func(*args, **kwargs)
- if explicit:
- _WARN = False
+ return wrapper
+
+@contextlib.contextmanager
+def pandas_converters():
+ """
+ Context manager registering pandas' converters for a plot.
+
+ See Also
+ --------
+ register_pandas_matplotlib_converters : Decorator that applies this.
+ """
+ value = get_option("plotting.matplotlib.register_converters")
+
+ if value:
+ # register for True or "auto"
+ register()
+ try:
+ yield
+ finally:
+ if value == "auto":
+ # only deregister for "auto"
+ deregister()
+
+
+def register():
pairs = get_pairs()
for type_, cls in pairs:
# Cache previous converter if present
@@ -86,24 +116,6 @@ def deregister():
units.registry[unit] = formatter
-def _check_implicitly_registered():
- global _WARN
-
- if _WARN:
- msg = (
- "Using an implicitly registered datetime converter for a "
- "matplotlib plotting method. The converter was registered "
- "by pandas on import. Future versions of pandas will require "
- "you to explicitly register matplotlib converters.\n\n"
- "To register the converters:\n\t"
- ">>> from pandas.plotting import register_matplotlib_converters"
- "\n\t"
- ">>> register_matplotlib_converters()"
- )
- warnings.warn(msg, FutureWarning)
- _WARN = False
-
-
def _to_ordinalf(tm):
tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + float(tm.microsecond / 1e6)
return tot_sec
@@ -253,7 +265,6 @@ class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
# values might be a 1-d array, or a list-like of arrays.
- _check_implicitly_registered()
if is_nested_list_like(values):
values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values]
else:
@@ -330,7 +341,6 @@ def __init__(self, locator, tz=None, defaultfmt="%Y-%m-%d"):
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
"""Pick the best locator based on a distance."""
- _check_implicitly_registered()
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
@@ -372,7 +382,6 @@ def get_unit_generic(freq):
def __call__(self):
# if no data have been set, this will tank with a ValueError
- _check_implicitly_registered()
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
@@ -990,7 +999,6 @@ def _get_default_locs(self, vmin, vmax):
def __call__(self):
"Return the locations of the ticks."
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
- _check_implicitly_registered()
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
@@ -1075,7 +1083,6 @@ def set_locs(self, locs):
"Sets the locations of the ticks"
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
- _check_implicitly_registered()
self.locs = locs
@@ -1088,7 +1095,6 @@ def set_locs(self, locs):
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
- _check_implicitly_registered()
if self.formatdict is None:
return ""
@@ -1120,7 +1126,6 @@ def format_timedelta_ticks(x, pos, n_decimals):
return s
def __call__(self, x, pos=0):
- _check_implicitly_registered()
(vmin, vmax) = tuple(self.axis.get_view_interval())
n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
if n_decimals > 9:
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index a729951b3d7db..541dca715e814 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -4,8 +4,6 @@
import numpy as np
-from pandas._config import get_option
-
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
@@ -28,8 +26,8 @@
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
-from pandas.plotting._matplotlib import converter
from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0
+from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters
from pandas.plotting._matplotlib.style import _get_standard_colors
from pandas.plotting._matplotlib.tools import (
_flatten,
@@ -41,9 +39,6 @@
table,
)
-if get_option("plotting.matplotlib.register_converters"):
- converter.register(explicit=False)
-
class MPLPlot:
"""
@@ -112,7 +107,6 @@ def __init__(
import matplotlib.pyplot as plt
- converter._WARN = False # no warning for pandas plots
self.data = data
self.by = by
@@ -648,6 +642,7 @@ def _get_xticks(self, convert_period=False):
return x
@classmethod
+ @register_pandas_matplotlib_converters
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = isna(y)
if mask.any():
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index f95ff2578d882..c4ac9ead3f3d3 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -9,7 +9,6 @@
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
-from pandas.plotting._matplotlib import converter
from pandas.plotting._matplotlib.core import LinePlot, MPLPlot
from pandas.plotting._matplotlib.tools import _flatten, _set_ticks_props, _subplots
@@ -255,7 +254,6 @@ def _grouped_hist(
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
- converter._WARN = False # no warning for pandas plots
xrot = xrot or rot
fig, axes = _grouped_plot(
@@ -363,7 +361,6 @@ def hist_frame(
bins=10,
**kwds
):
- converter._WARN = False # no warning for pandas plots
if by is not None:
axes = _grouped_hist(
data,
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index f160e50d8d99b..931c699d9b9fd 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -25,6 +25,7 @@
TimeSeries_DateFormatter,
TimeSeries_DateLocator,
TimeSeries_TimedeltaFormatter,
+ register_pandas_matplotlib_converters,
)
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
@@ -33,6 +34,7 @@
# Plotting functions and monkey patches
+@register_pandas_matplotlib_converters
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
@@ -56,7 +58,7 @@ def tsplot(series, plotf, ax=None, **kwargs):
"'tsplot' is deprecated and will be removed in a "
"future version. Please use Series.plot() instead.",
FutureWarning,
- stacklevel=2,
+ stacklevel=3,
)
# Used inferred freq is possible, need a test case for inferred
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 426ca9632af29..762ee4cf83224 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -30,7 +30,7 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
)
-def register(explicit=True):
+def register():
"""
Register Pandas Formatters and Converters with matplotlib.
@@ -49,7 +49,7 @@ def register(explicit=True):
deregister_matplotlib_converters
"""
plot_backend = _get_plot_backend("matplotlib")
- plot_backend.register(explicit=explicit)
+ plot_backend.register()
def deregister():
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index aabe16d5050f9..ccc2afbb8b824 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -28,18 +28,6 @@
pytest.importorskip("matplotlib.pyplot")
-def test_initial_warning():
- code = (
- "import pandas as pd; import matplotlib.pyplot as plt; "
- "s = pd.Series(1, pd.date_range('2000', periods=12)); "
- "fig, ax = plt.subplots(); "
- "ax.plot(s.index, s.values)"
- )
- call = [sys.executable, "-c", code]
- out = subprocess.check_output(call, stderr=subprocess.STDOUT).decode()
- assert "Using an implicitly" in out
-
-
def test_registry_mpl_resets():
# Check that Matplotlib converters are properly reset (see issue #27481)
code = (
@@ -71,27 +59,12 @@ def test_register_by_default(self):
call = [sys.executable, "-c", code]
assert subprocess.check_call(call) == 0
- def test_warns(self):
- plt = pytest.importorskip("matplotlib.pyplot")
- s = Series(range(12), index=date_range("2017", periods=12))
- _, ax = plt.subplots()
-
- # Set to the "warning" state, in case this isn't the first test run
- converter._WARN = True
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False) as w:
- ax.plot(s.index, s.values)
- plt.close()
-
- assert len(w) == 1
- assert "Using an implicitly registered datetime converter" in str(w[0])
-
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
_, ax = plt.subplots()
# Set to the "warn" state, in case this isn't the first test run
- converter._WARN = True
register_matplotlib_converters()
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
@@ -102,7 +75,6 @@ def test_pandas_plots_register(self):
pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
# Set to the "warn" state, in case this isn't the first test run
- converter._WARN = True
with tm.assert_produces_warning(None) as w:
s.plot()
@@ -110,13 +82,15 @@ def test_pandas_plots_register(self):
def test_matplotlib_formatters(self):
units = pytest.importorskip("matplotlib.units")
- assert Timestamp in units.registry
- ctx = cf.option_context("plotting.matplotlib.register_converters", False)
- with ctx:
- assert Timestamp not in units.registry
+ # Can't make any assertion about the start state.
+ # We we check that toggling converters off remvoes it, and toggling it
+ # on restores it.
- assert Timestamp in units.registry
+ with cf.option_context("plotting.matplotlib.register_converters", True):
+ with cf.option_context("plotting.matplotlib.register_converters", False):
+ assert Timestamp not in units.registry
+ assert Timestamp in units.registry
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
@@ -125,7 +99,6 @@ def test_option_no_warning(self):
s = Series(range(12), index=date_range("2017", periods=12))
_, ax = plt.subplots()
- converter._WARN = True
# Test without registering first, no warning
with ctx:
with tm.assert_produces_warning(None) as w:
@@ -134,7 +107,6 @@ def test_option_no_warning(self):
assert len(w) == 0
# Now test with registering
- converter._WARN = True
register_matplotlib_converters()
with ctx:
with tm.assert_produces_warning(None) as w:
| This PR does two things
1. Enforces the deprecation of pandas auto registering our formatters.
2. Provides a new option, "auto", which will only apply our formatters to our plots. By default, the value will be "auto".
Background: pandas used to register formatters for Period, datetime, date, and time. This formatters affected plots created using matplotlib only, not pandas. For various reasons (including import time) we don't want to do this on an `import pandas`.
A minimal change would be to just not do the registration on `import pandas`. Users would either manually do it, or pandas does it automatically when we do DataFrame.plot. The downside of pandas doing it automatically is then in the following
```
fig1 = plt.plot(x, y)
DataFrame.plot(...) # registers formatters
fig2 = plt.plot(x, y)
```
fig1 and fig2 will be different because pandas has implicilty registered formatters.
The `auto` option will automatically register the formatters, make the plot, then deregister the formatters.
Closes https://github.com/pandas-dev/pandas/issues/18720 | https://api.github.com/repos/pandas-dev/pandas/pulls/28722 | 2019-10-01T14:17:31Z | 2019-10-25T12:17:14Z | 2019-10-25T12:17:14Z | 2019-10-25T12:17:17Z |
DOC: Fixed PR06 docstrings errors in pandas.arrays.IntervalArray | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 1f4b76a259f00..6dd0b116b3b0d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -259,7 +259,7 @@ def _from_factorized(cls, values, original):
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
- copy : boolean, default False
+ copy : bool, default False
copy the data
dtype : dtype or None, default None
If None, dtype will be inferred
@@ -315,7 +315,7 @@ def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
- copy : boolean, default False
+ copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
@@ -387,7 +387,7 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
- copy : boolean, default False
+ copy : bool, default False
by-default copy the data, this is compat only and ignored
dtype : dtype or None, default None
If None, dtype will be inferred
@@ -811,7 +811,7 @@ def value_counts(self, dropna=True):
Parameters
----------
- dropna : boolean, default True
+ dropna : bool, default True
Don't include counts of NaN.
Returns
| - [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28721 | 2019-10-01T14:15:01Z | 2019-10-01T15:50:41Z | 2019-10-01T15:50:41Z | 2019-10-01T15:50:50Z |
CLN: Exception in io/plotting | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 1c9bd01b16739..eddc9b4cd21bd 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -12,14 +12,11 @@
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
- try:
- for label in ax.get_xticklabels():
- label.set_ha("right")
- label.set_rotation(rot)
- fig = ax.get_figure()
- fig.subplots_adjust(bottom=0.2)
- except Exception: # pragma: no cover
- pass
+ for label in ax.get_xticklabels():
+ label.set_ha("right")
+ label.set_rotation(rot)
+ fig = ax.get_figure()
+ fig.subplots_adjust(bottom=0.2)
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 89bc98b5a1006..7491cef17ebfc 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -539,13 +539,16 @@ def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
+ class DummyException(Exception):
+ pass
+
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
- raise Exception("error")
- except Exception:
+ raise DummyException("error")
+ except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 5a591f72d7361..65d0c3d9fb17d 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -536,7 +536,7 @@ def _check_plot_works(f, filterwarnings="always", **kwargs):
plt.clf()
- ax = kwargs.get("ax", fig.add_subplot(211)) # noqa
+ kwargs.get("ax", fig.add_subplot(211))
ret = f(**kwargs)
assert_is_valid_plot_return_object(ret)
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index e2b7f2819f957..be24f102851b7 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1553,12 +1553,9 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
assert ax.freq == freq
ax = fig.add_subplot(212)
- try:
- kwargs["ax"] = ax
- ret = f(*args, **kwargs)
- assert ret is not None # do something more intelligent
- except Exception:
- pass
+ kwargs["ax"] = ax
+ ret = f(*args, **kwargs)
+ assert ret is not None # TODO: do something more intelligent
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
| Broken off from #28649 to focus in on what is breaking there. | https://api.github.com/repos/pandas-dev/pandas/pulls/28720 | 2019-10-01T13:57:22Z | 2019-10-01T16:51:32Z | 2019-10-01T16:51:32Z | 2019-10-01T16:53:23Z |
DOC: Fixed PR06 docstrings errors in pandas.timedelta_range | diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index b03d60c7b5b37..49dcea4da5760 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -717,17 +717,17 @@ def timedelta_range(
Parameters
----------
- start : string or timedelta-like, default None
+ start : str or timedelta-like, default None
Left bound for generating timedeltas
- end : string or timedelta-like, default None
+ end : str or timedelta-like, default None
Right bound for generating timedeltas
- periods : integer, default None
+ periods : int, default None
Number of periods to generate
- freq : string or DateOffset, default 'D'
+ freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'
- name : string, default None
+ name : str, default None
Name of the resulting TimedeltaIndex
- closed : string, default None
+ closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
| - [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28719 | 2019-10-01T13:49:05Z | 2019-10-02T08:59:25Z | 2019-10-02T08:59:25Z | 2019-10-02T08:59:41Z |
DOC: Fixed PR06 docstring errors in pandas.DataFrame | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 16f34fee5e1ff..9467978f13d30 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7948,7 +7948,7 @@ def idxmin(self, axis=0, skipna=True):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : boolean, default True
+ skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
@@ -7985,7 +7985,7 @@ def idxmax(self, axis=0, skipna=True):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : boolean, default True
+ skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
| - [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28718 | 2019-10-01T13:38:33Z | 2019-10-01T15:43:52Z | 2019-10-01T15:43:52Z | 2019-10-01T15:44:04Z |
DOC: Series.between() can take arguments that are not just scalars | diff --git a/pandas/core/series.py b/pandas/core/series.py
index c87e371354f63..48f10bbfa8c9e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4410,9 +4410,9 @@ def between(self, left, right, inclusive=True):
Parameters
----------
- left : scalar
+ left : scalar or list-like
Left boundary.
- right : scalar
+ right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
| modified the docs of Series.between() to reflect that it can take any
list like object like Series
closes #28435 | https://api.github.com/repos/pandas-dev/pandas/pulls/28717 | 2019-10-01T13:36:56Z | 2019-10-01T16:55:05Z | 2019-10-01T16:55:05Z | 2019-10-01T18:51:25Z |
Backport PR #28710 on branch 0.25.x (CI Failing: TestReadHtml.test_spam_url #28708) | diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 615e2735cd288..d818b83202e83 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -134,8 +134,8 @@ def test_banklist_url(self):
@network
def test_spam_url(self):
url = (
- "http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&"
- "lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam"
+ "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/data/spam.html"
)
df1 = self.read_html(url, ".*Water.*")
df2 = self.read_html(url, "Unit")
| Backport PR #28710: CI Failing: TestReadHtml.test_spam_url #28708 | https://api.github.com/repos/pandas-dev/pandas/pulls/28716 | 2019-10-01T12:55:24Z | 2019-10-01T14:22:45Z | 2019-10-01T14:22:45Z | 2019-10-01T14:23:12Z |
precursor to Split out test_pytables.py to sub-module of tests | diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py
index ae604b1141204..46d8ef04dd8e5 100644
--- a/pandas/tests/io/pytables/test_pytables.py
+++ b/pandas/tests/io/pytables/test_pytables.py
@@ -51,6 +51,19 @@
tables = pytest.importorskip("tables")
+@pytest.fixture
+def setup_path():
+ """Fixture for setup path"""
+ return "tmp.__{}__.h5".format(tm.rands(10))
+
+
+@pytest.fixture(scope="class", autouse=True)
+def setup_mode():
+ tm.reset_testing_mode()
+ yield
+ tm.set_testing_mode()
+
+
# TODO:
# remove when gh-24839 is fixed; this affects numpy 1.16
# and pytables 3.4.4
@@ -148,36 +161,16 @@ def _maybe_remove(store, key):
pass
-class Base:
- @classmethod
- def setup_class(cls):
-
- # Pytables 3.0.0 deprecates lots of things
- tm.reset_testing_mode()
-
- @classmethod
- def teardown_class(cls):
-
- # Pytables 3.0.0 deprecates lots of things
- tm.set_testing_mode()
-
- def setup_method(self, method):
- self.path = "tmp.__{}__.h5".format(tm.rands(10))
-
- def teardown_method(self, method):
- pass
-
-
@pytest.mark.single
-class TestHDFStore(Base):
- def test_format_kwarg_in_constructor(self):
+class TestHDFStore:
+ def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
- def test_context(self):
- path = create_tempfile(self.path)
+ def test_context(self, setup_path):
+ path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
@@ -196,8 +189,8 @@ def test_context(self):
finally:
safe_remove(path)
- def test_conv_read_write(self):
- path = create_tempfile(self.path)
+ def test_conv_read_write(self, setup_path):
+ path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
@@ -222,24 +215,24 @@ def roundtrip(key, obj, **kwargs):
finally:
safe_remove(path)
- def test_long_strings(self):
+ def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
assert_frame_equal(df, result)
- def test_api(self):
+ def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
@@ -251,7 +244,7 @@ def test_api(self):
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
assert_frame_equal(read_hdf(path, "df"), df)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
@@ -263,7 +256,7 @@ def test_api(self):
df.iloc[10:].to_hdf(path, "df", append=True)
assert_frame_equal(read_hdf(path, "df"), df)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
@@ -278,7 +271,7 @@ def test_api(self):
df.to_hdf(path, "df")
assert_frame_equal(read_hdf(path, "df"), df)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
@@ -305,7 +298,7 @@ def test_api(self):
store.append("df", df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select("df"), df)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
@@ -326,10 +319,10 @@ def test_api(self):
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
- def test_api_default_format(self):
+ def test_api_default_format(self, setup_path):
# default_format option
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
@@ -349,7 +342,7 @@ def test_api_default_format(self):
pd.set_option("io.hdf.default_format", None)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
@@ -370,9 +363,9 @@ def test_api_default_format(self):
pd.set_option("io.hdf.default_format", None)
- def test_keys(self):
+ def test_keys(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
@@ -382,12 +375,12 @@ def test_keys(self):
assert set(store.keys()) == expected
assert set(store) == expected
- def test_keys_ignore_hdf_softlink(self):
+ def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
@@ -399,15 +392,15 @@ def test_keys_ignore_hdf_softlink(self):
# Should ignore the softlink
assert store.keys() == ["/df"]
- def test_iter_empty(self):
+ def test_iter_empty(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
- def test_repr(self):
+ def test_repr(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
@@ -441,7 +434,7 @@ def test_repr(self):
store.info()
# storers
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
@@ -451,9 +444,9 @@ def test_repr(self):
str(s)
@ignore_natural_naming_warning
- def test_contains(self):
+ def test_contains(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
@@ -470,9 +463,9 @@ def test_contains(self):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
- def test_versioning(self):
+ def test_versioning(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
@@ -493,13 +486,13 @@ def test_versioning(self):
with pytest.raises(Exception):
store.select("df2")
- def test_mode(self):
+ def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
@@ -511,7 +504,7 @@ def check(mode):
assert store._handle.mode == mode
store.close()
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
@@ -522,7 +515,7 @@ def check(mode):
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
@@ -543,7 +536,7 @@ def check(mode):
def check_default_mode():
# read_hdf uses default mode
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
assert_frame_equal(result, df)
@@ -554,9 +547,9 @@ def check_default_mode():
check("w")
check_default_mode()
- def test_reopen_handle(self):
+ def test_reopen_handle(self, setup_path):
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
@@ -602,9 +595,9 @@ def test_reopen_handle(self):
store.close()
assert not store.is_open
- def test_open_args(self):
+ def test_open_args(self, setup_path):
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
@@ -623,16 +616,16 @@ def test_open_args(self):
# the file should not have actually been written
assert not os.path.exists(path)
- def test_flush(self):
+ def test_flush(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
- def test_get(self):
+ def test_get(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
@@ -666,7 +659,7 @@ def test_get(self):
),
],
)
- def test_walk(self, where, expected):
+ def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
@@ -705,9 +698,9 @@ def test_walk(self, where, expected):
else:
tm.assert_series_equal(obj, objs[leaf])
- def test_getattr(self):
+ def test_getattr(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
@@ -732,9 +725,9 @@ def test_getattr(self):
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
- def test_put(self):
+ def test_put(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
@@ -763,9 +756,9 @@ def test_put(self):
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
- def test_put_string_index(self):
+ def test_put_string_index(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
@@ -792,9 +785,9 @@ def test_put_string_index(self):
store["b"] = df
tm.assert_frame_equal(store["b"], df)
- def test_put_compression(self):
+ def test_put_compression(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
@@ -805,10 +798,10 @@ def test_put_compression(self):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
- def test_put_compression_blosc(self):
+ def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
@@ -817,13 +810,13 @@ def test_put_compression_blosc(self):
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
- def test_complibs_default_settings(self):
+ def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
- with ensure_clean_path(self.path) as tmpfile:
+ with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
@@ -834,7 +827,7 @@ def test_complibs_default_settings(self):
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
- with ensure_clean_path(self.path) as tmpfile:
+ with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
@@ -845,7 +838,7 @@ def test_complibs_default_settings(self):
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
- with ensure_clean_path(self.path) as tmpfile:
+ with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
@@ -856,7 +849,7 @@ def test_complibs_default_settings(self):
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
- with ensure_clean_path(self.path) as tmpfile:
+ with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
@@ -870,7 +863,7 @@ def test_complibs_default_settings(self):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
- def test_complibs(self):
+ def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
@@ -887,7 +880,7 @@ def test_complibs(self):
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
- with ensure_clean_path(self.path) as tmpfile:
+ with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
@@ -906,13 +899,13 @@ def test_complibs(self):
assert node.filters.complib == lib
h5table.close()
- def test_put_integer(self):
+ def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
- self._check_roundtrip(df, tm.assert_frame_equal)
+ self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@xfail_non_writeable
- def test_put_mixed_type(self):
+ def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
@@ -928,7 +921,7 @@ def test_put_mixed_type(self):
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
@@ -942,9 +935,9 @@ def test_put_mixed_type(self):
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
- def test_append(self):
+ def test_append(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# this is allowed by almost always don't want to do it
# tables.NaturalNameWarning):
@@ -1010,9 +1003,9 @@ def test_append(self):
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
- def test_append_series(self):
+ def test_append_series(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
@@ -1056,11 +1049,11 @@ def test_append_series(self):
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
- def test_store_index_types(self):
+ def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
@@ -1093,9 +1086,9 @@ def check(format, index):
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
- def test_encoding(self):
+ def test_encoding(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
@@ -1122,7 +1115,7 @@ def test_encoding(self):
],
)
@pytest.mark.parametrize("dtype", ["category", object])
- def test_latin_encoding(self, dtype, val):
+ def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
@@ -1130,7 +1123,7 @@ def test_latin_encoding(self, dtype, val):
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
- with ensure_clean_path(self.path) as store:
+ with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
@@ -1147,9 +1140,9 @@ def test_latin_encoding(self, dtype, val):
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
- def test_append_some_nans(self):
+ def test_append_some_nans(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
@@ -1193,9 +1186,9 @@ def test_append_some_nans(self):
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
- def test_append_all_nans(self):
+ def test_append_all_nans(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
@@ -1283,14 +1276,14 @@ def test_append_all_nans(self):
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
- def test_read_missing_key_close_store(self):
+ def test_read_missing_key_close_store(self, setup_path):
# GH 25766
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
@@ -1301,9 +1294,9 @@ def test_read_missing_key_close_store(self):
# read with KeyError before another write
df.to_hdf(path, "k2")
- def test_append_frame_column_oriented(self):
+ def test_append_frame_column_oriented(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
@@ -1325,10 +1318,10 @@ def test_append_frame_column_oriented(self):
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
- def test_append_with_different_block_ordering(self):
+ def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
for i in range(10):
@@ -1351,7 +1344,7 @@ def test_append_with_different_block_ordering(self):
# test a different ordering but with more fields (like invalid
# combinate)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
@@ -1368,9 +1361,9 @@ def test_append_with_different_block_ordering(self):
with pytest.raises(ValueError):
store.append("df", df)
- def test_append_with_strings(self):
+ def test_append_with_strings(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
@@ -1444,7 +1437,7 @@ def check_col(key, name, size):
result = store.select("df")
tm.assert_frame_equal(result, df)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
@@ -1484,9 +1477,9 @@ def check_col(key, name, size):
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
- def test_append_with_empty_string(self):
+ def test_append_with_empty_string(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
@@ -1494,9 +1487,9 @@ def test_append_with_empty_string(self):
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
- def test_to_hdf_with_min_itemsize(self):
+ def test_to_hdf_with_min_itemsize(self, setup_path):
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
@@ -1516,20 +1509,20 @@ def test_to_hdf_with_min_itemsize(self):
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=xfail_non_writeable), "table"]
)
- def test_to_hdf_errors(self, format):
+ def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
- def test_append_with_data_columns(self):
+ def test_append_with_data_columns(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
@@ -1570,7 +1563,7 @@ def check_col(key, name, size):
== size
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
@@ -1585,7 +1578,7 @@ def check_col(key, name, size):
)
check_col("df", "string", 30)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
@@ -1600,7 +1593,7 @@ def check_col(key, name, size):
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
@@ -1633,7 +1626,7 @@ def check_col(key, name, size):
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
@@ -1657,7 +1650,7 @@ def check_col(key, name, size):
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
@@ -1681,9 +1674,9 @@ def check_col(key, name, size):
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
- def test_create_table_index(self):
+ def test_create_table_index(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
@@ -1713,7 +1706,7 @@ def col(t, column):
with pytest.raises(TypeError):
store.create_table_index("f2")
- def test_append_hierarchical(self):
+ def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
@@ -1721,7 +1714,7 @@ def test_append_hierarchical(self):
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
@@ -1737,7 +1730,7 @@ def test_append_hierarchical(self):
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
- def test_column_multiindex(self):
+ def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
@@ -1749,7 +1742,7 @@ def test_column_multiindex(self):
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
@@ -1767,7 +1760,7 @@ def test_column_multiindex(self):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
@@ -1781,18 +1774,18 @@ def test_column_multiindex(self):
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
- def test_store_multiindex(self):
+ def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
@@ -1858,7 +1851,7 @@ def make_index(names=None):
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
- def test_select_columns_in_where(self):
+ def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
@@ -1872,7 +1865,7 @@ def test_select_columns_in_where(self):
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
@@ -1882,29 +1875,29 @@ def test_select_columns_in_where(self):
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
- def test_mi_data_columns(self):
+ def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
- def test_pass_spec_to_storer(self):
+ def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
@@ -1912,9 +1905,9 @@ def test_pass_spec_to_storer(self):
store.select("df", where=[("columns=A")])
@xfail_non_writeable
- def test_append_misc(self):
+ def test_append_misc(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
@@ -1927,7 +1920,7 @@ def test_append_misc(self):
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
- with ensure_clean_store(self.path, mode="w") as store:
+ with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
@@ -1942,7 +1935,7 @@ def check(obj, comparator):
check(df, tm.assert_frame_equal)
# empty frame, GH4273
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
@@ -1962,9 +1955,9 @@ def check(obj, comparator):
store.put("df2", df)
assert_frame_equal(store.select("df2"), df)
- def test_append_raise(self):
+ def test_append_raise(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
@@ -2007,18 +2000,18 @@ def test_append_raise(self):
with pytest.raises(ValueError):
store.append("df", df)
- def test_table_index_incompatible_dtypes(self):
+ def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
- def test_table_values_dtypes_roundtrip(self):
+ def test_table_values_dtypes_roundtrip(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
@@ -2072,7 +2065,7 @@ def test_table_values_dtypes_roundtrip(self):
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
- def test_table_mixed_dtypes(self):
+ def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
@@ -2090,13 +2083,13 @@ def test_table_mixed_dtypes(self):
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
- def test_unimplemented_dtypes_table_columns(self):
+ def test_unimplemented_dtypes_table_columns(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
@@ -2114,7 +2107,7 @@ def test_unimplemented_dtypes_table_columns(self):
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@@ -2127,7 +2120,7 @@ def test_unimplemented_dtypes_table_columns(self):
"exactly equal to 1.15.0: gh-22098"
),
)
- def test_calendar_roundtrip_issue(self):
+ def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
@@ -2145,7 +2138,7 @@ def test_calendar_roundtrip_issue(self):
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
@@ -2155,18 +2148,18 @@ def test_calendar_roundtrip_issue(self):
result = store.select("table")
assert_series_equal(result, s)
- def test_roundtrip_tz_aware_index(self):
+ def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
- def test_append_with_timedelta(self):
+ def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
@@ -2182,7 +2175,7 @@ def test_append_with_timedelta(self):
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
@@ -2215,9 +2208,9 @@ def test_append_with_timedelta(self):
result = store.select("df2")
assert_frame_equal(result, df)
- def test_remove(self):
+ def test_remove(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
@@ -2255,9 +2248,9 @@ def test_remove(self):
del store["b"]
assert len(store) == 0
- def test_invalid_terms(self):
+ def test_invalid_terms(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
@@ -2279,7 +2272,7 @@ def test_invalid_terms(self):
store.select("df", "index>")
# from the docs
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
@@ -2294,7 +2287,7 @@ def test_invalid_terms(self):
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
@@ -2305,9 +2298,9 @@ def test_invalid_terms(self):
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
- def test_same_name_scoping(self):
+ def test_same_name_scoping(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
import pandas as pd
@@ -2331,29 +2324,31 @@ def test_same_name_scoping(self):
result = store.select("df", "index>datetime(2013,1,5)")
assert_frame_equal(result, expected)
- def test_series(self):
+ def test_series(self, setup_path):
s = tm.makeStringSeries()
- self._check_roundtrip(s, tm.assert_series_equal)
+ self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
- self._check_roundtrip(ts, tm.assert_series_equal)
+ self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
- self._check_roundtrip(ts2, tm.assert_series_equal)
+ self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
- self._check_roundtrip(ts3, tm.assert_series_equal, check_index_type=False)
+ self._check_roundtrip(
+ ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
+ )
- def test_float_index(self):
+ def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
- self._check_roundtrip(s, tm.assert_series_equal)
+ self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@xfail_non_writeable
- def test_tuple_index(self):
+ def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
@@ -2363,11 +2358,11 @@ def test_tuple_index(self):
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
- self._check_roundtrip(DF, tm.assert_frame_equal)
+ self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
- def test_index_types(self):
+ def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
@@ -2378,54 +2373,54 @@ def test_index_types(self):
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
- self._check_roundtrip(ser, func)
+ self._check_roundtrip(ser, func, path=setup_path)
- def test_timeseries_preepoch(self):
+ def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
- self._check_roundtrip(ts, tm.assert_series_equal)
+ self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@@ -2433,7 +2428,7 @@ def test_timeseries_preepoch(self):
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
- def test_frame(self, compression):
+ def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
@@ -2441,13 +2436,19 @@ def test_frame(self, compression):
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
- self._check_roundtrip_table(df, tm.assert_frame_equal, compression=compression)
- self._check_roundtrip(df, tm.assert_frame_equal, compression=compression)
+ self._check_roundtrip_table(
+ df, tm.assert_frame_equal, path=setup_path, compression=compression
+ )
+ self._check_roundtrip(
+ df, tm.assert_frame_equal, path=setup_path, compression=compression
+ )
tdf = tm.makeTimeDataFrame()
- self._check_roundtrip(tdf, tm.assert_frame_equal, compression=compression)
+ self._check_roundtrip(
+ tdf, tm.assert_frame_equal, path=setup_path, compression=compression
+ )
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
@@ -2455,38 +2456,38 @@ def test_frame(self, compression):
assert recons._data.is_consolidated()
# empty
- self._check_roundtrip(df[:0], tm.assert_frame_equal)
+ self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@xfail_non_writeable
- def test_empty_series_frame(self):
+ def test_empty_series_frame(self, setup_path):
s0 = Series()
s1 = Series(name="myseries")
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
- self._check_roundtrip(s0, tm.assert_series_equal)
- self._check_roundtrip(s1, tm.assert_series_equal)
- self._check_roundtrip(df0, tm.assert_frame_equal)
- self._check_roundtrip(df1, tm.assert_frame_equal)
- self._check_roundtrip(df2, tm.assert_frame_equal)
+ self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
+ self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
+ self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
+ self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
+ self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
- def test_empty_series(self, dtype):
+ def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
- self._check_roundtrip(s, tm.assert_series_equal)
+ self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
- def test_can_serialize_dates(self):
+ def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
- self._check_roundtrip(frame, tm.assert_frame_equal)
+ self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
- def test_store_hierarchical(self):
+ def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
@@ -2494,39 +2495,39 @@ def test_store_hierarchical(self):
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
- self._check_roundtrip(frame, tm.assert_frame_equal)
- self._check_roundtrip(frame.T, tm.assert_frame_equal)
- self._check_roundtrip(frame["A"], tm.assert_series_equal)
+ self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
+ self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
+ self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
- def test_store_index_name(self):
+ def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
- def test_store_index_name_with_tz(self):
+ def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
- def test_store_index_name_numpy_str(self, table_format):
+ def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
@@ -2539,7 +2540,7 @@ def test_store_index_name_numpy_str(self, table_format):
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
@@ -2548,11 +2549,11 @@ def test_store_index_name_numpy_str(self, table_format):
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
- def test_store_series_name(self):
+ def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@@ -2561,7 +2562,7 @@ def test_store_series_name(self):
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
- def test_store_mixed(self, compression):
+ def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
@@ -2575,10 +2576,10 @@ def _make_one():
df1 = _make_one()
df2 = _make_one()
- self._check_roundtrip(df1, tm.assert_frame_equal)
- self._check_roundtrip(df2, tm.assert_frame_equal)
+ self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
+ self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
@@ -2586,25 +2587,34 @@ def _make_one():
# check that can store Series of all of these types
self._check_roundtrip(
- df1["obj1"], tm.assert_series_equal, compression=compression
+ df1["obj1"],
+ tm.assert_series_equal,
+ path=setup_path,
+ compression=compression,
)
self._check_roundtrip(
- df1["bool1"], tm.assert_series_equal, compression=compression
+ df1["bool1"],
+ tm.assert_series_equal,
+ path=setup_path,
+ compression=compression,
)
self._check_roundtrip(
- df1["int1"], tm.assert_series_equal, compression=compression
+ df1["int1"],
+ tm.assert_series_equal,
+ path=setup_path,
+ compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
- def test_select_with_dups(self):
+ def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
@@ -2631,7 +2641,7 @@ def test_select_with_dups(self):
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
@@ -2651,7 +2661,7 @@ def test_select_with_dups(self):
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
@@ -2660,18 +2670,18 @@ def test_select_with_dups(self):
result = store.select("df", columns=["B", "A"])
assert_frame_equal(result, expected, by_blocks=True)
- def test_overwrite_node(self):
+ def test_overwrite_node(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
- def test_select(self):
+ def test_select(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
@@ -2709,9 +2719,9 @@ def test_select(self):
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
- def test_select_dtypes(self):
+ def test_select_dtypes(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
@@ -2767,7 +2777,7 @@ def test_select_dtypes(self):
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
@@ -2806,7 +2816,7 @@ def test_select_dtypes(self):
# test selection with comparison against numpy scalar
# GH 11283
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
@@ -2816,9 +2826,9 @@ def test_select_dtypes(self):
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
- def test_select_with_many_inputs(self):
+ def test_select_with_many_inputs(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
@@ -2869,10 +2879,10 @@ def test_select_with_many_inputs(self):
tm.assert_frame_equal(expected, result)
assert len(result) == 100
- def test_select_iterator(self):
+ def test_select_iterator(self, setup_path):
# single table
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
@@ -2893,7 +2903,7 @@ def test_select_iterator(self):
result = concat(results)
tm.assert_frame_equal(result, expected)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
@@ -2904,7 +2914,7 @@ def test_select_iterator(self):
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
@@ -2918,7 +2928,7 @@ def test_select_iterator(self):
# multiple
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
@@ -2939,14 +2949,14 @@ def test_select_iterator(self):
result = concat(results)
tm.assert_frame_equal(expected, result)
- def test_select_iterator_complete_8014(self):
+ def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
@@ -2980,7 +2990,7 @@ def test_select_iterator_complete_8014(self):
tm.assert_frame_equal(expected, result)
# with iterator, full range
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
@@ -3014,14 +3024,14 @@ def test_select_iterator_complete_8014(self):
result = concat(results)
tm.assert_frame_equal(expected, result)
- def test_select_iterator_non_complete_8014(self):
+ def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
@@ -3056,7 +3066,7 @@ def test_select_iterator_non_complete_8014(self):
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
@@ -3069,7 +3079,7 @@ def test_select_iterator_non_complete_8014(self):
results = [s for s in store.select("df", where=where, chunksize=chunksize)]
assert 0 == len(results)
- def test_select_iterator_many_empty_frames(self):
+ def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
@@ -3077,7 +3087,7 @@ def test_select_iterator_many_empty_frames(self):
chunksize = int(1e4)
# with iterator, range limited to the first chunk
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
@@ -3134,14 +3144,14 @@ def test_select_iterator_many_empty_frames(self):
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
- def test_retain_index_attributes(self):
+ def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
@@ -3194,8 +3204,8 @@ def test_retain_index_attributes(self):
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
- def test_retain_index_attributes2(self):
- with ensure_clean_path(self.path) as path:
+ def test_retain_index_attributes2(self, setup_path):
+ with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
@@ -3232,11 +3242,11 @@ def test_retain_index_attributes2(self):
assert read_hdf(path, "data").index.name is None
- def test_frame_select(self):
+ def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
@@ -3265,14 +3275,14 @@ def test_frame_select(self):
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
- def test_frame_select_complex(self):
+ def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
@@ -3317,7 +3327,7 @@ def test_frame_select_complex(self):
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
- def test_frame_select_complex2(self):
+ def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
@@ -3381,13 +3391,13 @@ def test_frame_select_complex2(self):
store.close()
- def test_invalid_filtering(self):
+ def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
@@ -3398,9 +3408,9 @@ def test_invalid_filtering(self):
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
- def test_string_select(self):
+ def test_string_select(self, setup_path):
# GH 2973
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
@@ -3440,11 +3450,11 @@ def test_string_select(self):
expected = df[df.int != 2]
assert_frame_equal(result, expected)
- def test_read_column(self):
+ def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
@@ -3513,10 +3523,10 @@ def test_read_column(self):
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
- def test_coordinates(self):
+ def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
@@ -3561,7 +3571,7 @@ def test_coordinates(self):
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
@@ -3617,13 +3627,13 @@ def test_coordinates(self):
expected = df[5:10]
tm.assert_frame_equal(result, expected)
- def test_append_to_multiple(self):
+ def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
@@ -3647,13 +3657,13 @@ def test_append_to_multiple(self):
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
- def test_append_to_multiple_dropna(self):
+ def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
@@ -3667,13 +3677,13 @@ def test_append_to_multiple_dropna(self):
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
- def test_append_to_multiple_dropna_false(self):
+ def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
@@ -3685,13 +3695,13 @@ def test_append_to_multiple_dropna_false(self):
assert not store.select("df1a").index.equals(store.select("df2a").index)
- def test_select_as_multiple(self):
+ def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
@@ -3759,9 +3769,9 @@ def test_select_as_multiple(self):
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
- def test_nan_selection_bug_4858(self):
+ def test_nan_selection_bug_4858(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
@@ -3777,9 +3787,9 @@ def test_nan_selection_bug_4858(self):
result = store.select("df", where="values>2.0")
assert_frame_equal(result, expected)
- def test_start_stop_table(self):
+ def test_start_stop_table(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
@@ -3795,10 +3805,10 @@ def test_start_stop_table(self):
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
- def test_start_stop_multiple(self):
+ def test_start_stop_multiple(self, setup_path):
# GH 16209
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
@@ -3811,9 +3821,9 @@ def test_start_stop_multiple(self):
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
- def test_start_stop_fixed(self):
+ def test_start_stop_fixed(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
@@ -3851,13 +3861,13 @@ def test_start_stop_fixed(self):
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
- def test_select_filter_corner(self):
+ def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
@@ -3868,7 +3878,7 @@ def test_select_filter_corner(self):
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
- def test_path_pathlib(self):
+ def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
@@ -3877,7 +3887,7 @@ def test_path_pathlib(self):
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
- def test_contiguous_mixed_data_table(self, start, stop):
+ def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
@@ -3887,13 +3897,13 @@ def test_contiguous_mixed_data_table(self, start, stop):
}
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
assert_frame_equal(df[start:stop], result)
- def test_path_pathlib_hdfstore(self):
+ def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
@@ -3907,14 +3917,14 @@ def reader(path):
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
- def test_pickle_path_localpath(self):
+ def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
- def test_path_localpath_hdfstore(self):
+ def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
@@ -3928,23 +3938,25 @@ def reader(path):
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
- def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
+ def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
- with ensure_clean_store(self.path, "w", **options) as store:
+ with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
- def _check_double_roundtrip(self, obj, comparator, compression=False, **kwargs):
+ def _check_double_roundtrip(
+ self, obj, comparator, path, compression=False, **kwargs
+ ):
options = {}
if compression:
options["complib"] = compression or _default_compressor
- with ensure_clean_store(self.path, "w", **options) as store:
+ with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
@@ -3952,21 +3964,21 @@ def _check_double_roundtrip(self, obj, comparator, compression=False, **kwargs):
again = store["obj"]
comparator(again, obj, **kwargs)
- def _check_roundtrip_table(self, obj, comparator, compression=False):
+ def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
- with ensure_clean_store(self.path, "w", **options) as store:
+ with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
- def test_multiple_open_close(self):
+ def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
@@ -3980,7 +3992,7 @@ def test_multiple_open_close(self):
assert "CLOSED" in store.info()
assert not store.is_open
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
@@ -4042,7 +4054,7 @@ def test_multiple_open_close(self):
assert not store2.is_open
# ops on a closed store
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
@@ -4086,7 +4098,7 @@ def test_multiple_open_close(self):
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
- def test_pytables_native_read(self, datapath):
+ def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
@@ -4096,7 +4108,7 @@ def test_pytables_native_read(self, datapath):
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
- def test_pytables_native2_read(self, datapath):
+ def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
@@ -4105,7 +4117,7 @@ def test_pytables_native2_read(self, datapath):
assert isinstance(d1, DataFrame)
@xfail_non_writeable
- def test_legacy_table_fixed_format_read_py2(self, datapath):
+ def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
@@ -4119,7 +4131,7 @@ def test_legacy_table_fixed_format_read_py2(self, datapath):
)
assert_frame_equal(expected, result)
- def test_legacy_table_read_py2(self, datapath):
+ def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
@@ -4130,7 +4142,7 @@ def test_legacy_table_read_py2(self, datapath):
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
assert_frame_equal(expected, result)
- def test_copy(self):
+ def test_copy(self, setup_path):
with catch_warnings(record=True):
@@ -4179,7 +4191,7 @@ def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
df = tm.makeDataFrame()
try:
- path = create_tempfile(self.path)
+ path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
@@ -4188,17 +4200,17 @@ def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
finally:
safe_remove(path)
- def test_store_datetime_fractional_secs(self):
+ def test_store_datetime_fractional_secs(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
- def test_tseries_indices_series(self):
+ def test_tseries_indices_series(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
@@ -4217,9 +4229,9 @@ def test_tseries_indices_series(self):
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
- def test_tseries_indices_frame(self):
+ def test_tseries_indices_frame(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
@@ -4238,7 +4250,7 @@ def test_tseries_indices_frame(self):
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
- def test_unicode_index(self):
+ def test_unicode_index(self, setup_path):
unicode_values = ["\u03c3", "\u03c3\u03c3"]
@@ -4246,30 +4258,30 @@ def test_unicode_index(self):
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
- self._check_roundtrip(s, tm.assert_series_equal)
+ self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
- def test_unicode_longer_encoded(self):
+ def test_unicode_longer_encoded(self, setup_path):
# GH 11234
char = "\u0394"
df = pd.DataFrame({"A": [char]})
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
df = pd.DataFrame({"A": ["a", char], "B": ["b", "b"]})
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
@xfail_non_writeable
- def test_store_datetime_mixed(self):
+ def test_store_datetime_mixed(self, setup_path):
df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
ts = tm.makeTimeSeries()
df["d"] = ts.index[:3]
- self._check_roundtrip(df, tm.assert_frame_equal)
+ self._check_roundtrip(df, tm.assert_frame_equal, path=setup_path)
# FIXME: don't leave commented-out code
# def test_cant_write_multiindex_table(self):
@@ -4281,14 +4293,14 @@ def test_store_datetime_mixed(self):
# with pytest.raises(Exception):
# store.put('foo', df, format='table')
- def test_append_with_diff_col_name_types_raises_value_error(self):
+ def test_append_with_diff_col_name_types_raises_value_error(self, setup_path):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({"a": np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({("1", 2): np.random.randn(10)})
df5 = DataFrame({("1", 2, object): np.random.randn(10)})
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
name = "df_{}".format(tm.rands(10))
store.append(name, df)
@@ -4296,7 +4308,7 @@ def test_append_with_diff_col_name_types_raises_value_error(self):
with pytest.raises(ValueError):
store.append(name, d)
- def test_query_with_nested_special_character(self):
+ def test_query_with_nested_special_character(self, setup_path):
df = DataFrame(
{
"a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
@@ -4304,14 +4316,14 @@ def test_query_with_nested_special_character(self):
}
)
expected = df[df.a == "test & test"]
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
result = store.select("test", 'a = "test & test"')
tm.assert_frame_equal(expected, result)
- def test_categorical(self):
+ def test_categorical(self, setup_path):
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# Basic
_maybe_remove(store, "s")
@@ -4429,7 +4441,7 @@ def test_categorical(self):
):
store.select("df3/meta/s/meta")
- def test_categorical_conversion(self):
+ def test_categorical_conversion(self, setup_path):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
@@ -4443,7 +4455,7 @@ def test_categorical_conversion(self):
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
@@ -4454,12 +4466,12 @@ def test_categorical_conversion(self):
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
- def test_categorical_nan_only_columns(self):
+ def test_categorical_nan_only_columns(self, setup_path):
# GH18413
# Check that read_hdf with categorical columns with NaN-only values can
# be read back.
@@ -4475,15 +4487,15 @@ def test_categorical_nan_only_columns(self):
df["b"] = df.b.astype("category")
df["d"] = df.b.astype("category")
expected = df
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df")
tm.assert_frame_equal(result, expected)
- def test_duplicate_column_name(self):
+ def test_duplicate_column_name(self, setup_path):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, "df", format="fixed")
@@ -4494,30 +4506,30 @@ def test_duplicate_column_name(self):
assert df.equals(other)
assert other.equals(df)
- def test_round_trip_equals(self):
+ def test_round_trip_equals(self, setup_path):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
- def test_preserve_timedeltaindex_type(self):
+ def test_preserve_timedeltaindex_type(self, setup_path):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(start="0s", periods=10, freq="1s", name="example")
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["df"] = df
assert_frame_equal(store["df"], df)
- def test_columns_multiindex_modified(self):
+ def test_columns_multiindex_modified(self, setup_path):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
@@ -4527,7 +4539,7 @@ def test_columns_multiindex_modified(self):
df = df.set_index(keys="E", append=True)
data_columns = df.index.names + df.columns.tolist()
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(
path,
"df",
@@ -4542,7 +4554,7 @@ def test_columns_multiindex_modified(self):
assert cols2load_original == cols2load
@ignore_natural_naming_warning
- def test_to_hdf_with_object_column_names(self):
+ def test_to_hdf_with_object_column_names(self, setup_path):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
@@ -4562,7 +4574,7 @@ def test_to_hdf_with_object_column_names(self):
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
msg = "cannot have non-object label DataIndexableCol"
with pytest.raises(ValueError, match=msg):
@@ -4570,7 +4582,7 @@ def test_to_hdf_with_object_column_names(self):
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df.to_hdf(path, "df", format="table", data_columns=True)
result = pd.read_hdf(
@@ -4578,14 +4590,14 @@ def test_to_hdf_with_object_column_names(self):
)
assert len(result)
- def test_read_hdf_open_store(self):
+ def test_read_hdf_open_store(self, setup_path):
# GH10330
# No check for non-string path_or-buf, and no test of open store
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
direct = read_hdf(path, "df")
store = HDFStore(path, mode="r")
@@ -4594,12 +4606,12 @@ def test_read_hdf_open_store(self):
assert store.is_open
store.close()
- def test_read_hdf_iterator(self):
+ def test_read_hdf_iterator(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w", format="t")
direct = read_hdf(path, "df")
iterator = read_hdf(path, "df", iterator=True)
@@ -4608,10 +4620,10 @@ def test_read_hdf_iterator(self):
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
- def test_read_hdf_errors(self):
+ def test_read_hdf_errors(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pytest.raises(IOError):
read_hdf(path, "key")
@@ -4626,20 +4638,20 @@ def test_read_hdf_generic_buffer_errors(self):
with pytest.raises(NotImplementedError):
read_hdf(BytesIO(b""), "df")
- def test_invalid_complib(self):
+ def test_invalid_complib(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, "df", complib="foolib")
# GH10443
- def test_read_nokey(self):
+ def test_read_nokey(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a")
reread = read_hdf(path)
assert_frame_equal(df, reread)
@@ -4648,11 +4660,11 @@ def test_read_nokey(self):
with pytest.raises(ValueError):
read_hdf(path)
- def test_read_nokey_table(self):
+ def test_read_nokey_table(self, setup_path):
# GH13231
df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a", format="table")
reread = read_hdf(path)
assert_frame_equal(df, reread)
@@ -4661,8 +4673,8 @@ def test_read_nokey_table(self):
with pytest.raises(ValueError):
read_hdf(path)
- def test_read_nokey_empty(self):
- with ensure_clean_path(self.path) as path:
+ def test_read_nokey_empty(self, setup_path):
+ with ensure_clean_path(setup_path) as path:
store = HDFStore(path)
store.close()
@@ -4670,7 +4682,7 @@ def test_read_nokey_empty(self):
read_hdf(path)
@td.skip_if_no("pathlib")
- def test_read_from_pathlib_path(self):
+ def test_read_from_pathlib_path(self, setup_path):
# GH11773
from pathlib import Path
@@ -4678,7 +4690,7 @@ def test_read_from_pathlib_path(self):
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
- with ensure_clean_path(self.path) as filename:
+ with ensure_clean_path(setup_path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, "df", mode="a")
@@ -4687,7 +4699,7 @@ def test_read_from_pathlib_path(self):
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
- def test_read_from_py_localpath(self):
+ def test_read_from_py_localpath(self, setup_path):
# GH11773
from py.path import local as LocalPath
@@ -4695,7 +4707,7 @@ def test_read_from_py_localpath(self):
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
- with ensure_clean_path(self.path) as filename:
+ with ensure_clean_path(setup_path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, "df", mode="a")
@@ -4703,11 +4715,11 @@ def test_read_from_py_localpath(self):
tm.assert_frame_equal(expected, actual)
- def test_query_long_float_literal(self):
+ def test_query_long_float_literal(self, setup_path):
# GH 14241
df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
cutoff = 1000000000.0006
@@ -4724,7 +4736,7 @@ def test_query_long_float_literal(self):
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
- def test_query_compare_column_type(self):
+ def test_query_compare_column_type(self, setup_path):
# GH 15492
df = pd.DataFrame(
{
@@ -4736,7 +4748,7 @@ def test_query_compare_column_type(self):
columns=["date", "real_date", "float", "int"],
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
ts = pd.Timestamp("2014-01-01") # noqa
@@ -4773,12 +4785,12 @@ def test_query_compare_column_type(self):
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("format", ["fixed", "table"])
- def test_read_hdf_series_mode_r(self, format):
+ def test_read_hdf_series_mode_r(self, format, setup_path):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
series.to_hdf(path, key="data", format=format)
result = pd.read_hdf(path, key="data", mode="r")
tm.assert_series_equal(result, series)
@@ -4836,26 +4848,26 @@ def test_select_empty_where(self, where):
CategoricalIndex(list("abc")),
],
)
- def test_to_hdf_multiindex_extension_dtype(self, idx):
+ def test_to_hdf_multiindex_extension_dtype(self, idx, setup_path):
# GH 7775
mi = MultiIndex.from_arrays([idx, idx])
df = pd.DataFrame(0, index=mi, columns=["a"])
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pytest.raises(NotImplementedError, match="Saving a MultiIndex"):
df.to_hdf(path, "df")
-class TestHDFComplexValues(Base):
+class TestHDFComplexValues:
# GH10447
- def test_complex_fixed(self):
+ def test_complex_fixed(self, setup_path):
df = DataFrame(
np.random.rand(4, 5).astype(np.complex64),
index=list("abcd"),
columns=list("ABCDE"),
)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
assert_frame_equal(df, reread)
@@ -4865,19 +4877,19 @@ def test_complex_fixed(self):
index=list("abcd"),
columns=list("ABCDE"),
)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
assert_frame_equal(df, reread)
- def test_complex_table(self):
+ def test_complex_table(self, setup_path):
df = DataFrame(
np.random.rand(4, 5).astype(np.complex64),
index=list("abcd"),
columns=list("ABCDE"),
)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
reread = read_hdf(path, "df")
assert_frame_equal(df, reread)
@@ -4888,13 +4900,13 @@ def test_complex_table(self):
columns=list("ABCDE"),
)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", mode="w")
reread = read_hdf(path, "df")
assert_frame_equal(df, reread)
@xfail_non_writeable
- def test_complex_mixed_fixed(self):
+ def test_complex_mixed_fixed(self, setup_path):
complex64 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
)
@@ -4911,12 +4923,12 @@ def test_complex_mixed_fixed(self):
},
index=list("abcd"),
)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
assert_frame_equal(df, reread)
- def test_complex_mixed_table(self):
+ def test_complex_mixed_table(self, setup_path):
complex64 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
)
@@ -4934,17 +4946,17 @@ def test_complex_mixed_table(self):
index=list("abcd"),
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["A", "B"])
result = store.select("df", where="A>2")
assert_frame_equal(df.loc[df.A > 2], result)
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
reread = read_hdf(path, "df")
assert_frame_equal(df, reread)
- def test_complex_across_dimensions_fixed(self):
+ def test_complex_across_dimensions_fixed(self, setup_path):
with catch_warnings(record=True):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
@@ -4953,12 +4965,12 @@ def test_complex_across_dimensions_fixed(self):
objs = [s, df]
comps = [tm.assert_series_equal, tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
obj.to_hdf(path, "obj", format="fixed")
reread = read_hdf(path, "obj")
comp(obj, reread)
- def test_complex_across_dimensions(self):
+ def test_complex_across_dimensions(self, setup_path):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
df = DataFrame({"A": s, "B": s})
@@ -4968,12 +4980,12 @@ def test_complex_across_dimensions(self):
objs = [df]
comps = [tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
obj.to_hdf(path, "obj", format="table")
reread = read_hdf(path, "obj")
comp(obj, reread)
- def test_complex_indexing_error(self):
+ def test_complex_indexing_error(self, setup_path):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
@@ -4981,36 +4993,37 @@ def test_complex_indexing_error(self):
{"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128},
index=list("abcd"),
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
with pytest.raises(TypeError):
store.append("df", df, data_columns=["C"])
- def test_complex_series_error(self):
+ def test_complex_series_error(self, setup_path):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pytest.raises(TypeError):
s.to_hdf(path, "obj", format="t")
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
s.to_hdf(path, "obj", format="t", index=False)
reread = read_hdf(path, "obj")
tm.assert_series_equal(s, reread)
- def test_complex_append(self):
+ def test_complex_append(self, setup_path):
df = DataFrame(
{"a": np.random.randn(100).astype(np.complex128), "b": np.random.randn(100)}
)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["b"])
store.append("df", df)
result = store.select("df")
assert_frame_equal(pd.concat([df, df], 0), result)
-class TestTimezones(Base):
+# @pytest.mark.usefixtures("setup_path")
+class TestTimezones:
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
@@ -5024,7 +5037,7 @@ def _compare_with_tz(self, a, b):
"invalid tz comparison [{a_e}] [{b_e}]".format(a_e=a_e, b_e=b_e)
)
- def test_append_with_timezones_dateutil(self):
+ def test_append_with_timezones_dateutil(self, setup_path):
from datetime import timedelta
@@ -5035,7 +5048,7 @@ def test_append_with_timezones_dateutil(self):
gettz = lambda x: maybe_get_tz("dateutil/" + x)
# as columns
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df_tz")
df = DataFrame(
@@ -5101,7 +5114,7 @@ def test_append_with_timezones_dateutil(self):
store.append("df_tz", df)
# as index
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# GH 4098 example
df = DataFrame(
@@ -5125,12 +5138,12 @@ def test_append_with_timezones_dateutil(self):
result = store.select("df")
assert_frame_equal(result, df)
- def test_append_with_timezones_pytz(self):
+ def test_append_with_timezones_pytz(self, setup_path):
from datetime import timedelta
# as columns
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df_tz")
df = DataFrame(
@@ -5195,7 +5208,7 @@ def test_append_with_timezones_pytz(self):
store.append("df_tz", df)
# as index
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
# GH 4098 example
df = DataFrame(
@@ -5219,7 +5232,7 @@ def test_append_with_timezones_pytz(self):
result = store.select("df")
assert_frame_equal(result, df)
- def test_tseries_select_index_column(self):
+ def test_tseries_select_index_column(self, setup_path):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
@@ -5228,7 +5241,7 @@ def test_tseries_select_index_column(self):
rng = date_range("1/1/2000", "1/30/2000")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("frame", frame)
result = store.select_column("frame", "index")
assert rng.tz == DatetimeIndex(result.values).tz
@@ -5237,7 +5250,7 @@ def test_tseries_select_index_column(self):
rng = date_range("1/1/2000", "1/30/2000", tz="UTC")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("frame", frame)
result = store.select_column("frame", "index")
assert rng.tz == result.dt.tz
@@ -5246,13 +5259,13 @@ def test_tseries_select_index_column(self):
rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store.append("frame", frame)
result = store.select_column("frame", "index")
assert rng.tz == result.dt.tz
- def test_timezones_fixed(self):
- with ensure_clean_store(self.path) as store:
+ def test_timezones_fixed(self, setup_path):
+ with ensure_clean_store(setup_path) as store:
# index
rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
@@ -5277,24 +5290,24 @@ def test_timezones_fixed(self):
result = store["df"]
assert_frame_equal(result, df)
- def test_fixed_offset_tz(self):
+ def test_fixed_offset_tz(self, setup_path):
rng = date_range("1/1/2000 00:00:00-07:00", "1/30/2000 00:00:00-07:00")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
- def test_store_timezone(self):
+ def test_store_timezone(self, setup_path):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
@@ -5303,7 +5316,7 @@ def test_store_timezone(self):
assert_frame_equal(result, df)
# with tz setting
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
with set_timezone("EST5EDT"):
today = datetime.date(2013, 9, 10)
@@ -5315,7 +5328,7 @@ def test_store_timezone(self):
assert_frame_equal(result, df)
- def test_legacy_datetimetz_object(self, datapath):
+ def test_legacy_datetimetz_object(self, datapath, setup_path):
# legacy from < 0.17.0
# 8260
expected = DataFrame(
@@ -5331,9 +5344,9 @@ def test_legacy_datetimetz_object(self, datapath):
result = store["df"]
assert_frame_equal(result, expected)
- def test_dst_transitions(self):
+ def test_dst_transitions(self, setup_path):
# make sure we are not failing on transitions
- with ensure_clean_store(self.path) as store:
+ with ensure_clean_store(setup_path) as store:
times = pd.date_range(
"2013-10-26 23:00",
"2013-10-27 01:00",
@@ -5349,7 +5362,7 @@ def test_dst_transitions(self):
result = store.select("df")
assert_frame_equal(result, df)
- def test_read_with_where_tz_aware_index(self):
+ def test_read_with_where_tz_aware_index(self, setup_path):
# GH 11926
periods = 10
dts = pd.date_range("20151201", periods=periods, freq="D", tz="UTC")
@@ -5357,13 +5370,13 @@ def test_read_with_where_tz_aware_index(self):
expected = pd.DataFrame({"MYCOL": 0}, index=mi)
key = "mykey"
- with ensure_clean_path(self.path) as path:
+ with ensure_clean_path(setup_path) as path:
with pd.HDFStore(path) as store:
store.append(key, expected, format="table", append=True)
result = pd.read_hdf(path, key, where="DATE > 20151130")
assert_frame_equal(result, expected)
- def test_py2_created_with_datetimez(self, datapath):
+ def test_py2_created_with_datetimez(self, datapath, setup_path):
# The test HDF5 file was created in Python 2, but could not be read in
# Python 3.
#
| - precursor to #18498
- All tests in test_pytables are passing
@simonjayhawkins as discussed this is the pre work to splitting this large test file into smaller files. So far, I have fixturised the class base class and updated methods using self.path from base to use the setup_path fixture instead.
Next I plan to create a test file for TestTimezones class and make the test functional so the class itself is no longer required.
Once this is running fine, I will then apply similar approach to the other bigger classes. | https://api.github.com/repos/pandas-dev/pandas/pulls/28715 | 2019-10-01T12:50:51Z | 2019-10-01T15:51:05Z | 2019-10-01T15:51:05Z | 2019-10-01T15:54:04Z |
Backport PR #28671 on branch 0.25.x (BUG: restore limit in RangeIndex.get_indexer) | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 76c7ad208865d..cc072c6ee2491 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -49,7 +49,7 @@ Interval
Indexing
^^^^^^^^
--
+- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`).
-
-
-
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 16098c474a473..ae0ec5fcaea22 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -380,8 +380,10 @@ def get_loc(self, key, method=None, tolerance=None):
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(self, target, method=None, limit=None, tolerance=None):
- if not (method is None and tolerance is None and is_list_like(target)):
- return super().get_indexer(target, method=method, tolerance=tolerance)
+ if com._any_not_none(method, tolerance, limit) or not is_list_like(target):
+ return super().get_indexer(
+ target, method=method, tolerance=tolerance, limit=limit
+ )
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 0cb7db0e47123..7b1e7c58f2c06 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -2242,6 +2242,22 @@ def test_reindex_frame_add_nat(self):
assert mask[-5:].all()
assert not mask[:-5].any()
+ def test_reindex_limit(self):
+ # GH 28631
+ data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]]
+ exp_data = [
+ ["A", "A", "A"],
+ ["B", "B", "B"],
+ ["C", "C", "C"],
+ ["D", "D", "D"],
+ ["D", "D", "D"],
+ [np.nan, np.nan, np.nan],
+ ]
+ df = DataFrame(data)
+ result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1)
+ expected = DataFrame(exp_data)
+ tm.assert_frame_equal(result, expected)
+
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
assert x[0].dtype == np.dtype("M8[ns]")
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 58b98297f00f3..7e08a5deaff7a 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -416,6 +416,14 @@ def test_get_indexer_backfill(self):
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
+ def test_get_indexer_limit(self):
+ # GH 28631
+ idx = RangeIndex(4)
+ target = RangeIndex(6)
+ result = idx.get_indexer(target, method="pad", limit=1)
+ expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_join_outer(self):
# join with Int64Index
other = Int64Index(np.arange(25, 14, -1))
| Backport PR #28671: BUG: restore limit in RangeIndex.get_indexer | https://api.github.com/repos/pandas-dev/pandas/pulls/28712 | 2019-10-01T12:18:11Z | 2019-10-01T18:53:15Z | 2019-10-01T18:53:15Z | 2019-10-02T06:52:56Z |
CI Failing: TestReadHtml.test_spam_url #28708 | diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 183d217eb09d6..1045b72f0aa6e 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -135,8 +135,8 @@ def test_banklist_url(self):
@network
def test_spam_url(self):
url = (
- "http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&"
- "lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam"
+ "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/data/spam.html"
)
df1 = self.read_html(url, ".*Water.*")
df2 = self.read_html(url, "Unit")
| - [x] closes #28708
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] fixed test_spam_url test by changing a url address to the same html page stored in the pandas github repo
| https://api.github.com/repos/pandas-dev/pandas/pulls/28710 | 2019-10-01T12:06:04Z | 2019-10-01T12:49:35Z | 2019-10-01T12:49:34Z | 2019-10-02T06:53:58Z |
DOC: Fixed PR08, PR09 doctring issues in pandas.core.groupby | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 0ab19448043f6..b5aec189700ce 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -709,7 +709,7 @@ def filter(self, func, dropna=True, *args, **kwargs):
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
- if False, groups that evaluate False are filled with NaNs.
+ If False, groups that evaluate False are filled with NaNs.
Returns
-------
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 984954fe14bb5..e93ce3ce93164 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -212,9 +212,9 @@ class providing the base-class of operations.
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
- positional arguments passed into `func`.
+ Positional arguments passed into `func`.
kwargs : dict, optional
- a dictionary of keyword arguments passed into `func`.
+ A dictionary of keyword arguments passed into `func`.
Returns
-------
@@ -664,11 +664,11 @@ def get_group(self, name, obj=None):
Parameters
----------
name : object
- the name of the group to get as a DataFrame
+ The name of the group to get as a DataFrame.
obj : DataFrame, default None
- the DataFrame to take the DataFrame out of. If
+ The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
- be used
+ be used.
Returns
-------
@@ -1114,7 +1114,7 @@ def any(self, skipna=True):
Parameters
----------
skipna : bool, default True
- Flag to ignore nan values during truth testing
+ Flag to ignore nan values during truth testing.
Returns
-------
@@ -1131,7 +1131,7 @@ def all(self, skipna=True):
Parameters
----------
skipna : bool, default True
- Flag to ignore nan values during truth testing
+ Flag to ignore nan values during truth testing.
Returns
-------
@@ -1252,7 +1252,7 @@ def std(self, ddof=1, *args, **kwargs):
Parameters
----------
ddof : int, default 1
- degrees of freedom
+ Degrees of freedom.
Returns
-------
@@ -1275,7 +1275,7 @@ def var(self, ddof=1, *args, **kwargs):
Parameters
----------
ddof : int, default 1
- degrees of freedom
+ Degrees of freedom.
Returns
-------
@@ -1310,7 +1310,7 @@ def sem(self, ddof=1):
Parameters
----------
ddof : int, default 1
- degrees of freedom
+ Degrees of freedom.
Returns
-------
@@ -1622,7 +1622,7 @@ def pad(self, limit=None):
Parameters
----------
limit : int, optional
- limit of how many values to fill
+ Limit of how many values to fill.
Returns
-------
@@ -1648,7 +1648,7 @@ def backfill(self, limit=None):
Parameters
----------
limit : int, optional
- limit of how many values to fill
+ Limit of how many values to fill.
Returns
-------
@@ -1680,10 +1680,10 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra
Parameters
----------
n : int or list of ints
- a single nth value for the row or a list of nth values
+ A single nth value for the row or a list of nth values.
dropna : None or str, optional
- apply the specified dropna operation before counting which row is
- the nth row. Needs to be None, 'any' or 'all'
+ Apply the specified dropna operation before counting which row is
+ the nth row. Needs to be None, 'any' or 'all'.
Returns
-------
@@ -2098,13 +2098,13 @@ def rank(
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : bool, default True
- False for ranks by high (1) to low (N)
+ False for ranks by high (1) to low (N).
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
pct : bool, default False
- Compute percentage rank of data within each group
+ Compute percentage rank of data within each group.
axis : int, default 0
The axis of the object over which to compute the rank.
@@ -2312,7 +2312,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
Parameters
----------
periods : int, default 1
- number of periods to shift
+ Number of periods to shift.
freq : frequency string
axis : axis to shift, default 0
fill_value : optional
| - xref #25232.
- 0 tests added
- ✓ passes `black pandas`
- ✓passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28709 | 2019-10-01T11:51:12Z | 2019-10-01T14:55:26Z | 2019-10-01T14:55:26Z | 2019-10-01T14:55:42Z |
DOC: Fixed PR09 docstring errors in pandas.tseries (#27977) | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 4ebb4f353a8fd..81d8869dd7ba0 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -204,7 +204,8 @@ def __add__(date):
normalize : bool, default False
Whether to round the result of a DateOffset addition down to the
previous midnight.
- **kwds : Temporal parameter that add to or replace the offset value.
+ **kwds
+ Temporal parameter that add to or replace the offset value.
Parameters that **add** to the offset (like Timedelta):
@@ -1005,12 +1006,12 @@ class CustomBusinessDay(_CustomMixin, BusinessDay):
----------
n : int, default 1
normalize : bool, default False
- Normalize start/end dates to midnight before generating date range
+ Normalize start/end dates to midnight before generating date range.
weekmask : str, Default 'Mon Tue Wed Thu Fri'
- Weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ Weekmask of valid business days, passed to ``numpy.busdaycalendar``.
holidays : list
List/array of dates to exclude from the set of valid business days,
- passed to ``numpy.busdaycalendar``
+ passed to ``numpy.busdaycalendar``.
calendar : pd.HolidayCalendar or np.busdaycalendar
offset : timedelta, default timedelta(0)
"""
@@ -1519,7 +1520,7 @@ class Week(DateOffset):
Parameters
----------
weekday : int, default None
- Always generate specific day of week. 0 for Monday
+ Always generate specific day of week. 0 for Monday.
"""
_adjust_dst = True
@@ -2085,7 +2086,9 @@ class FY5253(DateOffset):
The month in which the fiscal year ends.
variation : str, default "nearest"
- Method of employing 4-4-5 calendar. There are two options:
+ Method of employing 4-4-5 calendar.
+
+ There are two options:
- "nearest" means year end is **weekday** closest to last day of month in year.
- "last" means year end is final **weekday** of the final month in fiscal year.
@@ -2304,7 +2307,9 @@ class FY5253Quarter(DateOffset):
The quarter number that has the leap or 14 week when needed.
variation : str, default "nearest"
- Method of employing 4-4-5 calendar. There are two options:
+ Method of employing 4-4-5 calendar.
+
+ There are two options:
- "nearest" means year end is **weekday** closest to last day of month in year.
- "last" means year end is final **weekday** of the final month in fiscal year.
| - [ ] xref #27977
- [ ] tests added / passed
- [ ] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x ] fixed minor issues in the couple of docstrings (ErrorCode: PR09) in pandas.tseries.offsets.py, namely added dots at the end of parameters {param_name} descriptions. Some of the ErrorCodes: PR09 still appear when running the validation script. This is due the script not covering some of the special cases (such as extra info in the bullet points format in the descriptions of parameters {param_name}), see #25786 and #25461.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28707 | 2019-10-01T10:25:35Z | 2019-10-01T12:12:05Z | 2019-10-01T12:12:05Z | 2019-10-01T13:27:06Z |
read_hdf closes HDF5 stores that it didn't open. | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index fd1c1271a5e37..cde2a4279cf27 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -311,6 +311,7 @@ I/O
- Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`)
- Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`)
- Bug in :func:`DataFrame.to_parquet` where writing to GCS would fail with `engine='fastparquet'` if the file did not already exist (:issue:`28326`)
+- Bug in :func:`read_hdf` closing stores that it didn't open when Exceptions are raised (:issue:`28699`)
- Bug in :meth:`DataFrame.read_json` where using ``orient="index"`` would not maintain the order (:issue:`28557`)
- Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 55ccd838f8a16..0db5b1b4eecfa 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -396,11 +396,12 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs):
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
except (ValueError, TypeError, KeyError):
- # if there is an error, close the store
- try:
- store.close()
- except AttributeError:
- pass
+ if not isinstance(path_or_buf, HDFStore):
+ # if there is an error, close the store if we opened it.
+ try:
+ store.close()
+ except AttributeError:
+ pass
raise
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 140ee5082f55d..956438f1afdf4 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -1195,8 +1195,22 @@ def test_read_missing_key_close_store(self, setup_path):
# read with KeyError before another write
df.to_hdf(path, "k2")
- def test_append_frame_column_oriented(self, setup_path):
+ def test_read_missing_key_opened_store(self, setup_path):
+ # GH 28699
+ with ensure_clean_path(setup_path) as path:
+ df = pd.DataFrame({"a": range(2), "b": range(2)})
+ df.to_hdf(path, "k1")
+
+ store = pd.HDFStore(path, "r")
+ with pytest.raises(KeyError, match="'No object named k2 in the file'"):
+ pd.read_hdf(store, "k2")
+
+ # Test that the file is still open after a KeyError and that we can
+ # still read from it.
+ pd.read_hdf(store, "k1")
+
+ def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
| - [ ] closes #28699
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This commit fixes #28699. I added a test for the bug. | https://api.github.com/repos/pandas-dev/pandas/pulls/28700 | 2019-10-01T00:49:39Z | 2019-10-10T02:05:01Z | 2019-10-10T02:05:01Z | 2019-10-10T02:05:06Z |
TST: Add test for categorical with str and tuples | diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 704f9c94463e6..237ec17f56974 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas.compat.numpy import _np_version_under1p16
+
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
@@ -601,3 +603,10 @@ def test_constructor_imaginary(self):
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
+
+ @pytest.mark.skipif(_np_version_under1p16, reason="Skipping for NumPy <1.16")
+ def test_constructor_string_and_tuples(self):
+ # GH 21416
+ c = pd.Categorical(["c", ("a", "b"), ("b", "a"), "c"])
+ expected_index = pd.Index([("a", "b"), ("b", "a"), "c"])
+ assert c.categories.equals(expected_index)
| The bug is not present any more.
- [x] closes #21416
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- ~[ ] whatsnew entry~
| https://api.github.com/repos/pandas-dev/pandas/pulls/28693 | 2019-09-30T20:10:08Z | 2019-10-06T22:02:43Z | 2019-10-06T22:02:43Z | 2019-10-07T15:58:05Z |
To html encoding add | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 1112e42489342..6da8478a83630 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -109,6 +109,7 @@ Other enhancements
(:issue:`28368`)
- :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`)
- :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`)
+- Added ``encoding`` argument to :func:`DataFrame.to_html` for non-ascii text (:issue:`28663`)
Build Changes
^^^^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5200ad0ba0d23..435cd2e0a4cf5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2207,6 +2207,7 @@ def to_html(
border=None,
table_id=None,
render_links=False,
+ encoding=None,
):
"""
Render a DataFrame as an HTML table.
@@ -2222,6 +2223,10 @@ def to_html(
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
+ encoding : str, default "utf-8"
+ Set character encoding
+
+ .. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
@@ -2263,7 +2268,11 @@ def to_html(
)
# TODO: a generic formatter wld b in DataFrameFormatter
return formatter.to_html(
- buf=buf, classes=classes, notebook=notebook, border=border
+ buf=buf,
+ classes=classes,
+ notebook=notebook,
+ border=border,
+ encoding=encoding,
)
# ----------------------------------------------------------------------
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index ad62c56a337b6..b8c40e3f62221 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -942,6 +942,7 @@ def _format_col(self, i: int) -> List[str]:
def to_html(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
classes: Optional[Union[str, List, Tuple]] = None,
notebook: bool = False,
border: Optional[int] = None,
@@ -963,7 +964,9 @@ def to_html(
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
- return Klass(self, classes=classes, border=border).get_result(buf=buf)
+ return Klass(self, classes=classes, border=border).get_result(
+ buf=buf, encoding=encoding
+ )
def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]:
from pandas.core.index import _sparsify
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index ef19319e208d9..6c4a226b7ebd2 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -99,6 +99,14 @@ def test_to_html_unicode(df, expected, datapath):
assert result == expected
+def test_to_html_encoding(float_frame, tmp_path):
+ # GH 28663
+ path = tmp_path / "test.html"
+ float_frame.to_html(path, encoding="gbk")
+ with open(str(path), "r", encoding="gbk") as f:
+ assert float_frame.to_html() == f.read()
+
+
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({"A": [6.0, 3.1, 2.2]})
| - [x ] closes https://github.com/pandas-dev/pandas/issues/28663
- [x ] tests added / passed
- [x ] passes `black pandas`
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x ] whatsnew entry
added encoding argument to DataFrame.to_html()
| https://api.github.com/repos/pandas-dev/pandas/pulls/28692 | 2019-09-30T19:35:35Z | 2019-10-16T12:29:24Z | 2019-10-16T12:29:24Z | 2019-10-16T12:29:27Z |
DEPR: DeprecationWarning -> FutureWarning for back-compat in pytables | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index f649c922257a8..b4854b6e9d2e5 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -398,6 +398,7 @@ Deprecations
- ``pd.tseries.frequencies.get_offset_name`` function is deprecated. Use offset's ``.freqstr`` property as alternative (:issue:`11192`)
- ``pandas.stats.fama_macbeth`` routines are deprecated and will be removed in a future version (:issue:`6077`)
- ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var`` routines are deprecated and will be removed in a future version (:issue:`6077`)
+- show a ``FutureWarning`` rather than a ``DeprecationWarning`` on using long-time deprecated syntax in ``HDFStore.select``, where ``where`` clause is not a string-like (:issue:`12027`)
.. _whatsnew_0180.prior_deprecations:
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 1bc5b8b723657..58359a815ed26 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -526,7 +526,7 @@ def parse_back_compat(self, w, op=None, value=None):
"where must be passed as a string if op/value are passed")
warnings.warn("passing a dict to Expr is deprecated, "
"pass the where as a single string",
- DeprecationWarning)
+ FutureWarning, stacklevel=10)
if isinstance(w, tuple):
if len(w) == 2:
w, value = w
@@ -535,7 +535,7 @@ def parse_back_compat(self, w, op=None, value=None):
w, op, value = w
warnings.warn("passing a tuple into Expr is deprecated, "
"pass the where as a single string",
- DeprecationWarning, stacklevel=10)
+ FutureWarning, stacklevel=10)
if op is not None:
if not isinstance(w, string_types):
@@ -564,7 +564,7 @@ def convert(v):
warnings.warn("passing multiple values to Expr is deprecated, "
"pass the where as a single string",
- DeprecationWarning)
+ FutureWarning, stacklevel=10)
return w
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index bb1f4a99a2c26..c13afb34dfb84 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -30,7 +30,8 @@
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
- assert_series_equal)
+ assert_series_equal,
+ assert_produces_warning)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
@@ -2329,13 +2330,12 @@ def test_terms(self):
assert_panel4d_equal(result, expected)
# back compat invalid terms
- terms = [
- dict(field='major_axis', op='>', value='20121114'),
- [ dict(field='major_axis', op='>', value='20121114') ],
- [ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
- ]
+ terms = [dict(field='major_axis', op='>', value='20121114'),
+ [dict(field='major_axis', op='>', value='20121114')],
+ ["minor_axis=['A','B']",
+ dict(field='major_axis', op='>', value='20121114')]]
for t in terms:
- with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ with tm.assert_produces_warning(expected_warning=FutureWarning,
check_stacklevel=False):
Term(t)
@@ -2428,12 +2428,14 @@ def test_backwards_compat_without_term_object(self):
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
- store.append('wp',wp)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning,
- check_stacklevel=not compat.PY3):
+ store.append('wp', wp)
+ with assert_produces_warning(expected_warning=FutureWarning,
+ check_stacklevel=False):
result = store.select('wp', [('major_axis>20000102'),
- ('minor_axis', '=', ['A','B']) ])
- expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
+ ('minor_axis', '=', ['A', 'B'])])
+ expected = wp.loc[:,
+ wp.major_axis > Timestamp('20000102'),
+ ['A', 'B']]
assert_panel_equal(result, expected)
store.remove('wp', ('major_axis>20000103'))
@@ -2446,29 +2448,40 @@ def test_backwards_compat_without_term_object(self):
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
- store.append('wp',wp)
+ store.append('wp', wp)
# stringified datetimes
- with tm.assert_produces_warning(expected_warning=DeprecationWarning,
- check_stacklevel=not compat.PY3):
- result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
- expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
+ with assert_produces_warning(expected_warning=FutureWarning,
+ check_stacklevel=False):
+ result = store.select('wp',
+ [('major_axis',
+ '>',
+ datetime.datetime(2000, 1, 2))])
+ expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning,
- check_stacklevel=not compat.PY3):
- result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
- expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
+ with assert_produces_warning(expected_warning=FutureWarning,
+ check_stacklevel=False):
+ result = store.select('wp',
+ [('major_axis',
+ '>',
+ datetime.datetime(2000, 1, 2, 0, 0))])
+ expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning,
- check_stacklevel=not compat.PY3):
- result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
- datetime.datetime(2000,1,3,0,0)])])
- expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
+ with assert_produces_warning(expected_warning=FutureWarning,
+ check_stacklevel=False):
+ result = store.select('wp',
+ [('major_axis',
+ '=',
+ [datetime.datetime(2000, 1, 2, 0, 0),
+ datetime.datetime(2000, 1, 3, 0, 0)])]
+ )
+ expected = wp.loc[:, [Timestamp('20000102'),
+ Timestamp('20000103')]]
assert_panel_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning,
- check_stacklevel=not compat.PY3):
- result = store.select('wp', [('minor_axis','=',['A','B'])])
- expected = wp.loc[:,:,['A','B']]
+ with assert_produces_warning(expected_warning=FutureWarning,
+ check_stacklevel=False):
+ result = store.select('wp', [('minor_axis', '=', ['A', 'B'])])
+ expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
| already long deprecated, changing to `FutureWarning`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/12027 | 2016-01-12T21:55:00Z | 2016-01-12T23:19:37Z | 2016-01-12T23:19:37Z | 2016-01-12T23:19:37Z |
BUG: remove millisecond field that raises valueerror | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 9ebed15c05246..19181fa86f7ef 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -503,3 +503,6 @@ Bug Fixes
- Bug in ``.to_csv`` ignoring formatting parameters ``decimal``, ``na_rep``, ``float_format`` for float indexes (:issue:`11553`)
- Bug in ``DataFrame`` when masking an empty ``DataFrame`` (:issue:`11859`)
+
+- Removed ``millisecond`` property of ``DatetimeIndex``. This would always raise
+ a ``ValueError`` (:issue:`12019`).
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index a240b8c1c1e78..28db57a351ab7 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -4028,6 +4028,12 @@ def test_fillna_period(self):
with tm.assertRaisesRegexp(ValueError, 'Input has different freq=D from PeriodIndex\\(freq=H\\)'):
idx.fillna(pd.Period('2011-01-01', freq='D'))
+ def test_no_millisecond_field(self):
+ with self.assertRaises(AttributeError):
+ DatetimeIndex.millisecond
+
+ with self.assertRaises(AttributeError):
+ DatetimeIndex([]).millisecond
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 3e1ba006df5b7..166fcf759c7e1 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1469,7 +1469,6 @@ def _set_freq(self, value):
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
- millisecond = _field_accessor('millisecond', 'ms', "The milliseconds of the datetime")
microsecond = _field_accessor('microsecond', 'us', "The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns', "The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy', "The week ordinal of the year")
| This would appear in the `dir` and cause tab completion to a field that would raise a `ValueError`. When I brought this up in gitter it seemed like it was safe to remove it.
I couldn't find any tests that hit this descriptor.
| https://api.github.com/repos/pandas-dev/pandas/pulls/12019 | 2016-01-11T22:41:13Z | 2016-01-12T14:42:07Z | 2016-01-12T14:42:07Z | 2016-01-12T14:42:11Z |
BUG: GH11880 where __contains__ fails in unpacked DataFrame with object cols | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 4ce2ce5b69cb4..3496e9eea834c 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -463,6 +463,7 @@ Bug Fixes
- Bug in ``pd.read_clipboard`` and ``pd.to_clipboard`` functions not supporting Unicode; upgrade included ``pyperclip`` to v1.5.15 (:issue:`9263`)
- Bug in ``DataFrame.query`` containing an assignment (:issue:`8664`)
+- Bug in ``from_msgpack`` where ``__contains__()`` fails for columns of the unpacked ``DataFrame``, if the ``DataFrame`` has object columns. (:issue: `11880`)
- Bug in timezone info lost when broadcasting scalar datetime to ``DataFrame`` (:issue:`11682`)
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 1e5816e898baa..ce8fda9e932bc 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -965,6 +965,7 @@ def corr(self, other=None, pairwise=None, **kwargs):
Use a standard estimation bias correction
"""
+
class EWM(_Rolling):
r"""
Provides exponential weighted functions
diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx
index 58e9d64921e0d..a5fcbd3f2d0f1 100644
--- a/pandas/hashtable.pyx
+++ b/pandas/hashtable.pyx
@@ -342,7 +342,7 @@ cdef class Int64HashTable(HashTable):
self.table.vals[k] = <Py_ssize_t> values[i]
@cython.boundscheck(False)
- def map_locations(self, int64_t[:] values):
+ def map_locations(self, ndarray[int64_t, ndim=1] values):
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -570,7 +570,7 @@ cdef class Float64HashTable(HashTable):
return np.asarray(labels)
@cython.boundscheck(False)
- def map_locations(self, float64_t[:] values):
+ def map_locations(self, ndarray[float64_t, ndim=1] values):
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index d6a9feb1bd8f4..61b24c858b60d 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -9,8 +9,8 @@
from pandas import compat
from pandas.compat import u
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
- date_range, period_range, Index, SparseSeries, SparseDataFrame,
- SparsePanel)
+ date_range, period_range, Index)
+from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean, assert_index_equal,
assert_series_equal,
@@ -23,7 +23,19 @@
nan = np.nan
-from pandas.io.packers import to_msgpack, read_msgpack
+try:
+ import blosc # NOQA
+except ImportError:
+ _BLOSC_INSTALLED = False
+else:
+ _BLOSC_INSTALLED = True
+
+try:
+ import zlib # NOQA
+except ImportError:
+ _ZLIB_INSTALLED = False
+else:
+ _ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
@@ -483,6 +495,14 @@ class TestCompression(TestPackers):
"""
def setUp(self):
+ try:
+ from sqlalchemy import create_engine
+ self._create_sql_engine = create_engine
+ except ImportError:
+ self._SQLALCHEMY_INSTALLED = False
+ else:
+ self._SQLALCHEMY_INSTALLED = True
+
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
@@ -508,14 +528,56 @@ def test_compression_zlib(self):
assert_frame_equal(self.frame[k], i_rec[k])
def test_compression_blosc(self):
- try:
- import blosc
- except ImportError:
+ if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
i_rec = self.encode_decode(self.frame, compress='blosc')
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
+ def test_readonly_axis_blosc(self):
+ # GH11880
+ if not _BLOSC_INSTALLED:
+ raise nose.SkipTest('no blosc')
+ df1 = DataFrame({'A': list('abcd')})
+ df2 = DataFrame(df1, index=[1., 2., 3., 4.])
+ self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
+ self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))
+
+ def test_readonly_axis_zlib(self):
+ # GH11880
+ df1 = DataFrame({'A': list('abcd')})
+ df2 = DataFrame(df1, index=[1., 2., 3., 4.])
+ self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
+ self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))
+
+ def test_readonly_axis_blosc_to_sql(self):
+ # GH11880
+ if not _BLOSC_INSTALLED:
+ raise nose.SkipTest('no blosc')
+ if not self._SQLALCHEMY_INSTALLED:
+ raise nose.SkipTest('no sqlalchemy')
+ expected = DataFrame({'A': list('abcd')})
+ df = self.encode_decode(expected, compress='blosc')
+ eng = self._create_sql_engine("sqlite:///:memory:")
+ df.to_sql('test', eng, if_exists='append')
+ result = pandas.read_sql_table('test', eng, index_col='index')
+ result.index.names = [None]
+ assert_frame_equal(expected, result)
+
+ def test_readonly_axis_zlib_to_sql(self):
+ # GH11880
+ if not _ZLIB_INSTALLED:
+ raise nose.SkipTest('no zlib')
+ if not self._SQLALCHEMY_INSTALLED:
+ raise nose.SkipTest('no sqlalchemy')
+ expected = DataFrame({'A': list('abcd')})
+ df = self.encode_decode(expected, compress='zlib')
+ eng = self._create_sql_engine("sqlite:///:memory:")
+ df.to_sql('test', eng, if_exists='append')
+ result = pandas.read_sql_table('test', eng, index_col='index')
+ result.index.names = [None]
+ assert_frame_equal(expected, result)
+
class TestEncoding(TestPackers):
def setUp(self):
| closes #11880
| https://api.github.com/repos/pandas-dev/pandas/pulls/12013 | 2016-01-10T23:25:59Z | 2016-01-15T13:46:42Z | 2016-01-15T13:46:42Z | 2016-01-15T13:47:24Z |
DOC: fix escape in EWM docstring | diff --git a/pandas/core/window.py b/pandas/core/window.py
index 4bbdf444ac2a7..1e5816e898baa 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -966,7 +966,7 @@ def corr(self, other=None, pairwise=None, **kwargs):
"""
class EWM(_Rolling):
- """
+ r"""
Provides exponential weighted functions
.. versionadded:: 0.18.0
| Because of the `\a` in the `\alpha` otherwise being interpreted
| https://api.github.com/repos/pandas-dev/pandas/pulls/12001 | 2016-01-08T22:46:06Z | 2016-01-09T15:15:38Z | 2016-01-09T15:15:38Z | 2016-01-09T15:15:38Z |
BUG: accept unicode in Timedelta constructor, #11995 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 4fd5edb21ace8..9ebed15c05246 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -438,7 +438,7 @@ Bug Fixes
- Regression in ``.clip`` with tz-aware datetimes (:issue:`11838`)
- Bug in ``date_range`` when the boundaries fell on the frequency (:issue:`11804`)
- Bug in consistency of passing nested dicts to ``.groupby(...).agg(...)`` (:issue:`9052`)
-
+- Accept unicode in ``Timedelta`` constructor (:issue:`11995`)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 21f0c44789f9b..3dec7414fa7d4 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -161,9 +161,22 @@ def test_construction(self):
self.assertTrue(isnull(Timedelta('nat')))
# offset
- self.assertEqual(to_timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
- self.assertEqual(Timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
- self.assertEqual(Timedelta(pd.offsets.Second(2)),Timedelta('0 days, 00:00:02'))
+ self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
+ Timedelta('0 days, 02:00:00'))
+ self.assertEqual(Timedelta(pd.offsets.Hour(2)),
+ Timedelta('0 days, 02:00:00'))
+ self.assertEqual(Timedelta(pd.offsets.Second(2)),
+ Timedelta('0 days, 00:00:02'))
+
+ # unicode
+ # GH 11995
+ expected = Timedelta('1H')
+ result = pd.Timedelta(u'1H')
+ self.assertEqual(result, expected)
+ self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
+ Timedelta(u'0 days, 02:00:00'))
+
+ self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_round(self):
@@ -171,15 +184,41 @@ def test_round(self):
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
- ('U', Timedelta('1 days 02:34:56.789123000'),Timedelta('-1 days 02:34:56.789123000')),
- ('L', Timedelta('1 days 02:34:56.789000000'),Timedelta('-1 days 02:34:56.789000000')),
- ('S', Timedelta('1 days 02:34:56'),Timedelta('-1 days 02:34:56')),
- ('2S', Timedelta('1 days 02:34:56'),Timedelta('-1 days 02:34:56')),
- ('5S', Timedelta('1 days 02:34:55'),Timedelta('-1 days 02:34:55')),
- ('T', Timedelta('1 days 02:34:00'),Timedelta('-1 days 02:34:00')),
- ('12T', Timedelta('1 days 02:24:00'),Timedelta('-1 days 02:24:00')),
- ('H', Timedelta('1 days 02:00:00'),Timedelta('-1 days 02:00:00')),
- ('d', Timedelta('1 days'),Timedelta('-1 days'))]:
+ ('U',
+ Timedelta('1 days 02:34:56.789123000'),
+ Timedelta('-1 days 02:34:56.789123000')
+ ),
+ ('L',
+ Timedelta('1 days 02:34:56.789000000'),
+ Timedelta('-1 days 02:34:56.789000000')
+ ),
+ ('S',
+ Timedelta('1 days 02:34:56'),
+ Timedelta('-1 days 02:34:56')
+ ),
+ ('2S',
+ Timedelta('1 days 02:34:56'),
+ Timedelta('-1 days 02:34:56')
+ ),
+ ('5S',
+ Timedelta('1 days 02:34:55'),
+ Timedelta('-1 days 02:34:55')
+ ),
+ ('T',
+ Timedelta('1 days 02:34:00'),
+ Timedelta('-1 days 02:34:00')
+ ),
+ ('12T',
+ Timedelta('1 days 02:24:00'),
+ Timedelta('-1 days 02:24:00')),
+ ('H',
+ Timedelta('1 days 02:00:00'),
+ Timedelta('-1 days 02:00:00')
+ ),
+ ('d',
+ Timedelta('1 days'),
+ Timedelta('-1 days')
+ )]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
@@ -1104,20 +1143,32 @@ def test_components(self):
self.assertTrue(result.iloc[1].isnull().all())
def test_constructor(self):
- expected = TimedeltaIndex(['1 days','1 days 00:00:05',
- '2 days','2 days 00:00:02','0 days 00:00:03'])
- result = TimedeltaIndex(['1 days','1 days, 00:00:05',
- np.timedelta64(2,'D'),
- timedelta(days=2,seconds=2),
+ expected = TimedeltaIndex(['1 days', '1 days 00:00:05',
+ '2 days', '2 days 00:00:02',
+ '0 days 00:00:03'])
+ result = TimedeltaIndex(['1 days', '1 days, 00:00:05',
+ np.timedelta64(2, 'D'),
+ timedelta(days=2, seconds=2),
+ pd.offsets.Second(3)])
+ tm.assert_index_equal(result, expected)
+
+ # unicode
+ result = TimedeltaIndex([u'1 days', '1 days, 00:00:05',
+ np.timedelta64(2, 'D'),
+ timedelta(days=2, seconds=2),
pd.offsets.Second(3)])
- tm.assert_index_equal(result,expected)
- expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02'])
+ expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01',
+ '0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
- expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05', '0 days 00:00:09'])
+ expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05',
+ '0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
- expected = TimedeltaIndex(['0 days 00:00:00.400', '0 days 00:00:00.450', '0 days 00:00:01.200'])
- tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'), expected)
+ expected = TimedeltaIndex(['0 days 00:00:00.400',
+ '0 days 00:00:00.450',
+ '0 days 00:00:01.200'])
+ tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'),
+ expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 45dc42f5ce302..49ffdcbf29414 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -17,8 +17,9 @@ from cpython cimport (
PyLong_Check,
PyObject_RichCompareBool,
PyObject_RichCompare,
- PyString_Check,
- Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE
+ Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE,
+ PyUnicode_Check,
+ PyUnicode_AsUTF8String,
)
# Cython < 0.17 doesn't have this in cpython
@@ -31,10 +32,9 @@ cdef extern from "datetime_helper.h":
# this is our datetime.pxd
from datetime cimport cmp_pandas_datetimestruct
-from util cimport is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object
-
from libc.stdlib cimport free
+from util cimport is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object
cimport util
from datetime cimport *
@@ -2769,7 +2769,7 @@ cdef inline parse_timedelta_string(object ts, coerce=False):
"""
cdef:
- str c
+ unicode c
bint neg=0, have_dot=0, have_value=0, have_hhmmss=0
object current_unit=None
int64_t result=0, m=0, r
@@ -2783,6 +2783,10 @@ cdef inline parse_timedelta_string(object ts, coerce=False):
if ts in _nat_strings or not len(ts):
return NPY_NAT
+ # decode ts if necessary
+ if not PyUnicode_Check(ts) and not PY3:
+ ts = str(ts).decode('utf-8')
+
for c in ts:
# skip whitespace / commas
| closes #11995
| https://api.github.com/repos/pandas-dev/pandas/pulls/11997 | 2016-01-08T14:32:32Z | 2016-01-08T18:20:15Z | 2016-01-08T18:20:15Z | 2016-01-08T18:28:57Z |
COMPAT: drop suppport for python 2.6, #7718 | diff --git a/.travis.yml b/.travis.yml
index 6252b5654890f..087d7f1565707 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -19,14 +19,15 @@ git:
matrix:
fast_finish: true
include:
- - python: 2.6
+ - python: 2.7
env:
- - JOB_NAME: "26_nslow_nnet"
+ - JOB_NAME: "27_nslow_nnet_COMPAT"
- NOSE_ARGS="not slow and not network and not disabled"
- CLIPBOARD=xclip
- LOCALE_OVERRIDE="it_IT.UTF-8"
- BUILD_TYPE=conda
- INSTALL_TEST=true
+ - JOB_TAG=_COMPAT
- python: 2.7
env:
- JOB_NAME: "27_slow_nnet_LOCALE"
diff --git a/ci/requirements-2.6.build b/ci/requirements-2.7_COMPAT.build
similarity index 100%
rename from ci/requirements-2.6.build
rename to ci/requirements-2.7_COMPAT.build
diff --git a/ci/requirements-2.6.pip b/ci/requirements-2.7_COMPAT.pip
similarity index 100%
rename from ci/requirements-2.6.pip
rename to ci/requirements-2.7_COMPAT.pip
diff --git a/ci/requirements-2.6.run b/ci/requirements-2.7_COMPAT.run
similarity index 100%
rename from ci/requirements-2.6.run
rename to ci/requirements-2.7_COMPAT.run
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 0f2f7e6f83d78..049fc75184c96 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 2.6, 2.7, 3.4, and 3.5
+Officially Python 2.7, 3.4, and 3.5
Installing pandas
-----------------
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index eaeabbf3f9245..83bd22db53ce0 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -7,6 +7,10 @@ This is a major release from 0.17.1 and includes a small number of API changes,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
+.. warning::
+
+ pandas >= 0.18.0 will no longer support compatibility with Python version 2.6 (:issue:`7718`)
+
.. warning::
pandas >= 0.18.0 will no longer support compatibility with Python version 3.3 (:issue:`11273`)
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 639da4176cd61..2da4427af4cb6 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -20,10 +20,6 @@
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
-Python 2.6 compatibility:
-* OrderedDict
-* Counter
-
Other items:
* OrderedDefaultDict
* platform checker
@@ -268,480 +264,7 @@ def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
-
-# ----------------------------------------------------------------------------
-# Python 2.6 compatibility shims
-#
-
-# OrderedDict Shim from Raymond Hettinger, python core dev
-# http://code.activestate.com/recipes/576693-ordered-dictionary-for-py24/
-# here to support versions before 2.6
-if not PY3:
- # don't need this except in 2.6
- try:
- from thread import get_ident as _get_ident
- except ImportError:
- from dummy_thread import get_ident as _get_ident
-
-try:
- from _abcoll import KeysView, ValuesView, ItemsView
-except ImportError:
- pass
-
-
-class _OrderedDict(dict):
-
- """Dictionary that remembers insertion order"""
- # An inherited dict maps keys to values.
- # The inherited dict provides __getitem__, __len__, __contains__, and get.
- # The remaining methods are order-aware.
- # Big-O running times for all methods are the same as for regular
- # dictionaries.
-
- # The internal self.__map dictionary maps keys to links in a doubly linked
- # list. The circular doubly linked list starts and ends with a sentinel
- # element. The sentinel element never gets deleted (this simplifies the
- # algorithm). Each link is stored as a list of length three: [PREV, NEXT,
- # KEY].
-
- def __init__(self, *args, **kwds):
- """Initialize an ordered dictionary. Signature is the same as for
- regular dictionaries, but keyword arguments are not recommended
- because their insertion order is arbitrary.
- """
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__root
- except AttributeError:
- self.__root = root = [] # sentinel node
- root[:] = [root, root, None]
- self.__map = {}
- self.__update(*args, **kwds)
-
- def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
- """od.__setitem__(i, y) <==> od[i]=y"""
- # Setting a new item creates a new link which goes at the end of the
- # linked list, and the inherited dictionary is updated with the new
- # key/value pair.
- if key not in self:
- root = self.__root
- last = root[0]
- last[1] = root[0] = self.__map[key] = [last, root, key]
- dict_setitem(self, key, value)
-
- def __delitem__(self, key, dict_delitem=dict.__delitem__):
- """od.__delitem__(y) <==> del od[y]"""
- # Deleting an existing item uses self.__map to find the link which is
- # then removed by updating the links in the predecessor and successor
- # nodes.
- dict_delitem(self, key)
- link_prev, link_next, key = self.__map.pop(key)
- link_prev[1] = link_next
- link_next[0] = link_prev
-
- def __iter__(self):
- """od.__iter__() <==> iter(od)"""
- root = self.__root
- curr = root[1]
- while curr is not root:
- yield curr[2]
- curr = curr[1]
-
- def __reversed__(self):
- """od.__reversed__() <==> reversed(od)"""
- root = self.__root
- curr = root[0]
- while curr is not root:
- yield curr[2]
- curr = curr[0]
-
- def clear(self):
- """od.clear() -> None. Remove all items from od."""
- try:
- for node in itervalues(self.__map):
- del node[:]
- root = self.__root
- root[:] = [root, root, None]
- self.__map.clear()
- except AttributeError:
- pass
- dict.clear(self)
-
- def popitem(self, last=True):
- """od.popitem() -> (k, v), return and remove a (key, value) pair.
-
- Pairs are returned in LIFO order if last is true or FIFO order if
- false.
- """
- if not self:
- raise KeyError('dictionary is empty')
- root = self.__root
- if last:
- link = root[0]
- link_prev = link[0]
- link_prev[1] = root
- root[0] = link_prev
- else:
- link = root[1]
- link_next = link[1]
- root[1] = link_next
- link_next[0] = root
- key = link[2]
- del self.__map[key]
- value = dict.pop(self, key)
- return key, value
-
- # -- the following methods do not depend on the internal structure --
-
- def keys(self):
- """od.keys() -> list of keys in od"""
- return list(self)
-
- def values(self):
- """od.values() -> list of values in od"""
- return [self[key] for key in self]
-
- def items(self):
- """od.items() -> list of (key, value) pairs in od"""
- return [(key, self[key]) for key in self]
-
- def iterkeys(self):
- """od.iterkeys() -> an iterator over the keys in od"""
- return iter(self)
-
- def itervalues(self):
- """od.itervalues -> an iterator over the values in od"""
- for k in self:
- yield self[k]
-
- def iteritems(self):
- """od.iteritems -> an iterator over the (key, value) items in od"""
- for k in self:
- yield (k, self[k])
-
- def update(*args, **kwds):
- """od.update(E, **F) -> None. Update od from dict/iterable E and F.
-
- If E is a dict instance, does: for k in E: od[k] = E[k]
- If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
- Or if E is an iterable of items, does:for k, v in E: od[k] = v
- In either case, this is followed by: for k, v in F.items(): od[k] = v
- """
- if len(args) > 2:
- raise TypeError('update() takes at most 2 positional '
- 'arguments (%d given)' % (len(args),))
- elif not args:
- raise TypeError('update() takes at least 1 argument (0 given)')
- self = args[0]
- # Make progressively weaker assumptions about "other"
- other = ()
- if len(args) == 2:
- other = args[1]
- if isinstance(other, dict):
- for key in other:
- self[key] = other[key]
- elif hasattr(other, 'keys'):
- for key in other.keys():
- self[key] = other[key]
- else:
- for key, value in other:
- self[key] = value
- for key, value in kwds.items():
- self[key] = value
- # let subclasses override update without breaking __init__
- __update = update
-
- __marker = object()
-
- def pop(self, key, default=__marker):
- """od.pop(k[,d]) -> v, remove specified key and return the
- corresponding value. If key is not found, d is returned if given,
- otherwise KeyError is raised.
- """
- if key in self:
- result = self[key]
- del self[key]
- return result
- if default is self.__marker:
- raise KeyError(key)
- return default
-
- def setdefault(self, key, default=None):
- """od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od
- """
- if key in self:
- return self[key]
- self[key] = default
- return default
-
- def __repr__(self, _repr_running=None):
- """od.__repr__() <==> repr(od)"""
- if _repr_running is None:
- _repr_running = {}
- call_key = id(self), _get_ident()
- if call_key in _repr_running:
- return '...'
- _repr_running[call_key] = 1
- try:
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, list(self.items()))
- finally:
- del _repr_running[call_key]
-
- def __reduce__(self):
- """Return state information for pickling"""
- items = [[k, self[k]] for k in self]
- inst_dict = vars(self).copy()
- for k in vars(OrderedDict()):
- inst_dict.pop(k, None)
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def copy(self):
- """od.copy() -> a shallow copy of od"""
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- """OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and
- values equal to v (which defaults to None).
- """
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- """od.__eq__(y) <==> od==y. Comparison to another OD is
- order-sensitive while comparison to a regular mapping is
- order-insensitive.
- """
- if isinstance(other, OrderedDict):
- return (len(self) == len(other) and
- list(self.items()) == list(other.items()))
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
-
- # -- the following methods are only used in Python 2.7 --
-
- def viewkeys(self):
- """od.viewkeys() -> a set-like object providing a view on od's keys"""
- return KeysView(self)
-
- def viewvalues(self):
- """od.viewvalues() -> an object providing a view on od's values"""
- return ValuesView(self)
-
- def viewitems(self):
- """od.viewitems() -> a set-like object providing a view on od's items
- """
- return ItemsView(self)
-
-
-# {{{ http://code.activestate.com/recipes/576611/ (r11)
-
-try:
- from operator import itemgetter
- from heapq import nlargest
-except ImportError:
- pass
-
-
-class _Counter(dict):
-
- """Dict subclass for counting hashable objects. Sometimes called a bag
- or multiset. Elements are stored as dictionary keys and their counts
- are stored as dictionary values.
-
- >>> Counter('zyzygy')
- Counter({'y': 3, 'z': 2, 'g': 1})
-
- """
-
- def __init__(self, iterable=None, **kwds):
- """Create a new, empty Counter object. And if given, count elements
- from an input iterable. Or, initialize the count from another mapping
- of elements to their counts.
-
- >>> c = Counter() # a new, empty counter
- >>> c = Counter('gallahad') # a new counter from an iterable
- >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
- >>> c = Counter(a=4, b=2) # a new counter from keyword args
-
- """
- self.update(iterable, **kwds)
-
- def __missing__(self, key):
- return 0
-
- def most_common(self, n=None):
- """List the n most common elements and their counts from the most
- common to the least. If n is None, then list all element counts.
-
- >>> Counter('abracadabra').most_common(3)
- [('a', 5), ('r', 2), ('b', 2)]
-
- """
- if n is None:
- return sorted(iteritems(self), key=itemgetter(1), reverse=True)
- return nlargest(n, iteritems(self), key=itemgetter(1))
-
- def elements(self):
- """Iterator over elements repeating each as many times as its count.
-
- >>> c = Counter('ABCABC')
- >>> sorted(c.elements())
- ['A', 'A', 'B', 'B', 'C', 'C']
-
- If an element's count has been set to zero or is a negative number,
- elements() will ignore it.
-
- """
- for elem, count in iteritems(self):
- for _ in range(count):
- yield elem
-
- # Override dict methods where the meaning changes for Counter objects.
-
- @classmethod
- def fromkeys(cls, iterable, v=None):
- raise NotImplementedError(
- 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
-
- def update(self, iterable=None, **kwds):
- """Like dict.update() but add counts instead of replacing them.
-
- Source can be an iterable, a dictionary, or another Counter instance.
-
- >>> c = Counter('which')
- >>> c.update('witch') # add elements from another iterable
- >>> d = Counter('watch')
- >>> c.update(d) # add elements from another counter
- >>> c['h'] # four 'h' in which, witch, and watch
- 4
-
- """
- if iterable is not None:
- if hasattr(iterable, 'iteritems'):
- if self:
- self_get = self.get
- for elem, count in iteritems(iterable):
- self[elem] = self_get(elem, 0) + count
- else:
- dict.update(
- self, iterable) # fast path when counter is empty
- else:
- self_get = self.get
- for elem in iterable:
- self[elem] = self_get(elem, 0) + 1
- if kwds:
- self.update(kwds)
-
- def copy(self):
- """Like dict.copy() but returns a Counter instance instead of a dict.
- """
- return Counter(self)
-
- def __delitem__(self, elem):
- """Like dict.__delitem__() but does not raise KeyError for missing
- values.
- """
- if elem in self:
- dict.__delitem__(self, elem)
-
- def __repr__(self):
- if not self:
- return '%s()' % self.__class__.__name__
- items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
- return '%s({%s})' % (self.__class__.__name__, items)
-
- # Multiset-style mathematical operations discussed in:
- # Knuth TAOCP Volume II section 4.6.3 exercise 19
- # and at http://en.wikipedia.org/wiki/Multiset
- #
- # Outputs guaranteed to only include positive counts.
- #
- # To strip negative and zero counts, add-in an empty counter:
- # c += Counter()
-
- def __add__(self, other):
- """Add counts from two counters.
-
- >>> Counter('abbb') + Counter('bcc')
- Counter({'b': 4, 'c': 2, 'a': 1})
-
- """
- if not isinstance(other, Counter):
- return NotImplemented
- result = Counter()
- for elem in set(self) | set(other):
- newcount = self[elem] + other[elem]
- if newcount > 0:
- result[elem] = newcount
- return result
-
- def __sub__(self, other):
- """Subtract count, but keep only results with positive counts.
-
- >>> Counter('abbbc') - Counter('bccd')
- Counter({'b': 2, 'a': 1})
-
- """
- if not isinstance(other, Counter):
- return NotImplemented
- result = Counter()
- for elem in set(self) | set(other):
- newcount = self[elem] - other[elem]
- if newcount > 0:
- result[elem] = newcount
- return result
-
- def __or__(self, other):
- """Union is the maximum of value in either of the input counters.
-
- >>> Counter('abbb') | Counter('bcc')
- Counter({'b': 3, 'c': 2, 'a': 1})
-
- """
- if not isinstance(other, Counter):
- return NotImplemented
- _max = max
- result = Counter()
- for elem in set(self) | set(other):
- newcount = _max(self[elem], other[elem])
- if newcount > 0:
- result[elem] = newcount
- return result
-
- def __and__(self, other):
- """Intersection is the minimum of corresponding counts.
-
- >>> Counter('abbb') & Counter('bcc')
- Counter({'b': 1})
-
- """
- if not isinstance(other, Counter):
- return NotImplemented
- _min = min
- result = Counter()
- if len(self) < len(other):
- self, other = other, self
- for elem in filter(self.__contains__, other):
- newcount = _min(self[elem], other[elem])
- if newcount > 0:
- result[elem] = newcount
- return result
-
-if sys.version_info[:2] < (2, 7):
- OrderedDict = _OrderedDict
- Counter = _Counter
-else:
- from collections import OrderedDict, Counter
+from collections import OrderedDict, Counter
if PY3:
def raise_with_traceback(exc, traceback=Ellipsis):
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 87450ddde636e..e36fcdf34a707 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2502,8 +2502,6 @@ def test_eof_states(self):
self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
- # Python 2.6 won't throw an exception for this case (see http://bugs.python.org/issue16013)
- tm._skip_if_python26()
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\')
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index b18bd7b2b3978..252250c5a55b8 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -567,7 +567,7 @@ def test_group_var_constant(self):
self.algo(out, counts, values, labels)
self.assertEqual(counts[0], 3)
- self.assertTrue(out[0, 0] >= 0) # Python 2.6 has no assertGreaterEqual
+ self.assertTrue(out[0, 0] >= 0)
tm.assert_almost_equal(out[0, 0], 0.0)
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 3a8fdd877f5a0..0e286e93160b8 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -134,7 +134,6 @@ def test_case_insensitive(self):
self.assertRaises(KeyError, self.cf.get_option, 'no_such_option')
self.cf.deprecate_option('KanBan')
- # testing warning with catch_warning was only added in 2.6
self.assertTrue(self.cf._is_deprecated('kAnBaN'))
def test_get_option(self):
@@ -249,10 +248,6 @@ def test_deprecate_option(self):
self.cf.deprecate_option(
'foo') # we can deprecate non-existent options
- # testing warning with catch_warning was only added in 2.6
- if sys.version_info[:2] < (2, 6):
- raise nose.SkipTest("Need py > 2.6")
-
self.assertTrue(self.cf._is_deprecated('foo'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 960e931383310..5bab0dcdc91fd 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5053,8 +5053,6 @@ def test_from_records_empty_with_nonempty_fields_gh3682(self):
self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
- if sys.version < LooseVersion('2.7'):
- raise nose.SkipTest('rec arrays dont work properly with py2.6')
# this may fail on certain platforms because of a numpy issue
# related GH6140
@@ -13487,13 +13485,8 @@ def test_round(self):
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
- if sys.version < LooseVersion('2.7'):
- # np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
- with self.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- df.round(non_int_round_dict)
- else:
- with self.assertRaises(TypeError):
- df.round(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 0f46c1106ed08..23e8aad01bf52 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -1026,10 +1026,8 @@ def test_zero_step_raises(self):
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
- # assertRaisesRegexp is not available in py2.6
- # self.assertRaisesRegexp(ValueError, "unbounded slice",
- # lambda: BlockPlacement(slc))
- self.assertRaises(ValueError, BlockPlacement, slc)
+ self.assertRaisesRegexp(ValueError, "unbounded slice",
+ lambda: BlockPlacement(slc))
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 254704f21387c..3e1ba006df5b7 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1493,8 +1493,6 @@ def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
- # can't call self.map() which tries to treat func as ufunc
- # and causes recursion warnings on python 2.6
return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values,
lambda x: np.nan if x is tslib.NaT else x.time()))
diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index 0bdcbedee0900..a4cb84d530336 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -106,7 +106,6 @@ def show_versions(as_json=False):
deps_blob.append((modname, None))
if (as_json):
- # 2.6-safe
try:
import json
except:
@@ -134,7 +133,6 @@ def show_versions(as_json=False):
def main():
- # optparse is 2.6-safe
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index d912e745788c8..1c21863415c62 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -249,12 +249,6 @@ def _skip_if_no_cday():
raise nose.SkipTest("CustomBusinessDay not available.")
-def _skip_if_python26():
- if sys.version_info[:2] == (2, 6):
- import nose
- raise nose.SkipTest("skipping on python2.6")
-
-
def _skip_if_no_pathlib():
try:
from pathlib import Path
diff --git a/scripts/test_py25.bat b/scripts/test_py25.bat
deleted file mode 100644
index fbf00b0451e3d..0000000000000
--- a/scripts/test_py25.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-SET PATH=C:\MinGW\bin;C:\Python25;C:\Python25\Scripts;%PATH%
-del pandas\_tseries.pyd
-del pandas\_sparse.pyd
-del pandas\src\tseries.c
-del pandas\src\sparse.c
-python setup.py clean
-python setup.py build_ext -c mingw32 --inplace
-nosetests pandas
\ No newline at end of file
diff --git a/scripts/test_py26.bat b/scripts/test_py26.bat
deleted file mode 100644
index e2502e87ad459..0000000000000
--- a/scripts/test_py26.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-SET PATH=C:\MinGW\bin;E:\Python26;E:\Python26\Scripts;%PATH%
-del pandas\_tseries.pyd
-del pandas\_sparse.pyd
-del pandas\src\tseries.c
-del pandas\src\sparse.c
-python setup.py clean
-python setup.py build_ext -c mingw32 --inplace
-nosetests pandas
\ No newline at end of file
diff --git a/scripts/test_py31.bat b/scripts/test_py31.bat
deleted file mode 100644
index e146ef2826cff..0000000000000
--- a/scripts/test_py31.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-set BASE=E:\python31
-set PYTHON=%BASE%\python.exe
-set NOSETESTS=%BASE%\scripts\nosetests-script.py
-
-%PYTHON% setup.py install
-cd bench
-%PYTHON% %NOSETESTS% pandas
-cd ..
\ No newline at end of file
diff --git a/scripts/test_py32.bat b/scripts/test_py32.bat
deleted file mode 100644
index 31685ae40aee5..0000000000000
--- a/scripts/test_py32.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-set BASE=E:\python32
-set PYTHON=%BASE%\python.exe
-set NOSETESTS=%BASE%\scripts\nosetests-script.py
-
-%PYTHON% setup.py install
-cd bench
-%PYTHON% %NOSETESTS% pandas
-cd ..
\ No newline at end of file
diff --git a/scripts/winbuild_py25.bat b/scripts/winbuild_py25.bat
deleted file mode 100644
index 5ecebab71b851..0000000000000
--- a/scripts/winbuild_py25.bat
+++ /dev/null
@@ -1,2 +0,0 @@
-SET PATH=C:\MinGW\bin;C:\Python25;C:\Python25\Scripts;%PATH%
-python setup.py build -c mingw32 bdist_wininst
diff --git a/scripts/windows_builder/build_26-32.bat b/scripts/windows_builder/build_26-32.bat
deleted file mode 100644
index 00cf016ff3bad..0000000000000
--- a/scripts/windows_builder/build_26-32.bat
+++ /dev/null
@@ -1,21 +0,0 @@
-@echo off
-echo "starting 26-32"
-
-
-title 26-32 build
-echo "building"
-cd "c:\users\Jeff Reback\documents\github\pandas"
-C:\python26-32\python.exe setup.py build > build.26-32.log 2>&1
-
-echo "installing"
-C:\python26-32\python.exe setup.py bdist --formats=wininst > install.26-32.log 2>&1
-
-echo "testing"
-C:\python26-32\scripts\nosetests -A "not slow" build\lib.win32-2.6\pandas > test.26-32.log 2>&1
-
-echo "versions"
-cd build\lib.win32-2.6
-C:\python26-32\python.exe ../../ci/print_versions.py > ../../versions.26-32.log 2>&1
-
-
-exit
diff --git a/scripts/windows_builder/build_26-64.bat b/scripts/windows_builder/build_26-64.bat
deleted file mode 100644
index 55abf37c6c37a..0000000000000
--- a/scripts/windows_builder/build_26-64.bat
+++ /dev/null
@@ -1,25 +0,0 @@
-@echo off
-echo "starting 26-64"
-
-setlocal EnableDelayedExpansion
-set MSSdk=1
-CALL "C:\Program Files\Microsoft SDKs\Windows\v7.0\Bin\SetEnv.cmd" /x64 /release
-set DISTUTILS_USE_SDK=1
-
-title 26-64 build
-echo "building"
-cd "c:\users\Jeff Reback\documents\github\pandas"
-C:\python26-64\python.exe setup.py build > build.26-64.log 2>&1
-
-echo "installing"
-C:\python26-64\python.exe setup.py bdist --formats=wininst > install.26-64.log 2>&1
-
-echo "testing"
-C:\python26-64\scripts\nosetests -A "not slow" build\lib.win-amd64-2.6\pandas > test.26-64.log 2>&1
-
-echo "versions"
-cd build\lib.win-amd64-2.6
-C:\python26-64\python.exe ../../ci/print_versions.py > ../../versions.26-64.log 2>&1
-
-
-exit
diff --git a/scripts/windows_builder/build_33-32.bat b/scripts/windows_builder/build_33-32.bat
deleted file mode 100644
index cf629bc71f34e..0000000000000
--- a/scripts/windows_builder/build_33-32.bat
+++ /dev/null
@@ -1,27 +0,0 @@
-@echo off
-echo "starting 33-32"
-
-setlocal EnableDelayedExpansion
-set MSSdk=1
-CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x86 /release
-set DISTUTILS_USE_SDK=1
-
-title 33-32 build
-echo "building"
-cd "c:\users\Jeff Reback\documents\github\pandas"
-C:\python33-32\python.exe setup.py build > build.33-32.log 2>&1
-
-echo "installing"
-C:\python33-32\python.exe setup.py bdist --formats=wininst > install.33-32.log 2>&1
-
-echo "testing"
-C:\python33-32\scripts\nosetests -A "not slow" build\lib.win32-3.3\pandas > test.33-32.log 2>&1
-
-echo "versions"
-cd build\lib.win32-3.3
-C:\python33-32\python.exe ../../ci/print_versions.py > ../../versions.33-32.log 2>&1
-
-exit
-
-
-
diff --git a/scripts/windows_builder/build_33-64.bat b/scripts/windows_builder/build_33-64.bat
deleted file mode 100644
index 8f037941868f9..0000000000000
--- a/scripts/windows_builder/build_33-64.bat
+++ /dev/null
@@ -1,27 +0,0 @@
-@echo off
-echo "starting 33-64"
-
-setlocal EnableDelayedExpansion
-set MSSdk=1
-CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64 /release
-set DISTUTILS_USE_SDK=1
-
-title 33-64 build
-echo "building"
-cd "c:\users\Jeff Reback\documents\github\pandas"
-C:\python33-64\python.exe setup.py build > build.33-64.log 2>&1
-
-echo "installing"
-C:\python33-64\python.exe setup.py bdist --formats=wininst > install.33-64.log 2>&1
-
-echo "testing"
-C:\python33-64\scripts\nosetests -A "not slow" build\lib.win-amd64-3.3\pandas > test.33-64.log 2>&1
-
-echo "versions"
-cd build\lib.win-amd64-3.3
-C:\python33-64\python.exe ../../ci/print_versions.py > ../../versions.33-64.log 2>&1
-
-exit
-
-
-
diff --git a/scripts/windows_builder/check_and_build.py b/scripts/windows_builder/check_and_build.py
index 81669972b991d..2eb32fb4265d9 100644
--- a/scripts/windows_builder/check_and_build.py
+++ b/scripts/windows_builder/check_and_build.py
@@ -45,7 +45,7 @@
args = parser.parse_args()
dry_run = args.dry
-builds = ['26-32','26-64','27-32','27-64','33-32','33-64','34-32','34-64']
+builds = ['27-32','27-64','34-32','34-64']
base_dir = "C:\Users\Jeff Reback\Documents\GitHub\pandas"
remote_host='pandas.pydata.org'
username='pandas'
@@ -140,7 +140,7 @@ def do_update(is_verbose=True):
if is_verbose:
logger.info("commits changed : {0} -> {1}".format(start_commit,master.commit))
return result
-
+
def run_install():
# send the installation binaries
diff --git a/scripts/windows_builder/readme.txt b/scripts/windows_builder/readme.txt
index 85c011e515b74..789e2a9ee0c63 100644
--- a/scripts/windows_builder/readme.txt
+++ b/scripts/windows_builder/readme.txt
@@ -8,9 +8,9 @@ Full python installs for each version with the deps
Currently supporting
-26-32,26-64,27-32,27-64,33-32,33-64,34-32,34-64
+27-32,27-64,34-32,34-64
-Note that 33 and 34 use the 4.0 SDK, while the other suse 3.5 SDK
+Note that 34 use the 4.0 SDK, while the other suse 3.5 SDK
I installed these scripts in C:\Builds
diff --git a/setup.py b/setup.py
index e52c19167c65f..0f4492d9821ee 100755
--- a/setup.py
+++ b/setup.py
@@ -178,7 +178,6 @@ def build_extensions(self):
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
diff --git a/tox.ini b/tox.ini
index 9fbb15087c4d5..5d6c8975307b6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = py26, py27, py32, py33, py34
+envlist = py27, py34, py35
[testenv]
deps =
@@ -41,13 +41,6 @@ commands =
# tox should provide a preinstall-commands hook.
pip uninstall pandas -qy
-[testenv:py26]
-deps =
- numpy==1.6.1
- boto
- bigquery
- {[testenv]deps}
-
[testenv:py27]
deps =
numpy==1.8.1
@@ -55,19 +48,14 @@ deps =
bigquery
{[testenv]deps}
-[testenv:py32]
-deps =
- numpy==1.7.1
- {[testenv]deps}
-
-[testenv:py33]
+[testenv:py34]
deps =
numpy==1.8.0
{[testenv]deps}
-[testenv:py34]
+[testenv:py35]
deps =
- numpy==1.8.0
+ numpy==1.10.0
{[testenv]deps}
[testenv:openpyxl1]
| closes #8760
closes #7718
| https://api.github.com/repos/pandas-dev/pandas/pulls/11988 | 2016-01-07T16:49:44Z | 2016-01-08T14:07:14Z | 2016-01-08T14:07:14Z | 2016-01-08T14:07:20Z |
DOC: whatsnew Timestamp addition example fix up | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 498cfaf838320..4d35d7f833ddf 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -212,8 +212,8 @@ as opposed to
.. code-block:: python
- In [3]: pd.Timestamp('1990315') + pd.Timestamp('19900315')
- ValueError:
+ In [3]: pd.Timestamp('19900315') + pd.Timestamp('19900315')
+ TypeError: unsupported operand type(s) for +: 'Timestamp' and 'Timestamp'
However, when wrapped in a ``Series`` whose ``dtype`` is ``datetime64[ns]`` or ``timedelta64[ns]``,
the ``dtype`` information is respected.
| The problem was I only had 7 digits in the date string.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11976 | 2016-01-07T01:27:59Z | 2016-01-07T01:32:01Z | 2016-01-07T01:32:01Z | 2016-01-07T01:32:01Z |
TST: fix comparison message for #10174 | diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8e533f3e22e5c..adb1b538fc2aa 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -13706,8 +13706,7 @@ def test_quantile_interpolation_np_lt_1p9(self):
assert_series_equal(q, q1)
# interpolation method other than default linear
- expErrMsg = ("Interpolation methods other than linear"
- " not supported in numpy < 1.9")
+ expErrMsg = "Interpolation methods other than linear"
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile(.5, axis=1, interpolation='nearest')
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index caf8883499890..62a3f5249ddce 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3115,8 +3115,7 @@ def test_quantile_interpolation_np_lt_1p9(self):
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# interpolation other than linear
- expErrMsg = "Interpolation methods other than " \
- "linear not supported in numpy < 1.9"
+ expErrMsg = "Interpolation methods other than "
with tm.assertRaisesRegexp(ValueError, expErrMsg):
self.ts.quantile(0.9, interpolation='nearest')
| https://api.github.com/repos/pandas-dev/pandas/pulls/11975 | 2016-01-07T01:01:04Z | 2016-01-07T01:16:19Z | 2016-01-07T01:16:19Z | 2016-01-07T01:16:19Z | |
BUG: GH11835 where comparison of Timedelta array caused infinite loop | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index af3c3dc013e26..2bdcd9e5acff5 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -476,6 +476,7 @@ Bug Fixes
- Bug in ``.style.bar`` may not rendered properly using specific browser (:issue:`11678`)
+- Bug in rich comparison of ``Timedelta`` with a ``numpy.array`` of ``Timedelta``s that caused an infinite recursion (:issue:`11835`)
- Bug in ``df.replace`` while replacing value in mixed dtype ``Dataframe`` (:issue:`11698`)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index cb050f2589673..21f0c44789f9b 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -363,6 +363,14 @@ def test_compare_timedelta_series(self):
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
+ def test_compare_timedelta_ndarray(self):
+ # GH11835
+ periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
+ arr = np.array(periods)
+ result = arr[0] > arr
+ expected = np.array([False, False])
+ self.assert_numpy_array_equal(result, expected)
+
def test_ops_notimplemented(self):
class Other:
pass
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 43f3c3add160a..45dc42f5ce302 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -2184,6 +2184,8 @@ cdef class _Timedelta(timedelta):
raise TypeError('Cannot compare type %r with type %r' %
(type(self).__name__,
type(other).__name__))
+ if isinstance(other, np.ndarray):
+ return PyObject_RichCompare(np.array([self]), other, op)
return PyObject_RichCompare(other, self, _reverse_ops[op])
else:
if op == Py_EQ:
| closes #11835
| https://api.github.com/repos/pandas-dev/pandas/pulls/11957 | 2016-01-04T23:09:14Z | 2016-01-05T00:39:03Z | 2016-01-05T00:39:03Z | 2016-01-05T00:39:06Z |
PEP: pandas/core round 2 | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 462ead70c9f93..abc9e58d7c435 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -13,33 +13,38 @@
from pandas.core.missing import interpolate_2d
from pandas.util.decorators import cache_readonly, deprecate_kwarg
-from pandas.core.common import (ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
- isnull, notnull, is_dtype_equal,
- is_categorical_dtype, is_integer_dtype, is_object_dtype,
- _possibly_infer_to_datetimelike, get_dtype_kinds,
- is_list_like, is_sequence, is_null_slice, is_bool,
- _ensure_platform_int, _ensure_object, _ensure_int64,
- _coerce_indexer_dtype, take_1d)
+from pandas.core.common import (
+ ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex, isnull,
+ notnull, is_dtype_equal, is_categorical_dtype, is_integer_dtype,
+ is_object_dtype, _possibly_infer_to_datetimelike, get_dtype_kinds,
+ is_list_like, is_sequence, is_null_slice, is_bool, _ensure_platform_int,
+ _ensure_object, _ensure_int64, _coerce_indexer_dtype, take_1d)
from pandas.core.dtypes import CategoricalDtype
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option
+
def _cat_compare_op(op):
def f(self, other):
- # On python2, you can usually compare any type to any type, and Categoricals can be
- # seen as a custom type, but having different results depending whether categories are
- # the same or not is kind of insane, so be a bit stricter here and use the python3 idea
- # of comparing only things of equal type.
+ # On python2, you can usually compare any type to any type, and
+ # Categoricals can be seen as a custom type, but having different
+ # results depending whether categories are the same or not is kind of
+ # insane, so be a bit stricter here and use the python3 idea of
+ # comparing only things of equal type.
if not self.ordered:
- if op in ['__lt__', '__gt__','__le__','__ge__']:
- raise TypeError("Unordered Categoricals can only compare equality or not")
+ if op in ['__lt__', '__gt__', '__le__', '__ge__']:
+ raise TypeError("Unordered Categoricals can only compare "
+ "equality or not")
if isinstance(other, Categorical):
- # Two Categoricals can only be be compared if the categories are the same
- if (len(self.categories) != len(other.categories)) or \
- not ((self.categories == other.categories).all()):
- raise TypeError("Categoricals can only be compared if 'categories' are the same")
+ # Two Categoricals can only be be compared if the categories are
+ # the same
+ if ((len(self.categories) != len(other.categories)) or
+ not ((self.categories == other.categories).all())):
+ raise TypeError("Categoricals can only be compared if "
+ "'categories' are the same")
if not (self.ordered == other.ordered):
- raise TypeError("Categoricals can only be compared if 'ordered' is the same")
+ raise TypeError("Categoricals can only be compared if "
+ "'ordered' is the same")
na_mask = (self._codes == -1) | (other._codes == -1)
f = getattr(self._codes, op)
ret = f(other._codes)
@@ -66,37 +71,40 @@ def f(self, other):
elif op == '__ne__':
return np.repeat(True, len(self))
else:
- msg = "Cannot compare a Categorical for op {op} with a scalar, " \
- "which is not a category."
+ msg = ("Cannot compare a Categorical for op {op} with a "
+ "scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
- if op in ['__eq__','__ne__']:
- return getattr(np.array(self),op)(np.array(other))
+ if op in ['__eq__', '__ne__']:
+ return getattr(np.array(self), op)(np.array(other))
- msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
- "compare values, use 'np.asarray(cat) <op> other'."
- raise TypeError(msg.format(op=op,typ=type(other)))
+ msg = ("Cannot compare a Categorical for op {op} with type {typ}."
+ "\nIf you want to compare values, use 'np.asarray(cat) "
+ "<op> other'.")
+ raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
+
def maybe_to_categorical(array):
""" coerce to a categorical if a series is given """
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
return array
+
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
-There is not setter, use the other categorical methods and the normal item setter to change
-values in the categorical.
+There is not setter, use the other categorical methods and the normal item
+setter to change values in the categorical.
"""
_categories_doc = """The categories of this categorical.
@@ -104,16 +112,17 @@ def maybe_to_categorical(array):
Setting assigns new values to each category (effectively a rename of
each individual category).
-The assigned value has to be a list-like object. All items must be unique and the number of items
-in the new categories must be the same as the number of items in the old categories.
+The assigned value has to be a list-like object. All items must be unique and
+the number of items in the new categories must be the same as the number of
+items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
- If the new categories do not validate as categories or if the number of new categories is
- unequal the number of old categories
+ If the new categories do not validate as categories or if the number of new
+ categories is unequal the number of old categories
See also
--------
@@ -124,8 +133,9 @@ def maybe_to_categorical(array):
remove_unused_categories
set_categories
"""
-class Categorical(PandasObject):
+
+class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
@@ -135,27 +145,29 @@ class Categorical(PandasObject):
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
- Assigning values outside of `categories` will raise a `ValueError`. Order is
- defined by the order of the `categories`, not lexical order of the values.
+ Assigning values outside of `categories` will raise a `ValueError`. Order
+ is defined by the order of the `categories`, not lexical order of the
+ values.
Parameters
----------
values : list-like
- The values of the categorical. If categories are given, values not in categories will
- be replaced with NaN.
+ The values of the categorical. If categories are given, values not in
+ categories will be replaced with NaN.
categories : Index-like (unique), optional
- The unique categories for this categorical. If not given, the categories are assumed
- to be the unique values of values.
+ The unique categories for this categorical. If not given, the
+ categories are assumed to be the unique values of values.
ordered : boolean, (default False)
- Whether or not this categorical is treated as a ordered categorical. If not given,
- the resulting categorical will not be ordered.
+ Whether or not this categorical is treated as a ordered categorical.
+ If not given, the resulting categorical will not be ordered.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
- The codes (integer positions, which point to the categories) of this categorical, read only.
+ The codes (integer positions, which point to the categories) of this
+ categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
@@ -164,8 +176,8 @@ class Categorical(PandasObject):
ValueError
If the categories do not validate.
TypeError
- If an explicit ``ordered=True`` is given but no `categories` and the `values` are
- not sortable.
+ If an explicit ``ordered=True`` is given but no `categories` and the
+ `values` are not sortable.
Examples
@@ -179,13 +191,13 @@ class Categorical(PandasObject):
[a, b, c, a, b, c]
Categories (3, object): [a < b < c]
- >>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'], ordered=True)
+ >>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'],
+ ordered=True)
>>> a.min()
'c'
"""
dtype = CategoricalDtype()
"""The dtype (always "category")"""
-
"""Whether or not this Categorical is ordered.
Only ordered `Categoricals` can be sorted (according to the order
@@ -199,34 +211,38 @@ class Categorical(PandasObject):
Categorical.max
"""
- # For comparisons, so that numpy uses our implementation if the compare ops, which raise
+ # For comparisons, so that numpy uses our implementation if the compare
+ # ops, which raise
__array_priority__ = 1000
_typ = 'categorical'
- def __init__(self, values, categories=None, ordered=False, name=None, fastpath=False,
- levels=None):
+ def __init__(self, values, categories=None, ordered=False, name=None,
+ fastpath=False, levels=None):
if fastpath:
# fast path
self._codes = _coerce_indexer_dtype(values, categories)
- self._categories = self._validate_categories(categories, fastpath=isinstance(categories, ABCIndexClass))
+ self._categories = self._validate_categories(
+ categories, fastpath=isinstance(categories, ABCIndexClass))
self._ordered = ordered
return
- if not name is None:
- msg = "the 'name' keyword is removed, use 'name' with consumers of the " \
- "categorical instead (e.g. 'Series(cat, name=\"something\")'"
+ if name is not None:
+ msg = ("the 'name' keyword is removed, use 'name' with consumers "
+ "of the categorical instead (e.g. 'Series(cat, "
+ "name=\"something\")'")
warn(msg, UserWarning, stacklevel=2)
# TODO: Remove after deprecation period in 2017/ after 0.18
- if not levels is None:
- warn("Creating a 'Categorical' with 'levels' is deprecated, use 'categories' instead",
- FutureWarning, stacklevel=2)
+ if levels is not None:
+ warn("Creating a 'Categorical' with 'levels' is deprecated, use "
+ "'categories' instead", FutureWarning, stacklevel=2)
if categories is None:
categories = levels
else:
- raise ValueError("Cannot pass in both 'categories' and (deprecated) 'levels', "
- "use only 'categories'", stacklevel=2)
+ raise ValueError("Cannot pass in both 'categories' and "
+ "(deprecated) 'levels', use only "
+ "'categories'", stacklevel=2)
# sanitize input
if is_categorical_dtype(values):
@@ -246,56 +262,66 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
else:
- # on numpy < 1.6 datetimelike get inferred to all i8 by _sanitize_array
- # which is fine, but since factorize does this correctly no need here
- # this is an issue because _sanitize_array also coerces np.nan to a string
- # under certain versions of numpy as well
- values = _possibly_infer_to_datetimelike(values, convert_dates=True)
+ # on numpy < 1.6 datetimelike get inferred to all i8 by
+ # _sanitize_array which is fine, but since factorize does this
+ # correctly no need here this is an issue because _sanitize_array
+ # also coerces np.nan to a string under certain versions of numpy
+ # as well
+ values = _possibly_infer_to_datetimelike(values,
+ convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
- # On list with NaNs, int values will be converted to float. Use "object" dtype
- # to prevent this. In the end objects will be casted to int/... in the category
- # assignment step.
+ # On list with NaNs, int values will be converted to float. Use
+ # "object" dtype to prevent this. In the end objects will be
+ # casted to int/... in the category assignment step.
dtype = 'object' if isnull(values).any() else None
values = _sanitize_array(values, None, dtype=dtype)
-
if categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if ordered:
- # raise, as we don't have a sortable data structure and so the user should
- # give us one by specifying categories
- raise TypeError("'values' is not ordered, please explicitly specify the "
- "categories order by passing in a categories argument.")
+ # raise, as we don't have a sortable data structure and so
+ # the user should give us one by specifying categories
+ raise TypeError("'values' is not ordered, please "
+ "explicitly specify the categories order "
+ "by passing in a categories argument.")
except ValueError:
- ### FIXME ####
- raise NotImplementedError("> 1 ndim Categorical are not supported at this time")
+ # FIXME
+ raise NotImplementedError("> 1 ndim Categorical are not "
+ "supported at this time")
categories = self._validate_categories(categories)
else:
# there were two ways if categories are present
- # - the old one, where each value is a int pointer to the levels array -> not anymore
- # possible, but code outside of pandas could call us like that, so make some checks
- # - the new one, where each value is also in the categories array (or np.nan)
-
- # make sure that we always have the same type here, no matter what we get passed in
+ # - the old one, where each value is a int pointer to the levels
+ # array -> not anymore possible, but code outside of pandas could
+ # call us like that, so make some checks
+ # - the new one, where each value is also in the categories array
+ # (or np.nan)
+
+ # make sure that we always have the same type here, no matter what
+ # we get passed in
categories = self._validate_categories(categories)
codes = _get_codes_for_values(values, categories)
- # TODO: check for old style usage. These warnings should be removes after 0.18/ in 2016
+ # TODO: check for old style usage. These warnings should be removes
+ # after 0.18/ in 2016
if is_integer_dtype(values) and not is_integer_dtype(categories):
- warn("Values and categories have different dtypes. Did you mean to use\n"
- "'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
+ warn("Values and categories have different dtypes. Did you "
+ "mean to use\n'Categorical.from_codes(codes, "
+ "categories)'?", RuntimeWarning, stacklevel=2)
- if len(values) and is_integer_dtype(values) and (codes == -1).all():
- warn("None of the categories were found in values. Did you mean to use\n"
- "'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
+ if (len(values) and is_integer_dtype(values) and
+ (codes == -1).all()):
+ warn("None of the categories were found in values. Did you "
+ "mean to use\n'Categorical.from_codes(codes, "
+ "categories)'?", RuntimeWarning, stacklevel=2)
self.set_ordered(ordered or False, inplace=True)
self._categories = categories
@@ -303,8 +329,9 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
def copy(self):
""" Copy constructor. """
- return Categorical(values=self._codes.copy(),categories=self.categories,
- ordered=self.ordered, fastpath=True)
+ return Categorical(values=self._codes.copy(),
+ categories=self.categories, ordered=self.ordered,
+ fastpath=True)
def astype(self, dtype):
""" coerce this type to another dtype """
@@ -356,37 +383,45 @@ def from_codes(cls, codes, categories, ordered=False, name=None):
"""
Make a Categorical type from codes and categories arrays.
- This constructor is useful if you already have codes and categories and so do not need the
- (computation intensive) factorization step, which is usually done on the constructor.
+ This constructor is useful if you already have codes and categories and
+ so do not need the (computation intensive) factorization step, which is
+ usually done on the constructor.
- If your data does not follow this convention, please use the normal constructor.
+ If your data does not follow this convention, please use the normal
+ constructor.
Parameters
----------
codes : array-like, integers
- An integer array, where each integer points to a category in categories or -1 for NaN
+ An integer array, where each integer points to a category in
+ categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
- Whether or not this categorical is treated as a ordered categorical. If not given,
- the resulting categorical will be unordered.
- """
- if not name is None:
- msg = "the 'name' keyword is removed, use 'name' with consumers of the " \
- "categorical instead (e.g. 'Series(cat, name=\"something\")'"
+ Whether or not this categorical is treated as a ordered
+ categorical. If not given, the resulting categorical will be
+ unordered.
+ """
+ if name is not None:
+ msg = ("the 'name' keyword is removed, use 'name' with consumers "
+ "of the categorical instead (e.g. 'Series(cat, "
+ "name=\"something\")'")
warn(msg, UserWarning, stacklevel=2)
try:
codes = np.asarray(codes, np.int64)
except:
- raise ValueError("codes need to be convertible to an arrays of integers")
+ raise ValueError(
+ "codes need to be convertible to an arrays of integers")
categories = cls._validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
- raise ValueError("codes need to be between -1 and len(categories)-1")
+ raise ValueError("codes need to be between -1 and "
+ "len(categories)-1")
- return Categorical(codes, categories=categories, ordered=ordered, fastpath=True)
+ return Categorical(codes, categories=categories, ordered=ordered,
+ fastpath=True)
_codes = None
@@ -416,7 +451,8 @@ def _get_labels(self):
Deprecated, use .codes!
"""
- warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=2)
+ warn("'labels' is deprecated. Use 'codes' instead", FutureWarning,
+ stacklevel=2)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
@@ -438,10 +474,11 @@ def _validate_categories(cls, categories, fastpath=False):
dtype = None
if not hasattr(categories, "dtype"):
categories = _convert_to_list_like(categories)
- # on categories with NaNs, int values would be converted to float.
- # Use "object" dtype to prevent this.
+ # On categories with NaNs, int values would be converted to
+ # float. Use "object" dtype to prevent this.
if isnull(categories).any():
- without_na = np.array([x for x in categories if notnull(x)])
+ without_na = np.array([x for x in categories
+ if notnull(x)])
with_na = np.array(categories)
if with_na.dtype != without_na.dtype:
dtype = "object"
@@ -455,7 +492,8 @@ def _validate_categories(cls, categories, fastpath=False):
# we don't allow NaNs in the categories themselves
if categories.hasnans:
- # NaNs in cats deprecated in 0.17, remove in 0.18 or 0.19 GH 10748
+ # NaNs in cats deprecated in 0.17,
+ # remove in 0.18 or 0.19 GH 10748
msg = ('\nSetting NaNs in `categories` is deprecated and '
'will be removed in a future version of pandas.')
warn(msg, FutureWarning, stacklevel=3)
@@ -478,9 +516,10 @@ def _set_categories(self, categories, fastpath=False):
"""
categories = self._validate_categories(categories, fastpath=fastpath)
- if not fastpath and not self._categories is None and len(categories) != len(self._categories):
- raise ValueError("new categories need to have the same number of items than the old "
- "categories!")
+ if (not fastpath and self._categories is not None and
+ len(categories) != len(self._categories)):
+ raise ValueError("new categories need to have the same number of "
+ "items than the old categories!")
self._categories = categories
@@ -489,16 +528,19 @@ def _get_categories(self):
# categories is an Index, which is immutable -> no need to copy
return self._categories
- categories = property(fget=_get_categories, fset=_set_categories, doc=_categories_doc)
+ categories = property(fget=_get_categories, fset=_set_categories,
+ doc=_categories_doc)
def _set_levels(self, levels):
""" set new levels (deprecated, use "categories") """
- warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
+ warn("Assigning to 'levels' is deprecated, use 'categories'",
+ FutureWarning, stacklevel=2)
self.categories = levels
def _get_levels(self):
""" Gets the levels (deprecated, use "categories") """
- warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
+ warn("Accessing 'levels' is deprecated, use 'categories'",
+ FutureWarning, stacklevel=2)
return self.categories
# TODO: Remove after deprecation period in 2017/ after 0.18
@@ -508,8 +550,8 @@ def _get_levels(self):
def _set_ordered(self, value):
""" Sets the ordered attribute to the boolean value """
- warn("Setting 'ordered' directly is deprecated, use 'set_ordered'", FutureWarning,
- stacklevel=2)
+ warn("Setting 'ordered' directly is deprecated, use 'set_ordered'",
+ FutureWarning, stacklevel=2)
self.set_ordered(value, inplace=True)
def set_ordered(self, value, inplace=False):
@@ -518,10 +560,11 @@ def set_ordered(self, value, inplace=False):
Parameters
----------
- value : boolean to set whether this categorical is ordered (True) or not (False)
+ value : boolean to set whether this categorical is ordered (True) or
+ not (False)
inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy of this categorical
- with ordered set to the value
+ Whether or not to set the ordered attribute inplace or return a copy
+ of this categorical with ordered set to the value
"""
if not is_bool(value):
raise TypeError("ordered must be a boolean value")
@@ -537,8 +580,8 @@ def as_ordered(self, inplace=False):
Parameters
----------
inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy of this categorical
- with ordered set to True
+ Whether or not to set the ordered attribute inplace or return a copy
+ of this categorical with ordered set to True
"""
return self.set_ordered(True, inplace=inplace)
@@ -549,8 +592,8 @@ def as_unordered(self, inplace=False):
Parameters
----------
inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy of this categorical
- with ordered set to False
+ Whether or not to set the ordered attribute inplace or return a copy
+ of this categorical with ordered set to False
"""
return self.set_ordered(False, inplace=inplace)
@@ -560,22 +603,25 @@ def _get_ordered(self):
ordered = property(fget=_get_ordered, fset=_set_ordered)
- def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
+ def set_categories(self, new_categories, ordered=None, rename=False,
+ inplace=False):
""" Sets the categories to the specified new_categories.
- `new_categories` can include new categories (which will result in unused categories) or
- or remove old categories (which results in values set to NaN). If `rename==True`,
- the categories will simple be renamed (less or more items than in old categories will
- result in values set to NaN or in unused categories respectively).
+ `new_categories` can include new categories (which will result in
+ unused categories) or remove old categories (which results in values
+ set to NaN). If `rename==True`, the categories will simple be renamed
+ (less or more items than in old categories will result in values set to
+ NaN or in unused categories respectively).
- This method can be used to perform more than one action of adding, removing,
- and reordering simultaneously and is therefore faster than performing the individual steps
- via the more specialised methods.
+ This method can be used to perform more than one action of adding,
+ removing, and reordering simultaneously and is therefore faster than
+ performing the individual steps via the more specialised methods.
- On the other hand this methods does not do checks (e.g., whether the old categories are
- included in the new categories on a reorder), which can result in surprising changes, for
- example when using special string dtypes on python3, which does not considers a S1 string
- equal to a single char python string.
+ On the other hand this methods does not do checks (e.g., whether the
+ old categories are included in the new categories on a reorder), which
+ can result in surprising changes, for example when using special string
+ dtypes on python3, which does not considers a S1 string equal to a
+ single char python string.
Raises
------
@@ -587,14 +633,14 @@ def set_categories(self, new_categories, ordered=None, rename=False, inplace=Fal
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
- Whether or not the categorical is treated as a ordered categorical. If not given,
- do not change the ordered information.
+ Whether or not the categorical is treated as a ordered categorical.
+ If not given, do not change the ordered information.
rename : boolean (default: False)
- Whether or not the new_categories should be considered as a rename of the old
- categories or as reordered categories.
+ Whether or not the new_categories should be considered as a rename
+ of the old categories or as reordered categories.
inplace : boolean (default: False)
- Whether or not to reorder the categories inplace or return a copy of this categorical
- with reordered categories.
+ Whether or not to reorder the categories inplace or return a copy of
+ this categorical with reordered categories.
Returns
-------
@@ -611,7 +657,8 @@ def set_categories(self, new_categories, ordered=None, rename=False, inplace=Fal
new_categories = self._validate_categories(new_categories)
cat = self if inplace else self.copy()
if rename:
- if not cat._categories is None and len(new_categories) < len(cat._categories):
+ if (cat._categories is not None and
+ len(new_categories) < len(cat._categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_categories)] = -1
else:
@@ -629,22 +676,23 @@ def set_categories(self, new_categories, ordered=None, rename=False, inplace=Fal
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
- The new categories has to be a list-like object. All items must be unique and the number of
- items in the new categories must be the same as the number of items in the old categories.
+ The new categories has to be a list-like object. All items must be
+ unique and the number of items in the new categories must be the same
+ as the number of items in the old categories.
Raises
------
ValueError
- If the new categories do not have the same number of items than the current categories
- or do not validate as categories
+ If the new categories do not have the same number of items than the
+ current categories or do not validate as categories
Parameters
----------
new_categories : Index-like
The renamed categories.
inplace : boolean (default: False)
- Whether or not to rename the categories inplace or return a copy of this categorical
- with renamed categories.
+ Whether or not to rename the categories inplace or return a copy of
+ this categorical with renamed categories.
Returns
-------
@@ -666,23 +714,25 @@ def rename_categories(self, new_categories, inplace=False):
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
- `new_categories` need to include all old categories and no new category items.
+ `new_categories` need to include all old categories and no new category
+ items.
Raises
------
ValueError
- If the new categories do not contain all old category items or any new ones
+ If the new categories do not contain all old category items or any
+ new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
- Whether or not the categorical is treated as a ordered categorical. If not given,
- do not change the ordered information.
+ Whether or not the categorical is treated as a ordered categorical.
+ If not given, do not change the ordered information.
inplace : boolean (default: False)
- Whether or not to reorder the categories inplace or return a copy of this categorical
- with reordered categories.
+ Whether or not to reorder the categories inplace or return a copy of
+ this categorical with reordered categories.
Returns
-------
@@ -697,27 +747,30 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
set_categories
"""
if set(self._categories) != set(new_categories):
- raise ValueError("items in new_categories are not the same as in old categories")
- return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
+ raise ValueError("items in new_categories are not the same as in "
+ "old categories")
+ return self.set_categories(new_categories, ordered=ordered,
+ inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
- `new_categories` will be included at the last/highest place in the categories and will be
- unused directly after this call.
+ `new_categories` will be included at the last/highest place in the
+ categories and will be unused directly after this call.
Raises
------
ValueError
- If the new categories include old categories or do not validate as categories
+ If the new categories include old categories or do not validate as
+ categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
- Whether or not to add the categories inplace or return a copy of this categorical
- with added categories.
+ Whether or not to add the categories inplace or return a copy of
+ this categorical with added categories.
Returns
-------
@@ -735,7 +788,8 @@ def add_categories(self, new_categories, inplace=False):
new_categories = [new_categories]
already_included = set(new_categories) & set(self._categories)
if len(already_included) != 0:
- msg = "new categories must not include old categories: %s" % str(already_included)
+ msg = ("new categories must not include old categories: %s" %
+ str(already_included))
raise ValueError(msg)
new_categories = list(self._categories) + list(new_categories)
cat = self if inplace else self.copy()
@@ -747,8 +801,8 @@ def add_categories(self, new_categories, inplace=False):
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
- `removals` must be included in the old categories. Values which were in the removed
- categories will be set to NaN
+ `removals` must be included in the old categories. Values which were in
+ the removed categories will be set to NaN
Raises
------
@@ -760,8 +814,8 @@ def remove_categories(self, removals, inplace=False):
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
- Whether or not to remove the categories inplace or return a copy of this categorical
- with removed categories.
+ Whether or not to remove the categories inplace or return a copy of
+ this categorical with removed categories.
Returns
-------
@@ -780,7 +834,7 @@ def remove_categories(self, removals, inplace=False):
removal_set = set(list(removals))
not_included = removal_set - set(self._categories)
- new_categories = [ c for c in self._categories if c not in removal_set ]
+ new_categories = [c for c in self._categories if c not in removal_set]
# GH 10156
if any(isnull(removals)):
@@ -788,11 +842,11 @@ def remove_categories(self, removals, inplace=False):
new_categories = [x for x in new_categories if notnull(x)]
if len(not_included) != 0:
- raise ValueError("removals must all be in old categories: %s" % str(not_included))
-
- return self.set_categories(new_categories, ordered=self.ordered, rename=False,
- inplace=inplace)
+ raise ValueError("removals must all be in old categories: %s" %
+ str(not_included))
+ return self.set_categories(new_categories, ordered=self.ordered,
+ rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
@@ -800,8 +854,8 @@ def remove_unused_categories(self, inplace=False):
Parameters
----------
inplace : boolean (default: False)
- Whether or not to drop unused categories inplace or return a copy of this categorical
- with unused categories dropped.
+ Whether or not to drop unused categories inplace or return a copy of
+ this categorical with unused categories dropped.
Returns
-------
@@ -827,7 +881,6 @@ def remove_unused_categories(self, inplace=False):
if not inplace:
return cat
-
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
@@ -874,8 +927,7 @@ def shift(self, periods):
else:
codes[periods:] = -1
- return Categorical.from_codes(codes,
- categories=self.categories,
+ return Categorical.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
@@ -885,11 +937,12 @@ def __array__(self, dtype=None):
Returns
-------
values : numpy array
- A numpy array of either the specified dtype or, if dtype==None (default), the same
- dtype as categorical.categories.dtype
+ A numpy array of either the specified dtype or,
+ if dtype==None (default), the same dtype as
+ categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
- if dtype and not is_dtype_equal(dtype,self.categories.dtype):
+ if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
return ret
@@ -902,8 +955,8 @@ def __setstate__(self, state):
if '_codes' not in state and 'labels' in state:
state['_codes'] = state.pop('labels')
if '_categories' not in state and '_levels' in state:
- state['_categories'] = \
- self._validate_categories(state.pop('_levels'))
+ state['_categories'] = self._validate_categories(state.pop(
+ '_levels'))
# 0.16.0 ordered change
if '_ordered' not in state:
@@ -960,7 +1013,8 @@ def searchsorted(self, v, side='left', sorter=None):
Parameters
----------
v : array_like
- Array-like values or a scalar value, to insert/search for in `self`.
+ Array-like values or a scalar value, to insert/search for in
+ `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
@@ -996,16 +1050,20 @@ def searchsorted(self, v, side='left', sorter=None):
array([1, 4])
>>> x.searchsorted(['bread', 'eggs'], side='right')
array([3, 4]) # eggs before milk
- >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ])
- >>> x.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
- array([3, 5]) # eggs after donuts, after switching milk and donuts
+ >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk',
+ 'donuts' ])
+ >>> x.searchsorted(['bread', 'eggs'], side='right',
+ sorter=[0, 1, 2, 3, 5, 4])
+ array([3, 5]) # eggs after donuts, after switching milk and donuts
"""
if not self.ordered:
- raise ValueError("Categorical not ordered\n"
- "you can use .as_ordered() to change the Categorical to an ordered one\n")
+ raise ValueError("Categorical not ordered\nyou can use "
+ ".as_ordered() to change the Categorical to an "
+ "ordered one")
from pandas.core.series import Series
- values_as_codes = self.categories.values.searchsorted(Series(v).values, side)
+ values_as_codes = self.categories.values.searchsorted(
+ Series(v).values, side)
return self.codes.searchsorted(values_as_codes, sorter=sorter)
def isnull(self):
@@ -1031,14 +1089,15 @@ def isnull(self):
if np.nan in self.categories:
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
- ret = np.logical_or(ret , self._codes == nan_pos)
+ ret = np.logical_or(ret, self._codes == nan_pos)
return ret
def notnull(self):
"""
Reverse of isnull
- Both missing values (-1 in .codes) and NA as a category are detected as null.
+ Both missing values (-1 in .codes) and NA as a category are detected as
+ null.
Returns
-------
@@ -1087,9 +1146,8 @@ def value_counts(self, dropna=True):
from pandas.core.series import Series
from pandas.core.index import CategoricalIndex
- obj = self.remove_categories([np.nan]) \
- if dropna and isnull(self.categories).any() else self
-
+ obj = (self.remove_categories([np.nan]) if dropna and
+ isnull(self.categories).any() else self)
code, cat = obj._codes, obj.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
@@ -1101,8 +1159,8 @@ def value_counts(self, dropna=True):
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
- ix = Categorical(ix, categories=cat,
- ordered=obj.ordered, fastpath=True)
+ ix = Categorical(ix, categories=cat, ordered=obj.ordered,
+ fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
@@ -1126,7 +1184,8 @@ def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
- "you can use .as_ordered() to change the Categorical to an ordered one\n".format(op=op))
+ "you can use .as_ordered() to change the "
+ "Categorical to an ordered one\n".format(op=op))
def argsort(self, ascending=True, **kwargs):
""" Implements ndarray.argsort.
@@ -1145,7 +1204,8 @@ def argsort(self, ascending=True, **kwargs):
return result
def sort_values(self, inplace=False, ascending=True, na_position='last'):
- """ Sorts the Category by category value returning a new Categorical by default.
+ """ Sorts the Category by category value returning a new Categorical by
+ default.
Only ordered Categoricals can be sorted!
@@ -1169,7 +1229,7 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
--------
Category.sort
"""
- if na_position not in ['last','first']:
+ if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
codes = np.sort(self._codes)
@@ -1177,19 +1237,19 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
codes = codes[::-1]
# NaN handling
- na_mask = (codes==-1)
+ na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
- if na_position=="first" and not ascending:
+ if na_position == "first" and not ascending:
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
- elif na_position=="last" and not ascending:
+ elif na_position == "last" and not ascending:
# ... and to the end
new_codes = codes.copy()
- pos = len(codes)-n_nans
+ pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
@@ -1197,14 +1257,15 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
self._codes = codes
return
else:
- return Categorical(values=codes,categories=self.categories, ordered=self.ordered,
- fastpath=True)
+ return Categorical(values=codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
def order(self, inplace=False, ascending=True, na_position='last'):
"""
DEPRECATED: use :meth:`Categorical.sort_values`
- Sorts the Category by category value returning a new Categorical by default.
+ Sorts the Category by category value returning a new Categorical by
+ default.
Only ordered Categoricals can be sorted!
@@ -1228,9 +1289,10 @@ def order(self, inplace=False, ascending=True, na_position='last'):
--------
Category.sort
"""
- warn("order is deprecated, use sort_values(...)",
- FutureWarning, stacklevel=2)
- return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position)
+ warn("order is deprecated, use sort_values(...)", FutureWarning,
+ stacklevel=2)
+ return self.sort_values(inplace=inplace, ascending=ascending,
+ na_position=na_position)
def sort(self, inplace=True, ascending=True, na_position='last'):
""" Sorts the Category inplace by category value.
@@ -1340,8 +1402,8 @@ def fillna(self, value=None, method=None, limit=None):
if method is not None:
values = self.to_dense().reshape(-1, len(self))
- values = interpolate_2d(
- values, method, 0, None, value).astype(self.categories.dtype)[0]
+ values = interpolate_2d(values, method, 0, None,
+ value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
@@ -1349,13 +1411,13 @@ def fillna(self, value=None, method=None, limit=None):
if not isnull(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
- mask = values==-1
+ mask = values == -1
if mask.any():
values = values.copy()
values[mask] = self.categories.get_loc(value)
- return Categorical(values, categories=self.categories, ordered=self.ordered,
- fastpath=True)
+ return Categorical(values, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value.
@@ -1368,8 +1430,8 @@ def take_nd(self, indexer, allow_fill=True, fill_value=None):
assert isnull(fill_value)
codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
- result = Categorical(codes, categories=self.categories, ordered=self.ordered,
- fastpath=True)
+ result = Categorical(codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
return result
take = take_nd
@@ -1384,12 +1446,13 @@ def _slice(self, slicer):
# in a 2-d case be passd (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not is_null_slice(slicer[0]):
- raise AssertionError("invalid slicing for a 1-ndim categorical")
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
slicer = slicer[1]
_codes = self._codes[slicer]
- return Categorical(values=_codes,categories=self.categories, ordered=self.ordered,
- fastpath=True)
+ return Categorical(values=_codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
@@ -1400,11 +1463,12 @@ def __iter__(self):
return iter(self.get_values())
def _tidy_repr(self, max_vals=10, footer=True):
- """ a short repr displaying only max_vals and an optional (but default footer) """
+ """ a short repr displaying only max_vals and an optional (but default
+ footer)
+ """
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
- tail = self[-(max_vals - num):]._get_repr(length=False,
- footer=False)
+ tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = '%s, ..., %s' % (head[:-1], tail[1:])
if footer:
@@ -1414,8 +1478,8 @@ def _tidy_repr(self, max_vals=10, footer=True):
def _repr_categories(self):
""" return the base repr for the categories """
- max_categories = (10 if get_option("display.max_categories") == 0
- else get_option("display.max_categories"))
+ max_categories = (10 if get_option("display.max_categories") == 0 else
+ get_option("display.max_categories"))
from pandas.core import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
@@ -1433,7 +1497,8 @@ def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
- dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype))
+ dtype = getattr(self.categories, 'dtype_str',
+ str(self.categories.dtype))
levheader = "Categories (%d, %s): " % (len(self.categories), dtype)
width, height = get_terminal_size()
@@ -1443,20 +1508,20 @@ def _repr_categories_info(self):
max_width = 0
levstring = ""
start = True
- cur_col_len = len(levheader) # header
+ cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
- linesep = sep.rstrip() + "\n" # remove whitespace
+ linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
- cur_col_len = len(levheader) + 1 # header + a whitespace
+ cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace to simple save space by
- return levheader + "["+levstring.replace(" < ... < ", " ... ")+"]"
+ return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
@@ -1464,10 +1529,8 @@ def _repr_footer(self):
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.core import format as fmt
- formatter = fmt.CategoricalFormatter(self,
- length=length,
- na_rep=na_rep,
- footer=footer)
+ formatter = fmt.CategoricalFormatter(self, length=length,
+ na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
@@ -1479,9 +1542,9 @@ def __unicode__(self):
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
- result = '[], %s' % self._get_repr(length=False,
- footer=True,
- ).replace("\n",", ")
+ result = ('[], %s' %
+ self._get_repr(length=False,
+ footer=True, ).replace("\n", ", "))
return result
@@ -1500,8 +1563,10 @@ def __getitem__(self, key):
else:
return self.categories[i]
else:
- return Categorical(values=self._codes[key], categories=self.categories,
- ordered=self.ordered, fastpath=True)
+ return Categorical(values=self._codes[key],
+ categories=self.categories,
+ ordered=self.ordered,
+ fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
@@ -1510,26 +1575,26 @@ def __setitem__(self, key, value):
Raises
------
ValueError
- If (one or more) Value is not in categories or if a assigned `Categorical` has not the
- same categories
-
+ If (one or more) Value is not in categories or if a assigned
+ `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
- raise ValueError("Cannot set a Categorical with another, without identical "
- "categories")
+ raise ValueError("Cannot set a Categorical with another, "
+ "without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
- # no assignments of values not in categories, but it's always ok to set something to np.nan
+ # no assignments of values not in categories, but it's always ok to set
+ # something to np.nan
if len(to_add) and not isnull(to_add).all():
- raise ValueError("cannot setitem on a Categorical with a new category,"
- " set the categories first")
+ raise ValueError("Cannot setitem on a Categorical with a new "
+ "category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
@@ -1541,12 +1606,14 @@ def __setitem__(self, key, value):
# in a 2-d case be passd (slice(None),....)
if len(key) == 2:
if not is_null_slice(key[0]):
- raise AssertionError("invalid slicing for a 1-ndim categorical")
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
- raise AssertionError("invalid slicing for a 1-ndim categorical")
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
@@ -1554,18 +1621,20 @@ def __setitem__(self, key, value):
# Array of True/False in Series or Categorical
else:
- # There is a bug in numpy, which does not accept a Series as a indexer
+ # There is a bug in numpy, which does not accept a Series as a
+ # indexer
# https://github.com/pydata/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
- # FIXME: remove when numpy 1.9 is the lowest numpy version pandas accepts...
+ # FIXME: remove when numpy 1.9 is the lowest numpy version pandas
+ # accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
- # FIXME: the following can be removed after https://github.com/pydata/pandas/issues/7820
- # is fixed.
- # float categories do currently return -1 for np.nan, even if np.nan is included in the
- # index -> "repair" this here
+ # FIXME: the following can be removed after GH7820 is fixed:
+ # https://github.com/pydata/pandas/issues/7820
+ # float categories do currently return -1 for np.nan, even if np.nan is
+ # included in the index -> "repair" this here
if isnull(rvalue).any() and isnull(self.categories).any():
nan_pos = np.where(isnull(self.categories))[0]
lindexer[lindexer == -1] = nan_pos
@@ -1573,13 +1642,14 @@ def __setitem__(self, key, value):
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
- #### reduction ops ####
+ # reduction ops #
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation """
- func = getattr(self,name,None)
+ func = getattr(self, name, None)
if func is None:
- raise TypeError("Categorical cannot perform the operation {op}".format(op=name))
+ raise TypeError("Categorical cannot perform the operation "
+ "{op}".format(op=name))
return func(numeric_only=numeric_only, **kwds)
def min(self, numeric_only=None, **kwargs):
@@ -1607,7 +1677,6 @@ def min(self, numeric_only=None, **kwargs):
else:
return self.categories[pointer]
-
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
@@ -1637,8 +1706,8 @@ def mode(self):
"""
Returns the mode(s) of the Categorical.
- Empty if nothing occurs at least 2 times. Always returns `Categorical` even
- if only one value.
+ Empty if nothing occurs at least 2 times. Always returns `Categorical`
+ even if only one value.
Returns
-------
@@ -1647,14 +1716,15 @@ def mode(self):
import pandas.hashtable as htable
good = self._codes != -1
- result = Categorical(sorted(htable.mode_int64(_ensure_int64(self._codes[good]))),
- categories=self.categories,ordered=self.ordered, fastpath=True)
+ values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
+ result = Categorical(values=values, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
return result
def unique(self):
"""
- Return the ``Categorical`` which ``categories`` and ``codes`` are unique.
- Unused categories are NOT returned.
+ Return the ``Categorical`` which ``categories`` and ``codes`` are
+ unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
@@ -1690,7 +1760,8 @@ def equals(self, other):
-------
are_equal : boolean
"""
- return self.is_dtype_equal(other) and np.array_equal(self._codes, other._codes)
+ return (self.is_dtype_equal(other) and
+ np.array_equal(self._codes, other._codes))
def is_dtype_equal(self, other):
"""
@@ -1707,7 +1778,8 @@ def is_dtype_equal(self, other):
"""
try:
- return self.categories.equals(other.categories) and self.ordered == other.ordered
+ return (self.categories.equals(other.categories) and
+ self.ordered == other.ordered)
except (AttributeError, TypeError):
return False
@@ -1723,8 +1795,8 @@ def describe(self):
freqs = counts / float(counts.sum())
from pandas.tools.merge import concat
- result = concat([counts,freqs],axis=1)
- result.columns = ['counts','freqs']
+ result = concat([counts, freqs], axis=1)
+ result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
@@ -1742,15 +1814,16 @@ def repeat(self, repeats):
return Categorical(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
+# The Series.cat accessor
-##### The Series.cat accessor #####
class CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
- Be aware that assigning to `categories` is a inplace operation, while all methods return
- new categorical data per default (but can be called with `inplace=True`).
+ Be aware that assigning to `categories` is a inplace operation, while all
+ methods return new categorical data per default (but can be called with
+ `inplace=True`).
Examples
--------
@@ -1787,24 +1860,21 @@ def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.categorical, name)
res = method(*args, **kwargs)
- if not res is None:
+ if res is not None:
return Series(res, index=self.index)
+
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
- accessors=["categories", "ordered"],
+ accessors=["categories",
+ "ordered"],
typ='property')
-CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
- accessors=["rename_categories",
- "reorder_categories",
- "add_categories",
- "remove_categories",
- "remove_unused_categories",
- "set_categories",
- "as_ordered",
- "as_unordered"],
- typ='method')
-
-##### utility routines #####
+CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[
+ "rename_categories", "reorder_categories", "add_categories",
+ "remove_categories", "remove_unused_categories", "set_categories",
+ "as_ordered", "as_unordered"], typ='method')
+
+# utility routines
+
def _get_codes_for_values(values, categories):
"""
@@ -1812,7 +1882,7 @@ def _get_codes_for_values(values, categories):
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
- if not is_dtype_equal(values.dtype,categories.dtype):
+ if not is_dtype_equal(values.dtype, categories.dtype):
values = _ensure_object(values)
categories = _ensure_object(categories)
@@ -1822,13 +1892,14 @@ def _get_codes_for_values(values, categories):
t.map_locations(cats)
return _coerce_indexer_dtype(t.lookup(vals), cats)
+
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
- if (is_sequence(list_like) or isinstance(list_like, tuple)
- or isinstance(list_like, types.GeneratorType)):
+ if (is_sequence(list_like) or isinstance(list_like, tuple) or
+ isinstance(list_like, types.GeneratorType)):
return list(list_like)
elif np.isscalar(list_like):
return [list_like]
@@ -1836,6 +1907,7 @@ def _convert_to_list_like(list_like):
# is this reached?
return [list_like]
+
def _concat_compat(to_concat, axis=0):
"""Concatenate an object/categorical array of arrays, each of which is a
single dtype
@@ -1882,9 +1954,9 @@ def convert_categorical(x):
if len(categoricals) == len(to_concat):
# concating numeric types is much faster than concating object types
# and fastpath takes a shorter path through the constructor
- return Categorical(np.concatenate([x.codes for x in to_concat], axis=0),
- rawcats,
- ordered=categoricals[0].ordered,
+ return Categorical(np.concatenate([x.codes for x in to_concat],
+ axis=0),
+ rawcats, ordered=categoricals[0].ordered,
fastpath=True)
else:
concatted = np.concatenate(list(map(convert_categorical, to_concat)),
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 3d320199e04d9..b80b7eecaeb11 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -17,10 +17,13 @@
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
-from pandas.compat import BytesIO, range, long, u, zip, map, string_types, iteritems
-from pandas.core.dtypes import CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType
+from pandas.compat import (BytesIO, range, long, u, zip, map, string_types,
+ iteritems)
+from pandas.core.dtypes import (CategoricalDtype, CategoricalDtypeType,
+ DatetimeTZDtype, DatetimeTZDtypeType)
from pandas.core.config import get_option
+
class PandasError(Exception):
pass
@@ -41,70 +44,78 @@ class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
+
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
- return "This method must be defined in the concrete class of " \
- + self.class_instance.__class__.__name__
+ return ("This method must be defined in the concrete class of %s" %
+ self.class_instance.__class__.__name__)
+
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
- for t in ['O', 'int8',
- 'uint8', 'int16', 'uint16', 'int32',
- 'uint32', 'int64', 'uint64']])
+ for t in ['O', 'int8', 'uint8', 'int16', 'uint16',
+ 'int32', 'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
-_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
- 'm8[ns]', '<m8[ns]', '>m8[ns]']])
+_DATELIKE_DTYPES = set([np.dtype(t)
+ for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
+ 'm8[ns]', '<m8[ns]', '>m8[ns]']])
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
+
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, '_typ') in comp
- dct = dict(__instancecheck__=_check,
- __subclasscheck__=_check)
- meta = type("ABCBase", (type,), dct)
+
+ dct = dict(__instancecheck__=_check, __subclasscheck__=_check)
+ meta = type("ABCBase", (type, ), dct)
return meta(name, tuple(), dct)
-ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index",))
-ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index",))
-ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ", ("float64index",))
-ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",))
-ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",))
-ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",))
-ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",))
-ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",))
-ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ", ("index",
- "int64index",
- "float64index",
- "multiindex",
- "datetimeindex",
- "timedeltaindex",
- "periodindex",
- "categoricalindex"))
-
-ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
-ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
-ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
+ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", ))
+ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ",
+ ("int64index", ))
+ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ",
+ ("float64index", ))
+ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ",
+ ("multiindex", ))
+ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ",
+ ("datetimeindex", ))
+ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ",
+ ("timedeltaindex", ))
+ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ",
+ ("periodindex", ))
+ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ",
+ ("categoricalindex", ))
+ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ",
+ ("index", "int64index", "float64index",
+ "multiindex", "datetimeindex",
+ "timedeltaindex", "periodindex",
+ "categoricalindex"))
+
+ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", ))
+ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", ))
+ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel", ))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
-ABCCategorical = create_pandas_abc_type("ABCCategorical","_typ",("categorical"))
-ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period",))
+ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ",
+ ("categorical"))
+ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
-class _ABCGeneric(type):
+class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
@@ -136,6 +147,7 @@ class to receive bound method
else:
setattr(cls, name, func)
+
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
@@ -198,6 +210,7 @@ def _isnull_old(obj):
else:
return obj is None
+
_isnull = _isnull_new
@@ -263,6 +276,7 @@ def _isnull_ndarraylike(obj):
return result
+
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
@@ -316,6 +330,7 @@ def notnull(obj):
return not res
return ~res
+
def is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
@@ -324,18 +339,19 @@ def is_null_datelike_scalar(other):
elif np.isscalar(other):
# a timedelta
- if hasattr(other,'dtype'):
+ if hasattr(other, 'dtype'):
return other.view('i8') == tslib.iNaT
elif is_integer(other) and other == tslib.iNaT:
return True
return isnull(other)
return False
+
def array_equivalent(left, right, strict_nan=False):
"""
- True if two arrays, left and right, have equal non-NaN elements, and NaNs in
- corresponding locations. False otherwise. It is assumed that left and right
- are NumPy arrays of the same dtype. The behavior of this function
+ True if two arrays, left and right, have equal non-NaN elements, and NaNs
+ in corresponding locations. False otherwise. It is assumed that left and
+ right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
@@ -363,22 +379,25 @@ def array_equivalent(left, right, strict_nan=False):
"""
left, right = np.asarray(left), np.asarray(right)
- if left.shape != right.shape: return False
+ if left.shape != right.shape:
+ return False
# Object arrays can contain None, NaN and NaT.
- if issubclass(left.dtype.type, np.object_) or issubclass(right.dtype.type, np.object_):
+ if (issubclass(left.dtype.type, np.object_) or
+ issubclass(right.dtype.type, np.object_)):
if not strict_nan:
# pd.isnull considers NaN and None to be equivalent.
- return lib.array_equivalent_object(_ensure_object(left.ravel()),
- _ensure_object(right.ravel()))
+ return lib.array_equivalent_object(
+ _ensure_object(left.ravel()), _ensure_object(right.ravel()))
for left_value, right_value in zip(left, right):
if left_value is tslib.NaT and right_value is not tslib.NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
- if not isinstance(right_value, float) or not np.isnan(right_value):
+ if (not isinstance(right_value, float) or
+ not np.isnan(right_value)):
return False
else:
if left_value != right_value:
@@ -396,6 +415,7 @@ def array_equivalent(left, right, strict_nan=False):
# NaNs cannot occur otherwise.
return np.array_equal(left, right)
+
def _iterable_not_string(x):
return (isinstance(x, collections.Iterable) and
not isinstance(x, compat.string_types))
@@ -502,6 +522,7 @@ def wrapper(arr, indexer, out, fill_value=np.nan):
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
+
return wrapper
@@ -509,6 +530,7 @@ def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
+
return wrapper
@@ -569,15 +591,14 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
- ('bool', 'bool'):
- _view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8),
- ('bool', 'object'):
- _view_wrapper(algos.take_1d_bool_object, np.uint8, None),
- ('datetime64[ns]', 'datetime64[ns]'):
- _view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
+ ('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
+ np.uint8),
+ ('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
+ None),
+ ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
+ algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
-
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
@@ -596,16 +617,15 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
- ('bool', 'bool'):
- _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8),
- ('bool', 'object'):
- _view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None),
+ ('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
+ np.uint8),
+ ('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
+ np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
-
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
@@ -624,16 +644,15 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
- ('bool', 'bool'):
- _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8),
- ('bool', 'object'):
- _view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None),
+ ('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
+ np.uint8),
+ ('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
+ np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
-
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
@@ -652,10 +671,10 @@ def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
- ('bool', 'bool'):
- _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8),
- ('bool', 'object'):
- _view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None),
+ ('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
+ np.uint8),
+ ('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
+ np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
@@ -689,13 +708,14 @@ def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
def func(arr, indexer, out, fill_value=np.nan):
indexer = _ensure_int64(indexer)
- _take_nd_generic(arr, indexer, out, axis=axis,
- fill_value=fill_value, mask_info=mask_info)
+ _take_nd_generic(arr, indexer, out, axis=axis, fill_value=fill_value,
+ mask_info=mask_info)
+
return func
-def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
- mask_info=None, allow_fill=True):
+def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
+ allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
@@ -786,8 +806,8 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
else:
out = np.empty(out_shape, dtype=dtype)
- func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype,
- axis=axis, mask_info=mask_info)
+ func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
+ mask_info=mask_info)
indexer = _ensure_int64(indexer)
func(arr, indexer, out, fill_value)
@@ -799,8 +819,8 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
take_1d = take_nd
-def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
- mask_info=None, allow_fill=True):
+def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
+ allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
@@ -858,12 +878,15 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
+
def func(arr, indexer, out, fill_value=np.nan):
- _take_2d_multi_generic(arr, indexer, out,
- fill_value=fill_value, mask_info=mask_info)
+ _take_2d_multi_generic(arr, indexer, out, fill_value=fill_value,
+ mask_info=mask_info)
+
func(arr, indexer, out=out, fill_value=fill_value)
return out
+
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
@@ -873,6 +896,7 @@ def func(arr, indexer, out, fill_value=np.nan):
'int8': algos.diff_2d_int8,
}
+
def diff(arr, n, axis=0):
""" difference of n between self,
analagoust to s-s.shift(n) """
@@ -931,10 +955,12 @@ def diff(arr, n, axis=0):
if is_timedelta:
from pandas import TimedeltaIndex
- out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(out_arr.shape).astype('timedelta64[ns]')
+ out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
+ out_arr.shape).astype('timedelta64[ns]')
return out_arr
+
def _coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
l = len(categories)
@@ -946,6 +972,7 @@ def _coerce_indexer_dtype(indexer, categories):
return _ensure_int32(indexer)
return _ensure_int64(indexer)
+
def _coerce_to_dtypes(result, dtypes):
""" given a dtypes and a result set, coerce the result elements to the
dtypes
@@ -965,7 +992,7 @@ def conv(r, dtype):
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
- if is_integer(r) and r not in [0,1]:
+ if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
@@ -982,22 +1009,22 @@ def conv(r, dtype):
def _infer_fill_value(val):
"""
- infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like
- if we are a NaT, return the correct dtyped element to provide proper block construction
-
+ infer the fill value for the nan/NaT from the provided
+ scalar/ndarray/list-like if we are a NaT, return the correct dtyped
+ element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
- val = np.array(val,copy=False)
+ val = np.array(val, copy=False)
if is_datetimelike(val):
- return np.array('NaT',dtype=val.dtype)
+ return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(_ensure_object(val))
- if dtype in ['datetime','datetime64']:
- return np.array('NaT',dtype=_NS_DTYPE)
- elif dtype in ['timedelta','timedelta64']:
- return np.array('NaT',dtype=_TD_DTYPE)
+ if dtype in ['datetime', 'datetime64']:
+ return np.array('NaT', dtype=_NS_DTYPE)
+ elif dtype in ['timedelta', 'timedelta64']:
+ return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
@@ -1025,12 +1052,13 @@ def _infer_dtype_from_scalar(val):
dtype = np.object_
- elif isinstance(val, (np.datetime64, datetime)) and getattr(val,'tzinfo',None) is None:
+ elif isinstance(val, (np.datetime64,
+ datetime)) and getattr(val, 'tzinfo', None) is None:
val = lib.Timestamp(val).value
dtype = np.dtype('M8[ns]')
elif isinstance(val, (np.timedelta64, timedelta)):
- val = tslib.convert_to_timedelta(val,'ns')
+ val = tslib.convert_to_timedelta(val, 'ns')
dtype = np.dtype('m8[ns]')
elif is_bool(val):
@@ -1252,7 +1280,9 @@ def _possibly_downcast_to_dtype(result, dtype):
if np.isscalar(result):
return result
- trans = lambda x: x
+ def trans(x):
+ return x
+
if isinstance(dtype, compat.string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
@@ -1269,8 +1299,9 @@ def _possibly_downcast_to_dtype(result, dtype):
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
- trans = lambda x: x.round()
+ def trans(x):
+ return x.round()
else:
dtype = 'object'
@@ -1281,7 +1312,8 @@ def _possibly_downcast_to_dtype(result, dtype):
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
- if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape):
+ if (result.dtype.itemsize <= dtype.itemsize and
+ np.prod(result.shape)):
return result
if issubclass(dtype.type, np.floating):
@@ -1317,7 +1349,7 @@ def _possibly_downcast_to_dtype(result, dtype):
return new_result
# a datetimelike
- elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']:
+ elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i']:
try:
result = result.astype(dtype)
except:
@@ -1339,7 +1371,7 @@ def _maybe_convert_string_to_object(values):
if isinstance(values, string_types):
values = np.array([values], dtype=object)
elif (isinstance(values, np.ndarray) and
- issubclass(values.dtype.type, (np.string_, np.unicode_))):
+ issubclass(values.dtype.type, (np.string_, np.unicode_))):
values = values.astype(object)
return values
@@ -1386,9 +1418,9 @@ def _fill_zeros(result, x, y, name, fill):
return result
if name.startswith(('r', '__r')):
- x,y = y,x
+ x, y = y, x
- is_typed_variable = (hasattr(y, 'dtype') or hasattr(y,'type'))
+ is_typed_variable = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar = lib.isscalar(y)
if not is_typed_variable and not is_scalar:
@@ -1433,30 +1465,32 @@ def _consensus_name_attr(objs):
return None
return name
-
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# Lots of little utilities
+
def _validate_date_like_dtype(dtype):
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('%s' % e)
if typ != 'generic' and typ != 'ns':
- raise ValueError('%r is too specific of a frequency, try passing %r'
- % (dtype.name, dtype.type.__name__))
+ raise ValueError('%r is too specific of a frequency, try passing %r' %
+ (dtype.name, dtype.type.__name__))
def _invalidate_string_dtypes(dtype_set):
- """Change string like dtypes to object for ``DataFrame.select_dtypes()``."""
+ """Change string like dtypes to object for
+ ``DataFrame.select_dtypes()``.
+ """
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def _get_dtype_from_object(dtype):
- """Get a numpy dtype.type-style object. This handles the
- datetime64[ns] and datetime64[ns, TZ] compat
+ """Get a numpy dtype.type-style object. This handles the datetime64[ns]
+ and datetime64[ns, TZ] compat
Notes
-----
@@ -1523,6 +1557,7 @@ def _maybe_box_datetimelike(value):
return value
+
_values_from_object = lib.values_from_object
@@ -1569,39 +1604,43 @@ def _possibly_cast_to_datetime(value, dtype, errors='raise'):
if is_datetime64 or is_datetime64tz or is_timedelta64:
# force the dtype if needed
- if is_datetime64 and not is_dtype_equal(dtype,_NS_DTYPE):
+ if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name == 'datetime64[ns]':
dtype = _NS_DTYPE
else:
- raise TypeError(
- "cannot convert datetimelike to dtype [%s]" % dtype)
+ raise TypeError("cannot convert datetimelike to "
+ "dtype [%s]" % dtype)
elif is_datetime64tz:
pass
- elif is_timedelta64 and not is_dtype_equal(dtype,_TD_DTYPE):
+ elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name == 'timedelta64[ns]':
dtype = _TD_DTYPE
else:
- raise TypeError(
- "cannot convert timedeltalike to dtype [%s]" % dtype)
+ raise TypeError("cannot convert timedeltalike to "
+ "dtype [%s]" % dtype)
if np.isscalar(value):
if value == tslib.iNaT or isnull(value):
value = tslib.iNaT
else:
- value = np.array(value,copy=False)
+ value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = tslib.iNaT
# we have an array of datetime or timedeltas & nulls
- elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):
+ elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
+ dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)._values
elif is_datetime64tz:
- # input has to be UTC at this point, so just localize
- value = to_datetime(value, errors=errors).tz_localize(dtype.tz)
+ # input has to be UTC at this point, so just
+ # localize
+ value = to_datetime(
+ value,
+ errors=errors).tz_localize(dtype.tz)
elif is_timedelta64:
value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError):
@@ -1670,7 +1709,7 @@ def _possibly_infer_to_datetimelike(value, convert_dates=False):
v = value
if not is_list_like(v):
v = [v]
- v = np.array(v,copy=False)
+ v = np.array(v, copy=False)
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
@@ -1684,8 +1723,8 @@ def _try_datetime(v):
except ValueError:
# we might have a sequence of the same-datetimes with tz's
- # if so coerce to a DatetimeIndex; if they are not the same, then
- # these stay as object dtype
+ # if so coerce to a DatetimeIndex; if they are not the same,
+ # then these stay as object dtype
try:
from pandas import to_datetime
return to_datetime(v)
@@ -1708,17 +1747,18 @@ def _try_timedelta(v):
return v
# do a quick inference for perf
- sample = v[:min(3,len(v))]
+ sample = v[:min(3, len(v))]
inferred_type = lib.infer_dtype(sample)
- if inferred_type in ['datetime', 'datetime64'] or (convert_dates and inferred_type in ['date']):
+ if (inferred_type in ['datetime', 'datetime64'] or
+ (convert_dates and inferred_type in ['date'])):
value = _try_datetime(v)
elif inferred_type in ['timedelta', 'timedelta64']:
value = _try_timedelta(v)
- # its possible to have nulls intermixed within the datetime or timedelta
- # these will in general have an inferred_type of 'mixed', so have to try
- # both datetime and timedelta
+ # It's possible to have nulls intermixed within the datetime or
+ # timedelta. These will in general have an inferred_type of 'mixed',
+ # so have to try both datetime and timedelta.
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
@@ -1758,7 +1798,7 @@ def is_bool_indexer(key):
def _default_index(n):
from pandas.core.index import Int64Index
values = np.arange(n, dtype=np.int64)
- result = Int64Index(values,name=None)
+ result = Int64Index(values, name=None)
result.is_unique = True
return result
@@ -1785,6 +1825,7 @@ def _mut_exclusive(**kwargs):
def _not_none(*args):
return (arg for arg in args if arg is not None)
+
def _any_none(*args):
for arg in args:
if arg is None:
@@ -1810,12 +1851,10 @@ def _try_sort(iterable):
def _count_not_none(*args):
return sum(x is not None for x in args)
-#------------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# miscellaneous python tools
-
-
def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
@@ -1850,6 +1889,7 @@ def adjoin(space, *lists, **kwargs):
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
+
def _justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
@@ -1861,6 +1901,7 @@ def _justify(texts, max_len, mode='right'):
else:
return [x.rjust(max_len) for x in texts]
+
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
@@ -1874,7 +1915,7 @@ def iterpairs(seq):
"""
Parameters
----------
- seq: sequence
+ seq : sequence
Returns
-------
@@ -1933,7 +1974,6 @@ def _long_prod(vals):
class groupby(dict):
-
"""
A simple groupby different from the one in itertools.
@@ -1945,6 +1985,7 @@ def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
+
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
@@ -1986,8 +2027,7 @@ def intersection(*seqs):
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
- if not (isinstance(values, (list, tuple))
- or hasattr(values, '__array__')):
+ if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
@@ -2036,25 +2076,21 @@ def _maybe_make_list(obj):
return [obj]
return obj
-########################
-##### TYPE TESTING #####
-########################
+# TYPE TESTING
is_bool = lib.is_bool
-
is_integer = lib.is_integer
-
is_float = lib.is_float
-
is_complex = lib.is_complex
def is_string_like(obj):
return isinstance(obj, (compat.text_type, compat.string_types))
+
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
@@ -2063,6 +2099,7 @@ def is_iterator(obj):
def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
+
def is_period_arraylike(arr):
""" return if we are period arraylike / PeriodIndex """
if isinstance(arr, pd.PeriodIndex):
@@ -2071,6 +2108,7 @@ def is_period_arraylike(arr):
return arr.dtype == object and lib.infer_dtype(arr) == 'period'
return getattr(arr, 'inferred_type', None) == 'period'
+
def is_datetime_arraylike(arr):
""" return if we are datetime arraylike / DatetimeIndex """
if isinstance(arr, ABCDatetimeIndex):
@@ -2079,8 +2117,11 @@ def is_datetime_arraylike(arr):
return arr.dtype == object and lib.infer_dtype(arr) == 'datetime'
return getattr(arr, 'inferred_type', None) == 'datetime'
+
def is_datetimelike(arr):
- return arr.dtype in _DATELIKE_DTYPES or isinstance(arr, ABCPeriodIndex) or is_datetimetz(arr)
+ return (arr.dtype in _DATELIKE_DTYPES or isinstance(arr, ABCPeriodIndex) or
+ is_datetimetz(arr))
+
def _coerce_to_dtype(dtype):
""" coerce a string / np.dtype to a dtype """
@@ -2092,6 +2133,7 @@ def _coerce_to_dtype(dtype):
dtype = np.dtype(dtype)
return dtype
+
def _get_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
@@ -2111,6 +2153,7 @@ def _get_dtype(arr_or_dtype):
arr_or_dtype = arr_or_dtype.dtype
return np.dtype(arr_or_dtype)
+
def _get_dtype_type(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
@@ -2131,6 +2174,7 @@ def _get_dtype_type(arr_or_dtype):
except AttributeError:
return type(None)
+
def is_dtype_equal(source, target):
""" return a boolean if the dtypes are equal """
try:
@@ -2143,6 +2187,7 @@ def is_dtype_equal(source, target):
# object == category will hit this
return False
+
def is_any_int_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.integer)
@@ -2153,15 +2198,18 @@ def is_integer_dtype(arr_or_dtype):
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
+
def is_int64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.int64)
+
def is_int_or_datetime_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
+
def is_datetime64_dtype(arr_or_dtype):
try:
tipo = _get_dtype_type(arr_or_dtype)
@@ -2169,11 +2217,15 @@ def is_datetime64_dtype(arr_or_dtype):
return False
return issubclass(tipo, np.datetime64)
+
def is_datetime64tz_dtype(arr_or_dtype):
return DatetimeTZDtype.is_dtype(arr_or_dtype)
+
def is_datetime64_any_dtype(arr_or_dtype):
- return is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype)
+ return (is_datetime64_dtype(arr_or_dtype) or
+ is_datetime64tz_dtype(arr_or_dtype))
+
def is_datetime64_ns_dtype(arr_or_dtype):
try:
@@ -2182,6 +2234,7 @@ def is_datetime64_ns_dtype(arr_or_dtype):
return False
return tipo == _NS_DTYPE
+
def is_timedelta64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.timedelta64)
@@ -2215,62 +2268,77 @@ def is_numeric_v_string_like(a, b):
is_a_scalar_string_like = not is_a_array and is_string_like(a)
is_b_scalar_string_like = not is_b_array and is_string_like(b)
- return (
- is_a_numeric_array and is_b_scalar_string_like) or (
- is_b_numeric_array and is_a_scalar_string_like) or (
- is_a_numeric_array and is_b_string_array) or (
- is_b_numeric_array and is_a_string_array
- )
+ return ((is_a_numeric_array and is_b_scalar_string_like) or
+ (is_b_numeric_array and is_a_scalar_string_like) or
+ (is_a_numeric_array and is_b_string_array) or
+ (is_b_numeric_array and is_a_string_array))
+
def is_datetimelike_v_numeric(a, b):
- # return if we have an i8 convertible and numeric comparision
- if not hasattr(a,'dtype'):
+ # return if we have an i8 convertible and numeric comparison
+ if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
- is_numeric = lambda x: is_integer_dtype(x) or is_float_dtype(x)
+
+ def is_numeric(x):
+ return is_integer_dtype(x) or is_float_dtype(x)
+
is_datetimelike = needs_i8_conversion
- return (is_datetimelike(a) and is_numeric(b)) or (
- is_datetimelike(b) and is_numeric(a))
+ return ((is_datetimelike(a) and is_numeric(b)) or
+ (is_datetimelike(b) and is_numeric(a)))
+
def is_datetimelike_v_object(a, b):
- # return if we have an i8 convertible and object comparision
- if not hasattr(a,'dtype'):
+ # return if we have an i8 convertible and object comparison
+ if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
- f = lambda x: is_object_dtype(x)
- is_object = lambda x: is_integer_dtype(x) or is_float_dtype(x)
+
+ def f(x):
+ return is_object_dtype(x)
+
+ def is_object(x):
+ return is_integer_dtype(x) or is_float_dtype(x)
+
is_datetimelike = needs_i8_conversion
- return (is_datetimelike(a) and is_object(b)) or (
- is_datetimelike(b) and is_object(a))
+ return ((is_datetimelike(a) and is_object(b)) or
+ (is_datetimelike(b) and is_object(a)))
+
+
+def needs_i8_conversion(arr_or_dtype):
+ return (is_datetime_or_timedelta_dtype(arr_or_dtype) or
+ is_datetime64tz_dtype(arr_or_dtype))
-needs_i8_conversion = lambda arr_or_dtype: is_datetime_or_timedelta_dtype(arr_or_dtype) or \
- is_datetime64tz_dtype(arr_or_dtype)
def i8_boxer(arr_or_dtype):
""" return the scalar boxer for the dtype """
- if is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype):
+ if (is_datetime64_dtype(arr_or_dtype) or
+ is_datetime64tz_dtype(arr_or_dtype)):
return lib.Timestamp
elif is_timedelta64_dtype(arr_or_dtype):
- return lambda x: lib.Timedelta(x,unit='ns')
+ return lambda x: lib.Timedelta(x, unit='ns')
raise ValueError("cannot find a scalar boxer for {0}".format(arr_or_dtype))
+
def is_numeric_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
- return (issubclass(tipo, (np.number, np.bool_))
- and not issubclass(tipo, (np.datetime64, np.timedelta64)))
+ return (issubclass(tipo, (np.number, np.bool_)) and
+ not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_string_dtype(arr_or_dtype):
dtype = _get_dtype(arr_or_dtype)
return dtype.kind in ('O', 'S', 'U')
+
def is_string_like_dtype(arr_or_dtype):
# exclude object as its a mixed dtype
dtype = _get_dtype(arr_or_dtype)
return dtype.kind in ('S', 'U')
+
def is_float_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
@@ -2289,13 +2357,18 @@ def is_bool_dtype(arr_or_dtype):
return False
return issubclass(tipo, np.bool_)
+
def is_sparse(array):
""" return if we are a sparse array """
return isinstance(array, (ABCSparseArray, ABCSparseSeries))
+
def is_datetimetz(array):
""" return if we are a datetime with tz array """
- return (isinstance(array, ABCDatetimeIndex) and getattr(array,'tz',None) is not None) or is_datetime64tz_dtype(array)
+ return ((isinstance(array, ABCDatetimeIndex) and
+ getattr(array, 'tz', None) is not None) or
+ is_datetime64tz_dtype(array))
+
def is_internal_type(value):
"""
@@ -2310,13 +2383,16 @@ def is_internal_type(value):
return True
return False
+
def is_categorical(array):
""" return if we are a categorical possibility """
return isinstance(array, ABCCategorical) or is_categorical_dtype(array)
+
def is_categorical_dtype(arr_or_dtype):
return CategoricalDtype.is_dtype(arr_or_dtype)
+
def is_complex_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.complexfloating)
@@ -2341,21 +2417,25 @@ def is_re_compilable(obj):
def is_list_like(arg):
- return (hasattr(arg, '__iter__') and
+ return (hasattr(arg, '__iter__') and
not isinstance(arg, compat.string_and_binary_types))
+
def is_named_tuple(arg):
return isinstance(arg, tuple) and hasattr(arg, '_fields')
+
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
+
def is_full_slice(obj, l):
""" we have a full length slice """
- return (isinstance(obj, slice) and obj.start == 0 and
- obj.stop == l and obj.step is None)
+ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and
+ obj.step is None)
+
def is_hashable(arg):
"""Return True if hash(arg) will succeed, False otherwise.
@@ -2414,10 +2494,10 @@ def _get_callable_name(obj):
# distinguishing between no name and a name of ''
return None
+
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
-
_ensure_float64 = algos.ensure_float64
_ensure_float32 = algos.ensure_float32
_ensure_int64 = algos.ensure_int64
@@ -2456,7 +2536,7 @@ def _astype_nansafe(arr, dtype, copy=True):
# in py3, timedelta64[ns] are int64
elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
- (not compat.PY3 and dtype != _TD_DTYPE)):
+ (not compat.PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
if dtype.kind == 'm':
@@ -2524,11 +2604,13 @@ def get_dtype_kinds(l):
typs.add(typ)
return typs
+
def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an array of arrays each of which is a single
- 'normalized' dtypes (in that for example, if its object, then it is a non-datetimelike
- provde a combined dtype for the resulting array the preserves the overall dtype if possible)
+ 'normalized' dtypes (in that for example, if it's object, then it is a
+ non-datetimelike and provide a combined dtype for the resulting array that
+ preserves the overall dtype if possible)
Parameters
----------
@@ -2547,6 +2629,7 @@ def is_nonempty(x):
return x.shape[axis] > 0
except Exception:
return True
+
nonempty = [x for x in to_concat if is_nonempty(x)]
# If all arrays are empty, there's nothing to convert, just short-cut to
@@ -2572,20 +2655,22 @@ def is_nonempty(x):
return _concat_compat(to_concat, axis=axis)
if not nonempty:
-
- # we have all empties, but may need to coerce the result dtype to object if we
- # have non-numeric type operands (numpy would otherwise cast this to float)
+ # we have all empties, but may need to coerce the result dtype to
+ # object if we have non-numeric type operands (numpy would otherwise
+ # cast this to float)
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:
- if not len(typs-set(['i','u','f'])) or not len(typs-set(['bool','i','u'])):
+ if (not len(typs - set(['i', 'u', 'f'])) or
+ not len(typs - set(['bool', 'i', 'u']))):
# let numpy coerce
pass
else:
# coerce to object
- to_concat = [ x.astype('object') for x in to_concat ]
+ to_concat = [x.astype('object') for x in to_concat]
+
+ return np.concatenate(to_concat, axis=axis)
- return np.concatenate(to_concat,axis=axis)
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
@@ -2600,6 +2685,7 @@ def _where_compat(mask, arr1, arr2):
return np.where(mask, arr1, arr2)
+
def _dict_compat(d):
"""
Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict
@@ -2613,20 +2699,23 @@ def _dict_compat(d):
dict
"""
- return dict((_maybe_box_datetimelike(key), value) for key, value in iteritems(d))
+ return dict((_maybe_box_datetimelike(key), value)
+ for key, value in iteritems(d))
-def sentinel_factory():
+def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
+
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
+
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
@@ -2648,8 +2737,7 @@ def in_qtconsole():
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
- ip.config.get('IPKernelApp', {}).get('parent_appname', "")
- )
+ ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except:
@@ -2668,8 +2756,7 @@ def in_ipnb():
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
- ip.config.get('IPKernelApp', {}).get('parent_appname', "")
- )
+ ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except:
@@ -2738,7 +2825,8 @@ def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
- r.append(pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))
+ r.append(pprint_thing(
+ next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))
body = ", ".join(r)
if nitems < len(seq):
@@ -2765,8 +2853,11 @@ def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
- pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
- pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)))
+ pairs.append(pfmt %
+ (pprint_thing(k, _nest_lvl + 1,
+ max_seq_items=max_seq_items, **kwds),
+ pprint_thing(v, _nest_lvl + 1,
+ max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
@@ -2802,6 +2893,7 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
result - unicode object on py2, str on py3. Always Unicode.
"""
+
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
@@ -2813,10 +2905,7 @@ def as_escaped_unicode(thing, escape_chars=escape_chars):
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
- translate = {'\t': r'\t',
- '\n': r'\n',
- '\r': r'\r',
- }
+ translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
@@ -2834,11 +2923,13 @@ def as_escaped_unicode(thing, escape_chars=escape_chars):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
- result = _pprint_dict(thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items)
- elif is_sequence(thing) and _nest_lvl < \
- get_option("display.pprint_nest_depth"):
+ result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
+ max_seq_items=max_seq_items)
+ elif (is_sequence(thing) and
+ _nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
- quote_strings=quote_strings, max_seq_items=max_seq_items)
+ quote_strings=quote_strings,
+ max_seq_items=max_seq_items)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = "'%s'"
@@ -2864,8 +2955,8 @@ def console_encode(object, **kwds):
set in display.encoding. Use this everywhere
where you output to the console.
"""
- return pprint_thing_encoded(object,
- get_option("display.encoding"))
+ return pprint_thing_encoded(object, get_option("display.encoding"))
+
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
@@ -2881,6 +2972,7 @@ def _maybe_match_name(a, b):
return b.name
return None
+
def _random_state(state=None):
"""
Helper function for processing random_state arguments.
@@ -2906,4 +2998,5 @@ def _random_state(state=None):
elif state is None:
return np.random.RandomState()
else:
- raise ValueError("random_state must be an integer, a numpy RandomState, or None")
+ raise ValueError("random_state must be an integer, a numpy "
+ "RandomState, or None")
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b27c4268796dd..273166db12142 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -23,17 +23,15 @@
import numpy as np
import numpy.ma as ma
-from pandas.core.common import (isnull, notnull, PandasError, _try_sort,
- _default_index, _maybe_upcast, is_sequence,
- _infer_dtype_from_scalar, _values_from_object,
- is_list_like, _maybe_box_datetimelike,
- is_categorical_dtype, is_object_dtype,
- is_internal_type, is_datetimetz,
- _possibly_infer_to_datetimelike, _dict_compat)
+from pandas.core.common import (
+ isnull, notnull, PandasError, _try_sort, _default_index, _maybe_upcast,
+ is_sequence, _infer_dtype_from_scalar, _values_from_object, is_list_like,
+ _maybe_box_datetimelike, is_categorical_dtype, is_object_dtype,
+ is_internal_type, is_datetimetz, _possibly_infer_to_datetimelike,
+ _dict_compat)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
-from pandas.core.indexing import (maybe_droplevels,
- convert_to_index_sliceable,
+from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
@@ -43,11 +41,11 @@
import pandas.computation.expressions as expressions
from pandas.computation.eval import eval as _eval
from numpy import percentile as _quantile
-from pandas.compat import(range, map, zip, lrange, lmap, lzip, StringIO, u,
- OrderedDict, raise_with_traceback)
+from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
+ OrderedDict, raise_with_traceback)
from pandas import compat
-from pandas.util.decorators import (deprecate, Appender,
- Substitution, deprecate_kwarg)
+from pandas.util.decorators import (deprecate, Appender, Substitution,
+ deprecate_kwarg)
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
@@ -59,6 +57,7 @@
import pandas.core.format as fmt
import pandas.core.nanops as nanops
import pandas.core.ops as ops
+import pandas.tools.plotting as gfx
import pandas.lib as lib
import pandas.algos as _algos
@@ -66,7 +65,7 @@
from pandas.core.config import get_option
from pandas import _np_version_under1p9
-# ----------------------------------------------------------------------
+# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame',
@@ -156,12 +155,11 @@
of DataFrame.
"""
-#----------------------------------------------------------------------
+# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
-
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
@@ -359,11 +357,9 @@ def _init_dict(self, data, index, columns, dtype=None):
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
- return _arrays_to_mgr(arrays, data_names, index, columns,
- dtype=dtype)
+ return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
- def _init_ndarray(self, values, index, columns, dtype=None,
- copy=False):
+ def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
@@ -396,20 +392,20 @@ def _get_axes(N, K, index=index, columns=columns):
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
- if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype):
+ if (is_categorical_dtype(getattr(values, 'dtype', None)) or
+ is_categorical_dtype(dtype)):
- if not hasattr(values,'dtype'):
+ if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
- index, columns = _get_axes(len(values),1)
- return _arrays_to_mgr([ values ], columns, index, columns,
+ index, columns = _get_axes(len(values), 1)
+ return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
- return self._init_dict({ 0 : values }, index, columns,
- dtype=dtype)
+ return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
@@ -449,7 +445,7 @@ def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
- return (len(self.index), len(self.columns))
+ return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
@@ -478,9 +474,9 @@ def _repr_fits_horizontal_(self, ignore_width=False):
((not ignore_width) and width and nb_columns > (width // 2))):
return False
- if (ignore_width # used by repr_html under IPython notebook
- # scripts ignore terminal dims
- or not com.in_interactive_session()):
+ # used by repr_html under IPython notebook or scripts ignore terminal
+ # dims
+ if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
@@ -514,9 +510,8 @@ def _repr_fits_horizontal_(self, ignore_width=False):
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
- return info_repr_option and not (
- self._repr_fits_horizontal_() and self._repr_fits_vertical_()
- )
+ return info_repr_option and not (self._repr_fits_horizontal_() and
+ self._repr_fits_vertical_())
def __unicode__(self):
"""
@@ -551,8 +546,8 @@ def _repr_html_(self):
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
- # display HTML, so this check can be removed when support for IPython 2.x
- # is no longer needed.
+ # display HTML, so this check can be removed when support for
+ # IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
@@ -561,8 +556,8 @@ def _repr_html_(self):
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
- val = buf.getvalue().replace('<', r'<', 1).replace('>',
- r'>', 1)
+ val = buf.getvalue().replace('<', r'<', 1)
+ val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
@@ -571,8 +566,7 @@ def _repr_html_(self):
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
- show_dimensions=show_dimensions,
- notebook=True)
+ show_dimensions=show_dimensions, notebook=True)
else:
return None
@@ -602,8 +596,8 @@ def iteritems(self):
See also
--------
- iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.
- itertuples : Iterate over the rows of a DataFrame as namedtuples of the values.
+ iterrows : Iterate over DataFrame rows as (index, Series) pairs.
+ itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
@@ -611,11 +605,11 @@ def iteritems(self):
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
- yield k, self._ixs(i,axis=1)
+ yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
- Iterate over the rows of a DataFrame as (index, Series) pairs.
+ Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
@@ -651,7 +645,7 @@ def iterrows(self):
See also
--------
- itertuples : Iterate over the rows of a DataFrame as namedtuples of the values.
+ itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
@@ -662,15 +656,16 @@ def iterrows(self):
def itertuples(self, index=True, name="Pandas"):
"""
- Iterate over the rows of DataFrame as namedtuples, with index value
- as first element of the tuple.
+ Iterate over DataFrame rows as namedtuples, with index value as first
+ element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
- The name of the returned namedtuples or None to return regular tuples.
+ The name of the returned namedtuples or None to return regular
+ tuples.
Notes
-----
@@ -680,13 +675,14 @@ def itertuples(self, index=True, name="Pandas"):
See also
--------
- iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.
+ iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]}, index=['a', 'b'])
+ >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
+ index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
@@ -712,8 +708,9 @@ def itertuples(self, index=True, name="Pandas"):
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
- itertuple = collections.namedtuple(
- name, fields+list(self.columns), rename=True)
+ itertuple = collections.namedtuple(name,
+ fields + list(self.columns),
+ rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
@@ -759,8 +756,7 @@ def dot(self, other):
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
- return self._constructor(np.dot(lvals, rvals),
- index=left.index,
+ return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
@@ -773,7 +769,7 @@ def dot(self, other):
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
@@ -847,12 +843,15 @@ def to_dict(self, orient='dict'):
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
- 'data': lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
+ 'data': lib.map_infer(self.values.ravel(),
+ _maybe_box_datetimelike)
.reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
- return dict((k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self))
+ return dict((k, _maybe_box_datetimelike(v))
+ for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
- return [dict((k, _maybe_box_datetimelike(v)) for k, v in zip(self.columns, row))
+ return [dict((k, _maybe_box_datetimelike(v))
+ for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
@@ -890,8 +889,8 @@ def to_gbq(self, destination_table, project_id, chunksize=10000,
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
- chunksize=chunksize, verbose=verbose,
- reauth=reauth, if_exists=if_exists)
+ chunksize=chunksize, verbose=verbose, reauth=reauth,
+ if_exists=if_exists)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
@@ -1015,8 +1014,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None,
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
- mgr = _arrays_to_mgr(arrays, arr_columns, result_index,
- columns)
+ mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
@@ -1126,11 +1124,12 @@ def _from_arrays(cls, arrays, columns, index, dtype=None):
return cls(mgr)
@classmethod
- def from_csv(cls, path, header=0, sep=',', index_col=0,
- parse_dates=True, encoding=None, tupleize_cols=False,
+ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
+ encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
- Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv` instead).
+ Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
+ instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
@@ -1197,8 +1196,8 @@ def to_sparse(self, fill_value=None, kind='block'):
y : SparseDataFrame
"""
from pandas.core.sparse import SparseDataFrame
- return SparseDataFrame(self._series, index=self.index, columns=self.columns,
- default_kind=kind,
+ return SparseDataFrame(self._series, index=self.index,
+ columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
@@ -1246,7 +1245,8 @@ def to_panel(self):
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
- labels=[major_labels, minor_labels],
+ labels=[major_labels,
+ minor_labels],
shape=shape,
ref_items=selfsorted.columns)
@@ -1316,26 +1316,25 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
- Character recognized as decimal separator. E.g. use ',' for European data
+ Character recognized as decimal separator. E.g. use ',' for
+ European data
.. versionadded:: 0.16.0
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
- line_terminator=line_terminator,
- sep=sep, encoding=encoding,
- compression=compression,
- quoting=quoting, na_rep=na_rep,
- float_format=float_format, cols=columns,
- header=header, index=index,
+ line_terminator=line_terminator, sep=sep,
+ encoding=encoding,
+ compression=compression, quoting=quoting,
+ na_rep=na_rep, float_format=float_format,
+ cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
engine=kwds.get("engine"),
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
- escapechar=escapechar,
- decimal=decimal)
+ escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
@@ -1344,8 +1343,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
- merge_cells=True, encoding=None, inf_rep='inf',
- verbose=True):
+ merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
"""
Write DataFrame to a excel sheet
@@ -1410,12 +1408,9 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
- formatter = fmt.ExcelFormatter(self,
- na_rep=na_rep,
- cols=columns,
+ formatter = fmt.ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
- float_format=float_format,
- index=index,
+ float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
@@ -1425,9 +1420,9 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
if need_save:
excel_writer.save()
- def to_stata(
- self, fname, convert_dates=None, write_index=True, encoding="latin-1",
- byteorder=None, time_stamp=None, data_label=None):
+ def to_stata(self, fname, convert_dates=None, write_index=True,
+ encoding="latin-1", byteorder=None, time_stamp=None,
+ data_label=None):
"""
A class for writing Stata binary dta files from array-like objects
@@ -1464,10 +1459,10 @@ def to_stata(
writer.write_file()
@Appender(fmt.docstring_to_string, indents=1)
- def to_string(self, buf=None, columns=None, col_space=None,
- header=True, index=True, na_rep='NaN', formatters=None,
- float_format=None, sparsify=None, index_names=True,
- justify=None, line_width=None, max_rows=None, max_cols=None,
+ def to_string(self, buf=None, columns=None, col_space=None, header=True,
+ index=True, na_rep='NaN', formatters=None, float_format=None,
+ sparsify=None, index_names=True, justify=None,
+ line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
@@ -1477,8 +1472,7 @@ def to_string(self, buf=None, columns=None, col_space=None,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
- sparsify=sparsify,
- justify=justify,
+ sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
@@ -1527,12 +1521,10 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
- sparsify=sparsify,
- justify=justify,
+ sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
- bold_rows=bold_rows,
- escape=escape,
+ bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
@@ -1546,8 +1538,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
- bold_rows=True, column_format=None,
- longtable=None, escape=None, encoding=None):
+ bold_rows=True, column_format=None, longtable=None,
+ escape=None, encoding=None):
"""
Render a DataFrame to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
@@ -1558,7 +1550,8 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
- <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
+ <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3
+ columns
longtable : boolean, default will be read from the pandas config module
default: False
Use a longtable environment instead of tabular. Requires adding
@@ -1596,7 +1589,8 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
if buf is None:
return formatter.buf.getvalue()
- def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):
+ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
+ null_counts=None):
"""
Concise summary of a DataFrame.
@@ -1619,9 +1613,10 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_co
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
+ - If None, then only show if the frame is smaller than
+ max_info_rows and max_info_columns.
+ - If True, always show counts.
+ - If False, never show counts.
"""
from pandas.core.format import _put_lines
@@ -1643,8 +1638,8 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_co
# hack
if max_cols is None:
- max_cols = get_option(
- 'display.max_info_columns', len(self.columns) + 1)
+ max_cols = get_option('display.max_info_columns',
+ len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
@@ -1665,8 +1660,8 @@ def _verbose_repr():
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
- raise AssertionError('Columns must equal counts (%d != %d)' %
- (len(cols), len(counts)))
+ raise AssertionError('Columns must equal counts (%d != %d)'
+ % (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
@@ -1678,8 +1673,7 @@ def _verbose_repr():
if show_counts:
count = counts.iloc[i]
- lines.append(_put_str(col, space) +
- tmpl % (count, dtype))
+ lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
@@ -1712,18 +1706,17 @@ def _sizeof_fmt(num, size_qualifier):
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
- deep=True
+ deep = True
else:
- # size_qualifier is just a best effort; not guaranteed to catch all
- # cases (e.g., it misses categorical data even with object
+ # size_qualifier is just a best effort; not guaranteed to catch
+ # all cases (e.g., it misses categorical data even with object
# categories)
- deep=False
+ deep = False
if 'object' in counts or is_object_dtype(self.index):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
- _sizeof_fmt(mem_usage, size_qualifier)
- )
+ _sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
@@ -1754,11 +1747,11 @@ def memory_usage(self, index=True, deep=False):
--------
numpy.ndarray.nbytes
"""
- result = Series([ c.memory_usage(index=False, deep=deep) for col, c in self.iteritems() ],
- index=self.columns)
+ result = Series([c.memory_usage(index=False, deep=deep)
+ for col, c in self.iteritems()], index=self.columns)
if index:
- result = Series(self.index.memory_usage(deep=deep),
- index=['Index']).append(result)
+ result = Series(self.index.memory_usage(deep=deep),
+ index=['Index']).append(result)
return result
def transpose(self):
@@ -1767,7 +1760,7 @@ def transpose(self):
T = property(transpose)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
@@ -1795,15 +1788,13 @@ def _unpickle_matrix_compat(self, state): # pragma: no cover
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
- columns=_unpickle_array(ocols),
- copy=False)
+ columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
- #----------------------------------------------------------------------
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
@@ -1888,7 +1879,6 @@ def _ixs(self, i, axis=0):
# irow
if axis == 0:
-
"""
Notes
-----
@@ -1902,14 +1892,15 @@ def _ixs(self, i, axis=0):
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
- copy=True
+ copy = True
else:
new_values = self._data.fast_xs(i)
if lib.isscalar(new_values):
return new_values
# if we are a copy, mark as such
- copy = isinstance(new_values,np.ndarray) and new_values.base is None
+ copy = (isinstance(new_values, np.ndarray) and
+ new_values.base is None)
result = Series(new_values, index=self.columns,
name=self.index[i], dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
@@ -1917,7 +1908,6 @@ def _ixs(self, i, axis=0):
# icol
else:
-
"""
Notes
-----
@@ -1943,9 +1933,10 @@ def _ixs(self, i, axis=0):
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
- result = self._constructor_sliced.from_array(
- values, index=self.index,
- name=label, fastpath=True)
+ result = self._constructor_sliced.from_array(values,
+ index=self.index,
+ name=label,
+ fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
@@ -2035,14 +2026,17 @@ def _getitem_multilevel(self, key):
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
- columns=result_columns).__finalize__(self)
+ columns=result_columns)
+ result = result.__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
- result = self._constructor_sliced(result, index=self.index, name=key)
+ result = self._constructor_sliced(result,
+ index=self.index,
+ name=key)
result._set_is_copy(self)
return result
@@ -2274,16 +2268,15 @@ def select_dtypes(self, include=None, exclude=None):
'nonempty')
# convert the myriad valid dtypes object to a single representation
- include, exclude = map(lambda x:
- frozenset(map(com._get_dtype_from_object, x)),
- selection)
+ include, exclude = map(
+ lambda x: frozenset(map(com._get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
com._invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
- raise ValueError('include and exclude overlap on %s'
- % (include & exclude))
+ raise ValueError('include and exclude overlap on %s' %
+ (include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
@@ -2381,16 +2374,15 @@ def _ensure_valid_index(self, value):
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
- try:
- value = Series(value)
- except:
- raise ValueError('Cannot set a frame with no defined index '
- 'and a value that cannot be converted to a '
- 'Series')
-
- self._data = self._data.reindex_axis(value.index.copy(), axis=1,
- fill_value=np.nan)
+ try:
+ value = Series(value)
+ except:
+ raise ValueError('Cannot set a frame with no defined index '
+ 'and a value that cannot be converted to a '
+ 'Series')
+ self._data = self._data.reindex_axis(value.index.copy(), axis=1,
+ fill_value=np.nan)
def _set_item(self, key, value):
"""
@@ -2429,8 +2421,8 @@ def insert(self, loc, column, value, allow_duplicates=False):
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value)
- self._data.insert(
- loc, column, value, allow_duplicates=allow_duplicates)
+ self._data.insert(loc, column, value,
+ allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
"""
@@ -2558,7 +2550,7 @@ def reindexer(value):
elif isinstance(value, Categorical):
value = value.copy()
- elif (isinstance(value, Index) or is_sequence(value)):
+ elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
@@ -2589,8 +2581,8 @@ def reindexer(value):
# broadcast across multiple columns if necessary
if key in self.columns and value.ndim == 1:
- if not self.columns.is_unique or isinstance(self.columns,
- MultiIndex):
+ if (not self.columns.is_unique or
+ isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
@@ -2656,11 +2648,11 @@ def lookup(self, row_labels, col_labels):
return result
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Reindexing and alignment
- def _reindex_axes(self, axes, level, limit, tolerance, method,
- fill_value, copy):
+ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
+ copy):
frame = self
columns = axes['columns']
@@ -2706,7 +2698,7 @@ def _reindex_multi(self, axes, copy, fill_value):
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
- return self._reindex_with_indexers({0: [new_index, row_indexer],
+ return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@@ -2715,9 +2707,12 @@ def _reindex_multi(self, axes, copy, fill_value):
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
- return super(DataFrame, self).align(other, join=join, axis=axis, level=level, copy=copy,
- fill_value=fill_value, method=method, limit=limit,
- fill_axis=fill_axis, broadcast_axis=broadcast_axis)
+ return super(DataFrame, self).align(other, join=join, axis=axis,
+ level=level, copy=copy,
+ fill_value=fill_value,
+ method=method, limit=limit,
+ fill_axis=fill_axis,
+ broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
@@ -2727,10 +2722,10 @@ def reindex(self, index=None, columns=None, **kwargs):
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
- return super(DataFrame, self).reindex_axis(labels=labels, axis=axis,
- method=method, level=level,
- copy=copy, limit=limit,
- fill_value=fill_value)
+ return super(DataFrame,
+ self).reindex_axis(labels=labels, axis=axis,
+ method=method, level=level, copy=copy,
+ limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
@@ -2740,10 +2735,10 @@ def rename(self, index=None, columns=None, **kwargs):
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
- return super(DataFrame, self).fillna(value=value, method=method,
- axis=axis, inplace=inplace,
- limit=limit, downcast=downcast,
- **kwargs)
+ return super(DataFrame,
+ self).fillna(value=value, method=method, axis=axis,
+ inplace=inplace, limit=limit,
+ downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
@@ -2880,8 +2875,7 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
- elif (isinstance(index, DatetimeIndex) and
- index.tz is not None):
+ elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
@@ -2893,11 +2887,11 @@ def _maybe_casted_values(index, labels=None):
mask = labels == -1
values = values.take(labels)
if mask.any():
- values, changed = com._maybe_upcast_putmask(values,
- mask, np.nan)
+ values, changed = com._maybe_upcast_putmask(values, mask,
+ np.nan)
return values
- new_index = np.arange(len(new_obj),dtype='int64')
+ new_index = np.arange(len(new_obj), dtype='int64')
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
@@ -2918,8 +2912,7 @@ def _maybe_casted_values(index, labels=None):
if multi_col:
if col_fill is None:
- col_name = tuple([col_name] *
- self.columns.nlevels)
+ col_name = tuple([col_name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
@@ -2950,8 +2943,7 @@ def _maybe_casted_values(index, labels=None):
if not inplace:
return new_obj
-
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
@@ -2982,8 +2974,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
- result = result.dropna(how=how, thresh=thresh,
- subset=subset, axis=ax)
+ result = result.dropna(how=how, thresh=thresh, subset=subset,
+ axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
@@ -2994,8 +2986,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
- raise KeyError(list(np.compress(check,subset)))
- agg_obj = self.take(indices,axis=agg_axis)
+ raise KeyError(list(np.compress(check, subset)))
+ agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
@@ -3018,7 +3010,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
else:
return result
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
+ False: 'first'})
@deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
@@ -3052,7 +3045,8 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False):
else:
return self[-duplicated]
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
+ False: 'first'})
@deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)
def duplicated(self, subset=None, keep='first'):
"""
@@ -3082,23 +3076,24 @@ def duplicated(self, subset=None, keep='first'):
from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
- labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
- return labels.astype('i8',copy=False), len(shape)
+ labels, shape = factorize(vals, size_hint=min(len(self),
+ _SIZE_HINT_LIMIT))
+ return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
- elif not np.iterable(subset) or \
- isinstance(subset, compat.string_types) or \
- isinstance(subset, tuple) and subset in self.columns:
+ elif (not np.iterable(subset) or
+ isinstance(subset, compat.string_types) or
+ isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (self[col].values for col in subset)
- labels, shape = map(list, zip( * map(f, vals)))
+ labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
@@ -3106,16 +3101,14 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
axis = self._get_axis_number(axis)
- labels = self._get_axis(axis)
if axis != 0:
- raise ValueError('When sorting by column, axis must be 0 '
- '(rows)')
+ raise ValueError('When sorting by column, axis must be 0 (rows)')
if not isinstance(by, list):
by = [by]
if com.is_sequence(ascending) and len(by) != len(ascending):
- raise ValueError('Length of ascending (%d) != length of by'
- ' (%d)' % (len(ascending), len(by)))
+ raise ValueError('Length of ascending (%d) != length of by (%d)' %
+ (len(ascending), len(by)))
if len(by) > 1:
from pandas.core.groupby import _lexsort_indexer
@@ -3123,11 +3116,13 @@ def trans(v):
if com.needs_i8_conversion(v):
return v.view('i8')
return v
+
keys = []
for x in by:
k = self[x].values
if k.ndim == 2:
- raise ValueError('Cannot sort by duplicate column %s' % str(x))
+ raise ValueError('Cannot sort by duplicate column %s' %
+ str(x))
keys.append(trans(k))
indexer = _lexsort_indexer(keys, orders=ascending,
na_position=na_position)
@@ -3141,19 +3136,20 @@ def trans(v):
# try to be helpful
if isinstance(self.columns, MultiIndex):
- raise ValueError('Cannot sort by column %s in a multi-index'
- ' you need to explicity provide all the levels'
- % str(by))
+ raise ValueError('Cannot sort by column %s in a '
+ 'multi-index you need to explicity '
+ 'provide all the levels' % str(by))
- raise ValueError('Cannot sort by duplicate column %s'
- % str(by))
+ raise ValueError('Cannot sort by duplicate column %s' %
+ str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = _nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
- new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis),
+ new_data = self._data.take(indexer,
+ axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
@@ -3161,8 +3157,8 @@ def trans(v):
else:
return self._constructor(new_data).__finalize__(self)
- def sort(self, columns=None, axis=0, ascending=True,
- inplace=False, kind='quicksort', na_position='last'):
+ def sort(self, columns=None, axis=0, ascending=True, inplace=False,
+ kind='quicksort', na_position='last'):
"""
DEPRECATED: use :meth:`DataFrame.sort_values`
@@ -3183,7 +3179,8 @@ def sort(self, columns=None, axis=0, ascending=True,
inplace : boolean, default False
Sort the DataFrame without creating a new instance
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
- This option is only applied when sorting on a single column or label.
+ This option is only applied when sorting on a single column or
+ label.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
@@ -3200,25 +3197,28 @@ def sort(self, columns=None, axis=0, ascending=True,
if columns is None:
warnings.warn("sort(....) is deprecated, use sort_index(.....)",
FutureWarning, stacklevel=2)
- return self.sort_index(axis=axis, ascending=ascending, inplace=inplace)
+ return self.sort_index(axis=axis, ascending=ascending,
+ inplace=inplace)
- warnings.warn("sort(columns=....) is deprecated, use sort_values(by=.....)",
- FutureWarning, stacklevel=2)
+ warnings.warn("sort(columns=....) is deprecated, use "
+ "sort_values(by=.....)", FutureWarning, stacklevel=2)
return self.sort_values(by=columns, axis=axis, ascending=ascending,
- inplace=inplace, kind=kind, na_position=na_position)
+ inplace=inplace, kind=kind,
+ na_position=na_position)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
- kind='quicksort', na_position='last', sort_remaining=True, by=None):
+ kind='quicksort', na_position='last', sort_remaining=True,
+ by=None):
# 10726
if by is not None:
- warnings.warn("by argument to sort_index is deprecated, pls use .sort_values(by=...)",
- FutureWarning, stacklevel=2)
+ warnings.warn("by argument to sort_index is deprecated, pls use "
+ ".sort_values(by=...)", FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
- return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
-
+ return self.sort_values(by, axis=axis, ascending=ascending,
+ inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
@@ -3243,9 +3243,10 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
from pandas.core.groupby import _nargsort
# GH11080 - Check monotonic-ness before sort an index
- # if monotonic (already sorted), return None or copy() according to 'inplace'
- if (ascending and labels.is_monotonic_increasing) or \
- (not ascending and labels.is_monotonic_decreasing):
+ # if monotonic (already sorted), return None or copy() according
+ # to 'inplace'
+ if ((ascending and labels.is_monotonic_increasing) or
+ (not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
@@ -3254,7 +3255,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
- new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis),
+ new_data = self._data.take(indexer,
+ axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
@@ -3262,8 +3264,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
else:
return self._constructor(new_data).__finalize__(self)
- def sortlevel(self, level=0, axis=0, ascending=True,
- inplace=False, sort_remaining=True):
+ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
+ sort_remaining=True):
"""
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
@@ -3291,7 +3293,6 @@ def sortlevel(self, level=0, axis=0, ascending=True,
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
-
def _nsorted(self, columns, n, method, keep):
if not com.is_list_like(columns):
columns = [columns]
@@ -3421,7 +3422,7 @@ def reorder_levels(self, order, axis=0):
result.columns = result.columns.reorder_levels(order)
return result
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
@@ -3473,18 +3474,21 @@ def f(i):
else:
result = _arith_op(this.values, other.values)
- return self._constructor(result, index=new_index,
- columns=new_columns, copy=False)
+ return self._constructor(result, index=new_index, columns=new_columns,
+ copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
- return self._combine_match_index(other, func, level=level, fill_value=fill_value)
+ return self._combine_match_index(other, func, level=level,
+ fill_value=fill_value)
else:
- return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
- return self._combine_series_infer(other, func, level=level, fill_value=fill_value)
+ return self._combine_match_columns(other, func, level=level,
+ fill_value=fill_value)
+ return self._combine_series_infer(other, func, level=level,
+ fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
@@ -3495,48 +3499,56 @@ def _combine_series_infer(self, other, func, level=None, fill_value=None):
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
- return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
+ return self._combine_match_columns(other, func, level=level,
+ fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
- left, right = self.align(other, join='outer', axis=0, level=level, copy=False)
+ left, right = self.align(other, join='outer', axis=0, level=level,
+ copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
- index=left.index,
- columns=self.columns, copy=False)
+ index=left.index, columns=self.columns,
+ copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
- left, right = self.align(other, join='outer', axis=1, level=level, copy=False)
+ left, right = self.align(other, join='outer', axis=1, level=level,
+ copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
- new_data = left._data.eval(
- func=func, other=right, axes=[left.columns, self.index])
+ new_data = left._data.eval(func=func, other=right,
+ axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
if self.empty:
return self
- new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error)
+ new_data = self._data.eval(func=func, other=other,
+ raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
+
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
+
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
+
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
+
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
@@ -3640,8 +3652,7 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result[col] = arr
# convert_objects just in case
- return self._constructor(result,
- index=new_index,
+ return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
@@ -3666,6 +3677,7 @@ def combine_first(self, other):
-------
combined : DataFrame
"""
+
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
@@ -3730,10 +3742,10 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
else:
mask = notnull(this)
- self[col] = expressions.where(
- mask, this, that, raise_on_error=True)
+ self[col] = expressions.where(mask, this, that,
+ raise_on_error=True)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
@@ -3748,7 +3760,7 @@ def last_valid_index(self):
"""
return self.index[self.count(1) > 0][-1]
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
@@ -3899,7 +3911,7 @@ def unstack(self, level=-1):
from pandas.core.reshape import unstack
return unstack(self, level)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
@@ -3923,7 +3935,7 @@ def diff(self, periods=1, axis=0):
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Function application
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
@@ -3988,7 +4000,9 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
"""
axis = self._get_axis_number(axis)
if kwds or args and not isinstance(func, np.ufunc):
- f = lambda x: func(x, *args, **kwds)
+
+ def f(x):
+ return func(x, *args, **kwds)
else:
f = func
@@ -4036,8 +4050,7 @@ def _apply_raw(self, func, axis):
# TODO: mixed type case
if result.ndim == 2:
- return DataFrame(result, index=self.index,
- columns=self.columns)
+ return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
@@ -4045,8 +4058,9 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
- if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type:
- reduce=False
+ if (reduce and axis == 1 and self._is_mixed_type and
+ self._is_datelike_mixed_type):
+ reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
@@ -4071,16 +4085,18 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
dtype = object if self._is_mixed_type else None
if axis == 0:
- series_gen = (self._ixs(i,axis=1) for i in range(len(self.columns)))
+ series_gen = (self._ixs(i, axis=1)
+ for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
- series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
- for i, (arr, name) in
- enumerate(zip(values, res_index)))
+ series_gen = (Series.from_array(arr, index=res_columns, name=name,
+ dtype=dtype)
+ for i, (arr, name) in enumerate(zip(values,
+ res_index)))
else: # pragma : no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
@@ -4110,7 +4126,7 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
- com.pprint_thing(k),)
+ com.pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
@@ -4197,9 +4213,10 @@ def infer(x):
f = com.i8_boxer(x)
x = lib.map_infer(_values_from_object(x), f)
return lib.map_infer(_values_from_object(x), func)
+
return self.apply(infer)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
@@ -4222,9 +4239,9 @@ def append(self, other, ignore_index=False, verify_integrity=False):
Notes
-----
- If a list of dict/series is passed and the keys are all contained in the
- DataFrame's index, the order of the columns in the resulting DataFrame
- will be unchanged.
+ If a list of dict/series is passed and the keys are all contained in
+ the DataFrame's index, the order of the columns in the resulting
+ DataFrame will be unchanged.
See also
--------
@@ -4265,11 +4282,11 @@ def append(self, other, ignore_index=False, verify_integrity=False):
' or if the Series has a name')
index = None if other.name is None else [other.name]
- combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist()
+ combined_columns = self.columns.tolist() + self.columns.union(
+ other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
- index=index,
- columns=combined_columns)
+ index=index, columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
@@ -4370,8 +4387,8 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
joined = frames[0]
for frame in frames[1:]:
- joined = merge(joined, frame, how=how,
- left_index=True, right_index=True)
+ joined = merge(joined, frame, how=how, left_index=True,
+ right_index=True)
return joined
@@ -4381,10 +4398,10 @@ def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
from pandas.tools.merge import merge
- return merge(self, right, how=how, on=on,
- left_on=left_on, right_on=right_on,
- left_index=left_index, right_index=right_index, sort=sort,
- suffixes=suffixes, copy=copy, indicator=indicator)
+ return merge(self, right, how=how, on=on, left_on=left_on,
+ right_on=right_on, left_index=left_index,
+ right_index=right_index, sort=sort, suffixes=suffixes,
+ copy=copy, indicator=indicator)
def round(self, decimals=0, out=None):
"""
@@ -4462,8 +4479,8 @@ def _series_round(s, decimals):
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
- raise TypeError("decimals must be an integer, "
- "a dict-like, or a Series")
+ raise TypeError("decimals must be an integer, a dict-like or a "
+ "Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
@@ -4472,7 +4489,7 @@ def _series_round(s, decimals):
else:
return self
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
@@ -4499,8 +4516,7 @@ def corr(self, method='pearson', min_periods=1):
mat = numeric_df.values
if method == 'pearson':
- correl = _algos.nancorr(com._ensure_float64(mat),
- minp=min_periods)
+ correl = _algos.nancorr(com._ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = _algos.nancorr_spearman(com._ensure_float64(mat),
minp=min_periods)
@@ -4617,7 +4633,7 @@ def corrwith(self, other, axis=0, drop=False):
return correl
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
@@ -4694,8 +4710,7 @@ def _count_level(self, level, axis=0, numeric_only=False):
labels = com._ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
- result = DataFrame(counts, index=level_index,
- columns=agg_axis)
+ result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
@@ -4706,7 +4721,10 @@ def _count_level(self, level, axis=0, numeric_only=False):
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
- f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)
+
+ def f(x):
+ return op(x, axis=axis, skipna=skipna, **kwds)
+
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
@@ -4726,7 +4744,7 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
- result = self.apply(f,reduce=False)
+ result = self.apply(f, reduce=False)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
@@ -4739,8 +4757,8 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
- "type %s not implemented."
- % filter_type)
+ "type %s not implemented." %
+ filter_type)
raise_with_traceback(e)
result = f(data.values)
labels = data._get_agg_axis(axis)
@@ -4847,14 +4865,14 @@ def _get_agg_axis(self, axis_num):
def mode(self, axis=0, numeric_only=False):
"""
- Gets the mode(s) of each element along the axis selected. Empty if nothing
- has 2+ occurrences. Adds a row for each mode per label, fills in gaps
- with nan.
+ Gets the mode(s) of each element along the axis selected. Empty if
+ nothing has 2+ occurrences. Adds a row for each mode per label, fills
+ in gaps with nan.
Note that there could be multiple values returned for the selected
- axis (when more than one item share the maximum frequency), which is the
- reason why a dataframe is returned. If you want to impute missing values
- with the mode in a dataframe ``df``, you can just do this:
+ axis (when more than one item share the maximum frequency), which is
+ the reason why a dataframe is returned. If you want to impute missing
+ values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
@@ -4878,7 +4896,10 @@ def mode(self, axis=0, numeric_only=False):
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
- f = lambda s: s.mode()
+
+ def f(s):
+ return s.mode()
+
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
@@ -5154,26 +5175,25 @@ def isin(self, values):
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
- raise ValueError("ValueError: cannot compute isin with"
- " a duplicate axis.")
+ raise ValueError("cannot compute isin with "
+ "a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
- raise ValueError("ValueError: cannot compute isin with"
- " a duplicate axis.")
+ raise ValueError("cannot compute isin with "
+ "a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
- raise TypeError("only list-like or dict-like objects are"
- " allowed to be passed to DataFrame.isin(), "
+ raise TypeError("only list-like or dict-like objects are "
+ "allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(lib.ismember(self.values.ravel(),
- set(values)).reshape(self.shape),
- self.index,
+ set(values)).reshape(self.shape), self.index,
self.columns)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Deprecated stuff
def combineAdd(self, other):
@@ -5237,6 +5257,7 @@ def combineMult(self, other):
_EMPTY_SERIES = Series([])
+
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
@@ -5300,8 +5321,8 @@ def extract_index(data):
if have_series:
if lengths[0] != len(index):
- msg = ('array length %d does not match index length %d'
- % (lengths[0], len(index)))
+ msg = ('array length %d does not match index length %d' %
+ (lengths[0], len(index)))
raise ValueError(msg)
else:
index = Index(np.arange(lengths[0]))
@@ -5349,11 +5370,11 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
if isinstance(data, DataFrame):
if columns is not None:
- arrays = [data._ixs(i,axis=1).values for i, col in enumerate(data.columns)
- if col in columns]
+ arrays = [data._ixs(i, axis=1).values
+ for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
- arrays = [data._ixs(i,axis=1).values for i in range(len(columns))]
+ arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
@@ -5368,8 +5389,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
- coerce_float=coerce_float,
- dtype=dtype)
+ coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
@@ -5378,8 +5398,8 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(data))
return data, columns
- elif (isinstance(data, (np.ndarray, Series, Index))
- and data.dtype.names is not None):
+ elif (isinstance(data, (np.ndarray, Series, Index)) and
+ data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
@@ -5387,8 +5407,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
else:
# last ditch effort
data = lmap(tuple, data)
- return _list_to_arrays(data, columns,
- coerce_float=coerce_float,
+ return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
@@ -5433,10 +5452,8 @@ def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
- indexer = _ensure_index(
- arr_columns).get_indexer(columns)
- arr_columns = _ensure_index(
- [arr_columns[i] for i in indexer])
+ indexer = _ensure_index(arr_columns).get_indexer(columns)
+ arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
@@ -5515,7 +5532,7 @@ def convert(arr):
arr = com._possibly_cast_to_datetime(arr, dtype)
return arr
- arrays = [ convert(arr) for arr in content ]
+ arrays = [convert(arr) for arr in content]
return arrays, columns
@@ -5583,29 +5600,26 @@ def _from_nested_dict(data):
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
-
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
-import pandas.tools.plotting as gfx
-
-DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods, gfx.FramePlotMethods)
+DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods,
+ gfx.FramePlotMethods)
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
-def boxplot(self, column=None, by=None, ax=None, fontsize=None,
- rot=0, grid=True, figsize=None, layout=None, return_type=None,
- **kwds):
+def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
+ grid=True, figsize=None, layout=None, return_type=None, **kwds):
import pandas.tools.plotting as plots
import matplotlib.pyplot as plt
- ax = plots.boxplot(self, column=column, by=by, ax=ax,
- fontsize=fontsize, grid=grid, rot=rot,
- figsize=figsize, layout=layout, return_type=return_type,
- **kwds)
+ ax = plots.boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
+ grid=grid, rot=rot, figsize=figsize, layout=layout,
+ return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
+
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
diff --git a/setup.cfg b/setup.cfg
index 5c07a44ff4f7f..f69e256b80869 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,3 +13,9 @@ parentdir_prefix = pandas-
[flake8]
ignore = E731
+
+[yapf]
+based_on_style = pep8
+split_before_named_assigns = false
+split_penalty_after_opening_bracket = 1000000
+split_penalty_logical_operator = 30
| Fix layout and PEP 8 compliance for categorical, common, and frame. Used YAPF with default settings and then made manual modifications, including:
- fixing the line length of comments/docstrings for PEP 8 compliance
- converting lambdas to named functions for PEP 8 compliance
- changing YAPF's default formatting of error strings
- changing some YAPF line-break choices to make them read more naturally. For example, converting
```
if (function1(argument) and function2(
argument)):
```
to
```
if (function1(argument) and
function2(argument)):
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11951 | 2016-01-03T21:47:08Z | 2016-01-16T15:13:58Z | 2016-01-16T15:13:58Z | 2016-01-16T17:12:07Z |
WIP/PEP8: pandas/core | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index e5347f03b5462..d1c983769ed2a 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -12,6 +12,7 @@
import pandas.hashtable as htable
from pandas.compat import string_types
+
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
@@ -44,7 +45,8 @@ def match(to_match, values, na_sentinel=-1):
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas.core.series import Series
- result = Series(result.ravel()).replace(-1,na_sentinel).values.reshape(result.shape)
+ result = Series(result.ravel()).replace(-1, na_sentinel).values.\
+ reshape(result.shape)
return result
@@ -63,6 +65,7 @@ def unique(values):
uniques
"""
values = com._asarray_tuplesafe(values)
+
f = lambda htype, caster: _unique_generic(values, htype, caster)
return _hashtable_algo(f, values.dtype)
@@ -95,9 +98,9 @@ def isin(comps, values):
# work-around for numpy < 1.8 and comparisions on py3
# faster for larger cases to use np.in1d
if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
- f = lambda x, y: np.in1d(x,np.asarray(list(y)))
+ f = lambda x, y: np.in1d(x, np.asarray(list(y)))
else:
- f = lambda x, y: lib.ismember_int64(x,set(y))
+ f = lambda x, y: lib.ismember_int64(x, set(y))
# may need i8 conversion for proper membership testing
if com.is_datetime64_dtype(comps):
@@ -115,6 +118,7 @@ def isin(comps, values):
return f(comps, values)
+
def _hashtable_algo(f, dtype, return_dtype=None):
"""
f(HashTable, type_caster) -> result
@@ -148,8 +152,6 @@ def _unique_generic(values, table_type, type_caster):
return type_caster(uniques)
-
-
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
"""
Encode input values as an enumerated type or categorical variable
@@ -169,12 +171,15 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
-------
labels : the indexer to the original array
uniques : ndarray (1-d) or Index
- the unique values. Index is returned when passed values is Index or Series
+ the unique values. Index is returned when passed values is Index or
+ Series
- note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex
+ note: an array of Periods will ignore sort as it returns an always sorted
+ PeriodIndex
"""
if order is not None:
- msg = "order is deprecated. See https://github.com/pydata/pandas/issues/6926"
+ msg = "order is deprecated. See " \
+ "https://github.com/pydata/pandas/issues/6926"
warn(msg, FutureWarning, stacklevel=2)
from pandas.core.index import Index
@@ -203,10 +208,12 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
# order ints before strings
ordered = np.concatenate([
- np.sort(np.array([ e for i, e in enumerate(uniques) if f(e) ],dtype=object)) for f in [ lambda x: not isinstance(x,string_types),
- lambda x: isinstance(x,string_types) ]
- ])
- sorter = com._ensure_platform_int(t.lookup(com._ensure_object(ordered)))
+ np.sort(np.array([e for i, e in enumerate(uniques) if f(e)],
+ dtype=object)) for f in
+ [lambda x: not isinstance(x, string_types),
+ lambda x: isinstance(x, string_types)]])
+ sorter = com._ensure_platform_int(t.lookup(
+ com._ensure_object(ordered)))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
@@ -276,7 +283,8 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
is_period = com.is_period_arraylike(values)
is_datetimetz = com.is_datetimetz(values)
- if com.is_datetime_or_timedelta_dtype(dtype) or is_period or is_datetimetz:
+ if com.is_datetime_or_timedelta_dtype(dtype) or is_period or \
+ is_datetimetz:
if is_period:
values = PeriodIndex(values)
@@ -300,7 +308,6 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
else:
keys = keys.astype(dtype)
-
elif com.is_integer_dtype(dtype):
values = com._ensure_int64(values)
keys, counts = htable.value_count_scalar64(values, dropna)
@@ -322,7 +329,8 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
if bins is not None:
# TODO: This next line should be more efficient
- result = result.reindex(np.arange(len(cat.categories)), fill_value=0)
+ result = result.reindex(np.arange(len(cat.categories)),
+ fill_value=0)
result.index = bins[:-1]
if sort:
@@ -525,12 +533,11 @@ def _finalize_nsmallest(arr, kth_val, n, keep, narr):
def nsmallest(arr, n, keep='first'):
- '''
+ """
Find the indices of the n smallest values of a numpy array.
Note: Fails silently with NaN.
-
- '''
+ """
if keep == 'last':
arr = arr[::-1]
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 548b922926f02..7164a593d8e96 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -7,8 +7,7 @@
from pandas.core import common as com
import pandas.core.nanops as nanops
import pandas.lib as lib
-from pandas.util.decorators import (Appender, Substitution,
- cache_readonly, deprecate_kwarg)
+from pandas.util.decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.common import AbstractMethodError
_shared_docs = dict()
@@ -17,7 +16,6 @@
class StringMixin(object):
-
"""implements string methods so long as object defines a `__unicode__`
method.
@@ -26,7 +24,7 @@ class StringMixin(object):
# side note - this could be made into a metaclass if more than one
# object needs
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
@@ -115,7 +113,7 @@ def _reset_cache(self, key=None):
def __sizeof__(self):
"""
Generates the total memory usage for a object that returns
- either a value or Series of values
+ either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
@@ -131,25 +129,27 @@ def __sizeof__(self):
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
- Prevents additional attributes via xxx.attribute = "something" after a call to
- `self.__freeze()`. Mainly used to prevent the user from using wrong attrirbutes
- on a accessor (`Series.cat/.str/.dt`).
+ Prevents additional attributes via xxx.attribute = "something" after a
+ call to `self.__freeze()`. Mainly used to prevent the user from using
+ wrong attrirbutes on a accessor (`Series.cat/.str/.dt`).
- If you really want to add a new attribute at a later time, you need to use
- `object.__setattr__(self, key, value)`.
- """
+ If you really want to add a new attribute at a later time, you need to use
+ `object.__setattr__(self, key, value)`.
+ """
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
-
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
- # dict lookup instead of getattr as getattr is false for getter which error
- if getattr(self, "__frozen", False) and not (key in type(self).__dict__ or key == "_cache"):
- raise AttributeError( "You cannot add any new attribute '{key}'".format(key=key))
+ # dict lookup instead of getattr as getattr is false for getter
+ # which error
+ if getattr(self, "__frozen", False) and not \
+ (key in type(self).__dict__ or key == "_cache"):
+ raise AttributeError("You cannot add any new attribute '{key}'".
+ format(key=key))
object.__setattr__(self, key, value)
@@ -157,7 +157,8 @@ class PandasDelegate(PandasObject):
""" an abstract base class for delegating methods/properties """
def _delegate_property_get(self, name, *args, **kwargs):
- raise TypeError("You cannot access the property {name}".format(name=name))
+ raise TypeError("You cannot access the "
+ "property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
@@ -166,7 +167,8 @@ def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
- def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False):
+ def _add_delegate_accessors(cls, delegate, accessors, typ,
+ overwrite=False):
"""
add accessors to cls from the delegate class
@@ -178,20 +180,21 @@ def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False):
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
-
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
+
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
- return property(fget=_getter, fset=_setter, doc=getattr(delegate,name).__doc__)
+ return property(fget=_getter, fset=_setter,
+ doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
@@ -199,7 +202,7 @@ def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
- f.__doc__ = getattr(delegate,name).__doc__
+ f.__doc__ = getattr(delegate, name).__doc__
return f
@@ -212,7 +215,7 @@ def f(self, *args, **kwargs):
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
- setattr(cls,name,f)
+ setattr(cls, name, f)
class AccessorProperty(object):
@@ -250,17 +253,17 @@ class SpecificationError(GroupByError):
class SelectionMixin(object):
"""
- mixin implementing the selection & aggregation interface on a group-like object
- sub-classes need to define: obj, exclusions
+ mixin implementing the selection & aggregation interface on a group-like
+ object sub-classes need to define: obj, exclusions
"""
_selection = None
- _internal_names = ['_cache','__setstate__']
+ _internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
- builtins.min: np.min,
- }
+ builtins.min: np.min
+ }
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
@@ -275,7 +278,7 @@ class SelectionMixin(object):
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
- }
+ }
@property
def name(self):
@@ -286,7 +289,8 @@ def name(self):
@property
def _selection_list(self):
- if not isinstance(self._selection, (list, tuple, com.ABCSeries, com.ABCIndex, np.ndarray)):
+ if not isinstance(self._selection, (list, tuple, com.ABCSeries,
+ com.ABCIndex, np.ndarray)):
return [self._selection]
return self._selection
@@ -300,7 +304,8 @@ def _selected_obj(self):
@cache_readonly
def _obj_with_exclusions(self):
- if self._selection is not None and isinstance(self.obj, com.ABCDataFrame):
+ if self._selection is not None and isinstance(self.obj,
+ com.ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
@@ -312,14 +317,15 @@ def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
- if isinstance(key, (list, tuple, com.ABCSeries, com.ABCIndex, np.ndarray)):
+ if isinstance(key, (list, tuple, com.ABCSeries, com.ABCIndex,
+ np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return self._gotitem(list(key), ndim=2)
- elif not getattr(self,'as_index',False):
+ elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return self._gotitem(key, ndim=2)
@@ -345,7 +351,8 @@ def _gotitem(self, key, ndim, subset=None):
"""
raise AbstractMethodError(self)
- _agg_doc = """Aggregate using input function or dict of {column -> function}
+ _agg_doc = """Aggregate using input function or dict of {column ->
+function}
Parameters
----------
@@ -395,7 +402,6 @@ def _aggregate(self, arg, *args, **kwargs):
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
-
Returns
-------
tuple of result, how
@@ -406,7 +412,7 @@ def _aggregate(self, arg, *args, **kwargs):
None if not required
"""
- _level = kwargs.pop('_level',None)
+ _level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs), None
@@ -431,7 +437,8 @@ def _aggregate(self, arg, *args, **kwargs):
subset = obj
for fname, agg_how in compat.iteritems(arg):
- colg = self._gotitem(self._selection, ndim=1, subset=subset)
+ colg = self._gotitem(self._selection, ndim=1,
+ subset=subset)
result[fname] = colg.aggregate(agg_how, _level=None)
keys.append(fname)
else:
@@ -442,7 +449,7 @@ def _aggregate(self, arg, *args, **kwargs):
if isinstance(list(result.values())[0], com.ABCDataFrame):
from pandas.tools.merge import concat
- result = concat([ result[k] for k in keys ], keys=keys, axis=1)
+ result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
from pandas import DataFrame
result = DataFrame(result)
@@ -475,7 +482,7 @@ def _aggregate_multiple_funcs(self, arg, _level):
keys = []
# degenerate case
- if obj.ndim==1:
+ if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
@@ -518,6 +525,7 @@ def _is_builtin_func(self, arg):
"""
return self._builtin_table.get(arg, arg)
+
class FrozenList(PandasObject, list):
"""
@@ -585,6 +593,7 @@ def __repr__(self):
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
+
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
@@ -623,7 +632,9 @@ def __unicode__(self):
class IndexOpsMixin(object):
- """ common ops mixin to support a unified inteface / docs for Series / Index """
+ """ common ops mixin to support a unified inteface / docs for Series /
+ Index
+ """
# ndarray compatibility
__array_priority__ = 1000
@@ -632,7 +643,8 @@ def transpose(self):
""" return the transpose, which is by definition self """
return self
- T = property(transpose, doc="return the transpose, which is by definition self")
+ T = property(transpose, doc="return the transpose, which is by "
+ "definition self")
@property
def shape(self):
@@ -641,11 +653,15 @@ def shape(self):
@property
def ndim(self):
- """ return the number of dimensions of the underlying data, by definition 1 """
+ """ return the number of dimensions of the underlying data,
+ by definition 1
+ """
return 1
def item(self):
- """ return the first element of the underlying data as a python scalar """
+ """ return the first element of the underlying data as a python
+ scalar
+ """
try:
return self.values.item()
except IndexError:
@@ -685,7 +701,9 @@ def flags(self):
@property
def base(self):
- """ return the base object if the memory of the underlying data is shared """
+ """ return the base object if the memory of the underlying data is
+ shared
+ """
return self.values.base
@property
@@ -729,9 +747,10 @@ def hasnans(self):
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
- func = getattr(self,name,None)
+ func = getattr(self, name, None)
if func is None:
- raise TypeError("{klass} cannot perform the operation {op}".format(klass=self.__class__.__name__,op=name))
+ raise TypeError("{klass} cannot perform the operation {op}".format(
+ klass=self.__class__.__name__, op=name))
return func(**kwds)
def value_counts(self, normalize=False, sort=True, ascending=False,
@@ -787,7 +806,7 @@ def unique(self):
"""
from pandas.core.nanops import unique1d
values = self.values
- if hasattr(values,'unique'):
+ if hasattr(values, 'unique'):
return values.unique()
return unique1d(values)
@@ -836,7 +855,7 @@ def memory_usage(self, deep=False):
--------
numpy.ndarray.nbytes
"""
- if hasattr(self.values,'memory_usage'):
+ if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
@@ -866,9 +885,9 @@ def factorize(self, sort=False, na_sentinel=-1):
def searchsorted(self, key, side='left'):
""" np.ndarray searchsorted compat """
- ### FIXME in GH7447
- #### needs coercion on the key (DatetimeIndex does alreay)
- #### needs tests/doc-string
+ # FIXME in GH7447
+ # needs coercion on the key (DatetimeIndex does alreay)
+ # needs tests/doc-string
return self.values.searchsorted(key, side=side)
_shared_docs['drop_duplicates'] = (
@@ -889,7 +908,8 @@ def searchsorted(self, key, side='left'):
deduplicated : %(klass)s
""")
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
+ False: 'first'})
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
duplicated = self.duplicated(keep=keep)
@@ -905,8 +925,10 @@ def drop_duplicates(self, keep='first', inplace=False):
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
+ - ``first`` : Mark duplicates as ``True`` except for the first
+ occurrence.
+ - ``last`` : Mark duplicates as ``True`` except for the last
+ occurrence.
- False : Mark all duplicates as ``True``.
take_last : deprecated
@@ -915,7 +937,8 @@ def drop_duplicates(self, keep='first', inplace=False):
duplicated : %(duplicated)s
""")
- @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
+ False: 'first'})
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
keys = com._values_from_object(com._ensure_object(self.values))
@@ -926,7 +949,7 @@ def duplicated(self, keep='first'):
except AttributeError:
return np.array(duplicated, dtype=bool)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
diff --git a/pandas/core/dtypes.py b/pandas/core/dtypes.py
index 69957299aa9bb..1e358694de63e 100644
--- a/pandas/core/dtypes.py
+++ b/pandas/core/dtypes.py
@@ -4,6 +4,7 @@
import numpy as np
from pandas import compat
+
class ExtensionDtype(object):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
@@ -60,17 +61,21 @@ def __repr__(self):
return str(self)
def __hash__(self):
- raise NotImplementedError("sub-classes should implement an __hash__ method")
+ raise NotImplementedError("sub-classes should implement an __hash__ "
+ "method")
def __eq__(self, other):
- raise NotImplementedError("sub-classes should implement an __eq__ method")
+ raise NotImplementedError("sub-classes should implement an __eq__ "
+ "method")
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def is_dtype(cls, dtype):
- """ Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """
+ """ Return a boolean if we if the passed type is an actual dtype that
+ we can match (via string or type)
+ """
if hasattr(dtype, 'dtype'):
dtype = dtype.dtype
if isinstance(dtype, cls):
@@ -82,16 +87,19 @@ def is_dtype(cls, dtype):
except:
return False
+
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
pass
+
class CategoricalDtype(ExtensionDtype):
"""
- A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
+ A np.dtype duck-typed class, suitable for holding a custom categorical
+ dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
"""
@@ -113,7 +121,8 @@ def __eq__(self, other):
@classmethod
def construct_from_string(cls, string):
- """ attempt to construct this type from a string, raise a TypeError if its not possible """
+ """ attempt to construct this type from a string, raise a TypeError if
+ it's not possible """
try:
if string == 'category':
return cls()
@@ -122,25 +131,29 @@ def construct_from_string(cls, string):
raise TypeError("cannot construct a CategoricalDtype")
+
class DatetimeTZDtypeType(type):
"""
the type of DatetimeTZDtype, this metaclass determines subclass ability
"""
pass
+
class DatetimeTZDtype(ExtensionDtype):
"""
- A np.dtype duck-typed class, suitable for holding a custom datetime with tz dtype.
+ A np.dtype duck-typed class, suitable for holding a custom datetime with tz
+ dtype.
- THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.datetime64[ns]
+ THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of
+ np.datetime64[ns]
"""
type = DatetimeTZDtypeType
kind = 'M'
str = '|M8[ns]'
num = 101
base = np.dtype('M8[ns]')
- _metadata = ['unit','tz']
+ _metadata = ['unit', 'tz']
_match = re.compile("(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
def __init__(self, unit, tz=None):
@@ -167,7 +180,8 @@ def __init__(self, unit, tz=None):
except:
raise ValueError("could not construct DatetimeTZDtype")
- raise ValueError("DatetimeTZDtype constructor must have a tz supplied")
+ raise ValueError("DatetimeTZDtype constructor must have a tz "
+ "supplied")
if unit != 'ns':
raise ValueError("DatetimeTZDtype only supports ns units")
@@ -176,7 +190,9 @@ def __init__(self, unit, tz=None):
@classmethod
def construct_from_string(cls, string):
- """ attempt to construct this type from a string, raise a TypeError if its not possible """
+ """ attempt to construct this type from a string, raise a TypeError if
+ it's not possible
+ """
try:
return cls(unit=string)
except ValueError:
@@ -198,4 +214,5 @@ def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
- return isinstance(other, DatetimeTZDtype) and self.unit == other.unit and self.tz == other.tz
+ return isinstance(other, DatetimeTZDtype) and self.unit == other.unit \
+ and self.tz == other.tz
| xref #11928
Putting this out there as a start. Will add to this over the next couple of days.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11945 | 2016-01-03T13:21:24Z | 2016-01-04T02:53:54Z | 2016-01-04T02:53:54Z | 2016-01-04T02:53:58Z |
PEP8: add in flake8 checking | diff --git a/.travis.yml b/.travis.yml
index abca2fe9c2c7e..6252b5654890f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -173,4 +173,4 @@ after_script:
- if [ -f /tmp/doc.log ]; then cat /tmp/doc.log; fi
- source activate pandas && ci/print_versions.py
- ci/print_skipped.py /tmp/nosetests.xml
- - ci/after_script.sh
+ - ci/lint.sh
diff --git a/ci/after_script.sh b/ci/after_script.sh
deleted file mode 100755
index b17d69daa5b8d..0000000000000
--- a/ci/after_script.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-#wget https://raw.github.com/y-p/ScatterCI-CLI/master/scatter_cli.py
-#chmod u+x scatter_cli.py
-
-pip install -I requests==2.1.0
-echo "${TRAVIS_PYTHON_VERSION:0:4}"
-if [ x"${TRAVIS_PYTHON_VERSION:0:4}" == x"2.6" ]; then
- pip install simplejson;
-fi
-
-# ScatterCI accepts a build log, but currently does nothing with it.
-echo '' > /tmp/build.log
-
-# nore exposed in the build logs
-#export SCATTERCI_ACCESS_KEY=
-#export SCATTERCI_HOST=
-
-# Generate a json file describing system and dep versions
-ci/print_versions.py -j /tmp/env.json
-
-# nose ran using "--with-xunit --xunit-file nosetest.xml" and generated /tmp/nosetest.xml
-# Will timeout if server not available, and should not fail the build
-#python scatter_cli.py --xunit-file /tmp/nosetests.xml --log-file /tmp/build.log --env-file /tmp/env.json --build-name "$JOB_NAME" --succeed
-
-true # never fail because bad things happened here
diff --git a/ci/install_conda.sh b/ci/install_conda.sh
index 7c73606e57bf3..465a4e3f63142 100755
--- a/ci/install_conda.sh
+++ b/ci/install_conda.sh
@@ -80,7 +80,7 @@ conda info -a || exit 1
# build deps
REQ="ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.build"
-time conda create -n pandas python=$TRAVIS_PYTHON_VERSION nose || exit 1
+time conda create -n pandas python=$TRAVIS_PYTHON_VERSION nose flake8 || exit 1
time conda install -n pandas --file=${REQ} || exit 1
source activate pandas
diff --git a/ci/lint.sh b/ci/lint.sh
new file mode 100755
index 0000000000000..1795451f7ace4
--- /dev/null
+++ b/ci/lint.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+echo "inside $0"
+
+source activate pandas
+
+echo flake8 pandas/core --statistics
+flake8 pandas/core --statistics
+
+RET="$?"
+
+# we are disabling the return code for now
+# to have Travis-CI pass. When the code
+# passes linting, re-enable
+#exit "$RET"
+
+exit 0
diff --git a/ci/requirements_all.txt b/ci/requirements_all.txt
index 6a05f2db8901f..bc97957bff2b7 100644
--- a/ci/requirements_all.txt
+++ b/ci/requirements_all.txt
@@ -1,4 +1,5 @@
nose
+flake8
sphinx
ipython
python-dateutil
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index eac993f1cdf73..7396fba6548d9 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -3,3 +3,4 @@ pytz
numpy
cython
nose
+flake8
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 8cab77a0688a7..8d8ff74b6c04e 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -176,7 +176,7 @@ If you are on Windows, then you will also need to install the compiler linkages:
This will create the new environment, and not touch any of your existing environments,
nor any existing python installation. It will install all of the basic dependencies of
-*pandas*, as well as the development and testing tools. If you would like to install
+*pandas*, as well as the development and testing tools. If you would like to install
other dependencies, you can install them as follows::
conda install -n pandas_dev -c pandas pytables scipy
@@ -294,7 +294,7 @@ Some other important things to know about the docs:
In [2]: x**3
Out[2]: 8
- Almost all code examples in the docs are run (and the output saved) during the
+ Almost all code examples in the docs are run (and the output saved) during the
doc build. This approach means that code examples will always be up to date,
but it does make the doc building a bit more complex.
@@ -337,7 +337,7 @@ Furthermore, it is recommended to have all `optional dependencies
<http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies>`_
installed. This is not strictly necessary, but be aware that you will see some error
messages when building the docs. This happens because all the code in the documentation
-is executed during the doc build, and so code examples using optional dependencies
+is executed during the doc build, and so code examples using optional dependencies
will generate errors. Run ``pd.show_versions()`` to get an overview of the installed
version of all dependencies.
@@ -357,7 +357,7 @@ So how do you build the docs? Navigate to your local
Then you can find the HTML output in the folder ``pandas/doc/build/html/``.
The first time you build the docs, it will take quite a while because it has to run
-all the code examples and build all the generated docstring pages. In subsequent
+all the code examples and build all the generated docstring pages. In subsequent
evocations, sphinx will try to only build the pages that have been modified.
If you want to do a full clean build, do::
@@ -368,7 +368,7 @@ If you want to do a full clean build, do::
Starting with *pandas* 0.13.1 you can tell ``make.py`` to compile only a single section
of the docs, greatly reducing the turn-around time for checking your changes.
You will be prompted to delete ``.rst`` files that aren't required. This is okay because
-the prior versions of these files can be checked out from git. However, you must make sure
+the prior versions of these files can be checked out from git. However, you must make sure
not to commit the file deletions to your Git repository!
::
@@ -396,7 +396,7 @@ And you'll have the satisfaction of seeing your new and improved documentation!
Building master branch documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When pull requests are merged into the *pandas* ``master`` branch, the main parts of
+When pull requests are merged into the *pandas* ``master`` branch, the main parts of
the documentation are also built by Travis-CI. These docs are then hosted `here
<http://pandas-docs.github.io/pandas-docs-travis>`__.
@@ -410,22 +410,33 @@ Code standards
--------------
*pandas* uses the `PEP8 <http://www.python.org/dev/peps/pep-0008/>`_ standard.
-There are several tools to ensure you abide by this standard.
+There are several tools to ensure you abide by this standard. Here are *some* of
+the more common ``PEP8`` issues:
-We've written a tool to check that your commits are PEP8 great, `pip install pep8radius
+ - we restrict line-length to 80 characters to promote readability
+ - passing arguments should have spaces after commas, e.g. ``foo(arg1, arg2, kw1='bar')``
+
+The Travis-CI will run `flake8 <http://pypi.python.org/pypi/flake8>`_ tool and report
+any stylistic errors in your code. Generating any warnings will cause the build to fail;
+thus these are part of the requirements for submitting code to *pandas*.
+
+It is helpful before submitting code to run this yourself on the diff::
+
+ git diff master | flake8 --diff
+
+Furthermore, we've written a tool to check that your commits are PEP8 great, `pip install pep8radius
<https://github.com/hayd/pep8radius>`_. Look at PEP8 fixes in your branch vs master with::
- pep8radius master --diff
+ pep8radius master --diff
and make these changes with::
pep8radius master --diff --in-place
-Alternatively, use the `flake8 <http://pypi.python.org/pypi/flake8>`_ tool for checking
-the style of your code. Additional standards are outlined on the `code style wiki
+Additional standards are outlined on the `code style wiki
page <https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions>`_.
-Please try to maintain backward compatibility. *pandas* has lots of users with lots of
+Please try to maintain backward compatibility. *pandas* has lots of users with lots of
existing code, so don't break it if at all possible. If you think breakage is required,
clearly state why as part of the pull request. Also, be careful when changing method
signatures and add deprecation warnings where needed.
@@ -433,7 +444,7 @@ signatures and add deprecation warnings where needed.
Test-driven development/code writing
------------------------------------
-*pandas* is serious about testing and strongly encourages contributors to embrace
+*pandas* is serious about testing and strongly encourages contributors to embrace
`test-driven development (TDD) <http://en.wikipedia.org/wiki/Test-driven_development>`_.
This development process "relies on the repetition of a very short development cycle:
first the developer writes an (initially failing) automated test case that defines a desired
@@ -556,7 +567,7 @@ It can also be useful to run tests in your current environment. You can simply d
This command is equivalent to::
- asv run --quick --show-stderr --python=same
+ asv run --quick --show-stderr --python=same
This will launch every test only once, display stderr from the benchmarks, and use your local ``python`` that comes from your ``$PATH``.
@@ -680,7 +691,7 @@ To squash to the master branch do::
Use the ``s`` option on a commit to ``squash``, meaning to keep the commit messages,
or ``f`` to ``fixup``, meaning to merge the commit messages.
-Then you will need to push the branch (see below) forcefully to replace the current
+Then you will need to push the branch (see below) forcefully to replace the current
commits with the new ones::
git push origin shiny-new-feature -f
@@ -714,8 +725,8 @@ Review your code
----------------
When you're ready to ask for a code review, file a pull request. Before you do, once
-again make sure that you have followed all the guidelines outlined in this document
-regarding code style, tests, performance tests, and documentation. You should also
+again make sure that you have followed all the guidelines outlined in this document
+regarding code style, tests, performance tests, and documentation. You should also
double check your branch changes against the branch it was based on:
#. Navigate to your repository on GitHub -- https://github.com/your-user-name/pandas
@@ -735,7 +746,7 @@ release. To submit a pull request:
#. Navigate to your repository on GitHub
#. Click on the ``Pull Request`` button
-#. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks
+#. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks
okay one last time
#. Write a description of your changes in the ``Preview Discussion`` tab
#. Click ``Send Pull Request``.
@@ -747,14 +758,14 @@ updated. Pushing them to GitHub again is done by::
git push -f origin shiny-new-feature
-This will automatically update your pull request with the latest code and restart the
+This will automatically update your pull request with the latest code and restart the
Travis-CI tests.
Delete your merged branch (optional)
------------------------------------
Once your feature branch is accepted into upstream, you'll probably want to get rid of
-the branch. First, merge upstream master into your branch so git knows it is safe to
+the branch. First, merge upstream master into your branch so git knows it is safe to
delete your branch::
git fetch upstream
diff --git a/setup.cfg b/setup.cfg
index 8798e2ce6a5a5..d10f834a57eb1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,3 +10,6 @@ versionfile_source = pandas/_version.py
versionfile_build = pandas/_version.py
tag_prefix = v
parentdir_prefix = pandas-
+
+[flake8]
+ignore = E226,F401
| don't actually fail this till #11928 is clean.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11941 | 2016-01-02T03:42:34Z | 2016-01-03T15:43:55Z | 2016-01-03T15:43:55Z | 2016-01-03T15:43:55Z |
DOC: several minor doc improvements | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 72f1e5749a886..6fa58bf620005 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -186,7 +186,7 @@ Attribute Access
.. _indexing.attribute_access:
-You may access an index on a ``Series``, column on a ``DataFrame``, and a item on a ``Panel`` directly
+You may access an index on a ``Series``, column on a ``DataFrame``, and an item on a ``Panel`` directly
as an attribute:
.. ipython:: python
@@ -1513,7 +1513,7 @@ Compare these two access methods:
These both yield the same results, so which should you use? It is instructive to understand the order
of operations on these and why method 2 (``.loc``) is much preferred over method 1 (chained ``[]``)
-``dfmi['one']`` selects the first level of the columns and returns a data frame that is singly-indexed.
+``dfmi['one']`` selects the first level of the columns and returns a DataFrame that is singly-indexed.
Then another python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'`` happens.
This is indicated by the variable ``dfmi_with_one`` because pandas sees these operations as separate events.
e.g. separate calls to ``__getitem__``, so it has to treat them as linear operations, they happen one after another.
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index a8907ac192707..37c8e8b1d8829 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -967,13 +967,14 @@ def str_get(arr, i):
def str_decode(arr, encoding, errors="strict"):
"""
- Decode character string in the Series/Index to unicode
- using indicated encoding. Equivalent to :meth:`str.decode`.
+ Decode character string in the Series/Index using indicated encoding.
+ Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
+ python3.
Parameters
----------
- encoding : string
- errors : string
+ encoding : str
+ errors : str, optional
Returns
-------
@@ -985,13 +986,13 @@ def str_decode(arr, encoding, errors="strict"):
def str_encode(arr, encoding, errors="strict"):
"""
- Encode character string in the Series/Index to some other encoding
- using indicated encoding. Equivalent to :meth:`str.encode`.
+ Encode character string in the Series/Index using indicated encoding.
+ Equivalent to :meth:`str.encode`.
Parameters
----------
- encoding : string
- errors : string
+ encoding : str
+ errors : str, optional
Returns
-------
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 9399f537191e7..9211ffb5cfde5 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -792,7 +792,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
- meaningful indexing information. Note the the index values on the other
+ meaningful indexing information. Note the index values on the other
axes are still respected in the join.
copy : boolean, default True
If False, do not copy data unnecessarily
| https://api.github.com/repos/pandas-dev/pandas/pulls/11940 | 2016-01-02T02:11:56Z | 2016-01-02T02:20:55Z | 2016-01-02T02:20:55Z | 2016-01-02T02:20:57Z | |
head(0) and tail(0) return empty DataFrames | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 58d003b5c9dc7..2c97cae80ae2a 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -166,6 +166,8 @@ Backwards incompatible API changes
- The parameter ``out`` has been removed from the ``Series.round()`` method. (:issue:`11763`)
- ``DataFrame.round()`` leaves non-numeric columns unchanged in its return, rather than raises. (:issue:`11885`)
+- ``DataFrame.head(0)`` and ``DataFrame.tail(0)`` return empty frames, rather than ``self``. (:issue:`11937`)
+- ``Series.head(0)`` and ``Series.tail(0)`` return empty series, rather than ``self``. (:issue:`11937`)
NaT and Timedelta operations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 85f23b988778f..958571fdc2218 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2136,18 +2136,14 @@ def head(self, n=5):
"""
Returns first n rows
"""
- l = len(self)
- if l == 0 or n==0:
- return self
return self.iloc[:n]
def tail(self, n=5):
"""
Returns last n rows
"""
- l = len(self)
- if l == 0 or n == 0:
- return self
+ if n == 0:
+ return self.iloc[0:0]
return self.iloc[-n:]
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 57e75e3393b1b..d17df54d25ddd 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5457,8 +5457,10 @@ def test_repr_column_name_unicode_truncation_bug(self):
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
- assert_frame_equal(self.frame.head(0), self.frame)
- assert_frame_equal(self.frame.tail(0), self.frame)
+
+ assert_frame_equal(self.frame.head(0), self.frame[0:0])
+ assert_frame_equal(self.frame.tail(0), self.frame[0:0])
+
assert_frame_equal(self.frame.head(-1), self.frame[:-1])
assert_frame_equal(self.frame.tail(-1), self.frame[1:])
assert_frame_equal(self.frame.head(1), self.frame[:1])
@@ -5468,8 +5470,8 @@ def test_head_tail(self):
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
- assert_frame_equal(df.head(0), df)
- assert_frame_equal(df.tail(0), df)
+ assert_frame_equal(df.head(0), df[0:0])
+ assert_frame_equal(df.tail(0), df[0:0])
assert_frame_equal(df.head(-1), df.iloc[:-1])
assert_frame_equal(df.tail(-1), df.iloc[1:])
#test empty dataframe
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 478c65892173d..37cb38454f74e 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -342,8 +342,8 @@ def test_head_tail(self):
self._compare(o.tail(), o.iloc[-5:])
# 0-len
- self._compare(o.head(0), o.iloc[:])
- self._compare(o.tail(0), o.iloc[0:])
+ self._compare(o.head(0), o.iloc[0:0])
+ self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o)+1), o)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index ea9ee8fc5b235..ad79406c70704 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -7642,8 +7642,9 @@ def test_sortlevel(self):
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
+ assert_series_equal(self.series.head(0), self.series[0:0])
assert_series_equal(self.series.tail(), self.series[-5:])
-
+ assert_series_equal(self.series.tail(0), self.series[0:0])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
| closes #11930
frame.head(0) and frame.tail(0) now return empty DataFrames.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11937 | 2015-12-31T20:28:05Z | 2016-01-02T06:39:59Z | 2016-01-02T06:39:59Z | 2016-01-02T06:40:03Z |
API: GH11885 DataFrame.round() now returns non-numeric columns unchanged | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 89ffd0b015846..4dd8a1d19c383 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -165,6 +165,7 @@ Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- The parameter ``out`` has been removed from the ``Series.round()`` method. (:issue:`11763`)
+- ``DataFrame.round()`` leaves non-numeric columns unchanged in its return, rather than raises. (:issue:`11885`)
Bug in QuarterBegin with n=0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 781d4b8bddf12..fea9318349d0b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4416,10 +4416,15 @@ def round(self, decimals=0, out=None):
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
- yield vals.round(decimals[col])
+ yield _series_round(vals, decimals[col])
except KeyError:
yield vals
+ def _series_round(s, decimals):
+ if com.is_integer_dtype(s) or com.is_float_dtype(s):
+ return s.round(decimals)
+ return s
+
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
@@ -4427,7 +4432,7 @@ def _dict_round(df, decimals):
new_cols = [col for col in _dict_round(self, decimals)]
elif com.is_integer(decimals):
# Dispatch to Series.round
- new_cols = [v.round(decimals) for _, v in self.iteritems()]
+ new_cols = [_series_round(v, decimals) for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index aa18c7826e544..57e75e3393b1b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -13523,6 +13523,21 @@ def test_round(self):
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
+
+ def test_round_mixed_type(self):
+ # GH11885
+ df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4], 'col2': ['1', 'a', 'c', 'f'],
+ 'col3': date_range('20111111', periods=4)})
+ round_0 = DataFrame({'col1': [1., 2., 3., 4.], 'col2': ['1', 'a', 'c' ,'f'],
+ 'col3': date_range('20111111', periods=4)})
+ tm.assert_frame_equal(df.round(), round_0)
+ tm.assert_frame_equal(df.round(1), df)
+ tm.assert_frame_equal(df.round({'col1':1}), df)
+ tm.assert_frame_equal(df.round({'col1':0}), round_0)
+ tm.assert_frame_equal(df.round({'col1':0, 'col2':1}), round_0)
+ tm.assert_frame_equal(df.round({'col3':1}), df)
+
+
def test_round_issue(self):
# GH11611
| closes #11885
| https://api.github.com/repos/pandas-dev/pandas/pulls/11923 | 2015-12-29T18:51:08Z | 2015-12-29T21:12:56Z | 2015-12-29T21:12:56Z | 2015-12-29T21:12:59Z |
ENH: raise exception when sqlalchemy is required for database string URI | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 49f277f6ba7bc..47642c2e2bc28 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -528,6 +528,8 @@ def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
+ elif isinstance(con, string_types):
+ raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 21c3ea416e091..ffe7b9d6b460a 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -1051,6 +1051,14 @@ def test_sql_open_close(self):
tm.assert_frame_equal(self.test_frame3, result)
+ def test_con_string_import_error(self):
+ if not SQLALCHEMY_INSTALLED:
+ conn = 'mysql://root@localhost/pandas_nosetest'
+ self.assertRaises(ImportError, sql.read_sql, "SELECT * FROM iris",
+ conn)
+ else:
+ raise nose.SkipTest('SQLAlchemy is installed')
+
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
| - I get a cryptic error `AttributeError: 'str' object has no attribute 'cursor'` if sqlalchemy is not installed and I pass a database string URI to `pandas.read_sql`. I think the error should be made more explicit.
- Also I think there is a shadowing issue with`_SQLALCHEMY_INSTALLED` in `_engine_builder`.
```
/opt/conda/lib/python3.4/site-packages/pandas/io/sql.py in read_sql_query(sql, con, index_col, coerce_float, params, parse_dates, chunksize)
427 return pandas_sql.read_query(
428 sql, index_col=index_col, params=params, coerce_float=coerce_float,
--> 429 parse_dates=parse_dates, chunksize=chunksize)
430
431
/opt/conda/lib/python3.4/site-packages/pandas/io/sql.py in read_query(self, sql, index_col, coerce_float, params, parse_dates, chunksize)
1569
1570 args = _convert_params(sql, params)
-> 1571 cursor = self.execute(*args)
1572 columns = [col_desc[0] for col_desc in cursor.description]
1573
/opt/conda/lib/python3.4/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)
1532 cur = self.con
1533 else:
-> 1534 cur = self.con.cursor()
1535 try:
1536 if kwargs:
AttributeError: 'str' object has no attribute 'cursor'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11920 | 2015-12-29T04:09:22Z | 2016-08-10T10:15:35Z | 2016-08-10T10:15:35Z | 2016-08-10T10:15:35Z |
DOC: fix get_dummies function examples | diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index fecfe5cd82c6d..719f35dd90ce2 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -986,7 +986,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
- >>> get_dummies(s)
+ >>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
@@ -995,22 +995,22 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
>>> s1 = ['a', 'b', np.nan]
- >>> get_dummies(s1)
+ >>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
- >>> get_dummies(s1, dummy_na=True)
+ >>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
- >>> df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
+ >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
'C': [1, 2, 3]})
- >>> get_dummies(df, prefix=['col1', 'col2']):
+ >>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
| Small change, to include the pd alias and to remove an extra colon on the last example.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11917 | 2015-12-28T17:41:23Z | 2015-12-28T17:51:54Z | 2015-12-28T17:51:54Z | 2015-12-28T17:52:03Z |
DOC: adapt remaining occurences of pf.rolling_ | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 12be3037def75..bcfc30c6e404b 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1595,8 +1595,8 @@ when plotting a large number of points.
price = pd.Series(np.random.randn(150).cumsum(),
index=pd.date_range('2000-1-1', periods=150, freq='B'))
- ma = pd.rolling_mean(price, 20)
- mstd = pd.rolling_std(price, 20)
+ ma = price.rolling(20).mean()
+ mstd = price.rolling(20).std()
plt.figure()
| https://api.github.com/repos/pandas-dev/pandas/pulls/11916 | 2015-12-28T10:15:46Z | 2015-12-28T11:18:32Z | 2015-12-28T11:18:32Z | 2015-12-28T11:18:32Z | |
ENH: add pd.test to enable nose test runnning from the imported session, #4327 | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 3c7ca6d5c2326..88efe27b16c2b 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -194,6 +194,14 @@ Top-level evaluation
eval
+Testing
+~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ test
+
.. _api.series:
Series
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 8d8ff74b6c04e..05d5eb67d7a32 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -509,6 +509,15 @@ entire suite. This is done using one of the following constructs::
nosetests pandas/tests/[test-module].py:[TestClass]
nosetests pandas/tests/[test-module].py:[TestClass].[test_method]
+ .. versionadded:: 0.18.0
+
+Furthermore one can run
+
+.. code-block:: python
+
+ pd.test()
+
+with an imported pandas to run tests similarly.
Running the performance test suite
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 2c97cae80ae2a..5149c91d04701 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -14,6 +14,7 @@ users upgrade to this version.
Highlights include:
- Window functions are now methods on ``.groupby`` like objects, see :ref:`here <whatsnew_0180.enhancements.moments>`.
+- ``pd.test()`` top-level nose test runner is available (:issue:`4327`)
Check the :ref:`API Changes <whatsnew_0180.api>` and :ref:`deprecations <whatsnew_0180.deprecations>` before updating.
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 68a90394cacf1..c2ead16b6f821 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -55,7 +55,12 @@
from pandas.tools.util import to_numeric
from pandas.core.reshape import melt
from pandas.util.print_versions import show_versions
+
+# define the testing framework
import pandas.util.testing
+from pandas.util.nosetester import NoseTester
+test = NoseTester().test
+del NoseTester
# use the closest tagged version if possible
from ._version import get_versions
diff --git a/pandas/util/nosetester.py b/pandas/util/nosetester.py
new file mode 100644
index 0000000000000..eee5dfee809be
--- /dev/null
+++ b/pandas/util/nosetester.py
@@ -0,0 +1,207 @@
+"""
+Nose test running.
+
+This module implements ``test()`` function for pandas modules.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import warnings
+from pandas.compat import string_types
+from numpy.testing import nosetester
+
+
+def get_package_name(filepath):
+ """
+ Given a path where a package is installed, determine its name.
+
+ Parameters
+ ----------
+ filepath : str
+ Path to a file. If the determination fails, "pandas" is returned.
+
+ Examples
+ --------
+ >>> pandas.util.nosetester.get_package_name('nonsense')
+ 'pandas'
+
+ """
+
+ pkg_name = []
+ while 'site-packages' in filepath or 'dist-packages' in filepath:
+ filepath, p2 = os.path.split(filepath)
+ if p2 in ('site-packages', 'dist-packages'):
+ break
+ pkg_name.append(p2)
+
+ # if package name determination failed, just default to pandas
+ if not pkg_name:
+ return "pandas"
+
+ # otherwise, reverse to get correct order and return
+ pkg_name.reverse()
+
+ # don't include the outer egg directory
+ if pkg_name[0].endswith('.egg'):
+ pkg_name.pop(0)
+
+ return '.'.join(pkg_name)
+
+import_nose = nosetester.import_nose
+run_module_suite = nosetester.run_module_suite
+
+
+class NoseTester(nosetester.NoseTester):
+ """
+ Nose test runner.
+
+ This class is made available as pandas.util.nosetester.NoseTester, and
+ a test function is typically added to a package's __init__.py like so::
+
+ from numpy.testing import Tester
+ test = Tester().test
+
+ Calling this test function finds and runs all tests associated with the
+ package and all its sub-packages.
+
+ Attributes
+ ----------
+ package_path : str
+ Full path to the package to test.
+ package_name : str
+ Name of the package to test.
+
+ Parameters
+ ----------
+ package : module, str or None, optional
+ The package to test. If a string, this should be the full path to
+ the package. If None (default), `package` is set to the module from
+ which `NoseTester` is initialized.
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of 'warn' during the test execution. Valid strings are:
+
+ - "develop" : equals ``(DeprecationWarning, RuntimeWarning)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ See Notes for more details.
+
+ Notes
+ -----
+ The default for `raise_warnings` is
+ ``(DeprecationWarning, RuntimeWarning)`` for development versions of
+ pandas, and ``()`` for released versions. The purpose of this switching
+ behavior is to catch as many warnings as possible during development, but
+ not give problems for packaging of released versions.
+
+ """
+ excludes = []
+
+ def _show_system_info(self):
+ nose = import_nose()
+
+ import pandas
+ print("pandas version %s" % pandas.__version__)
+ import numpy
+ print("numpy version %s" % numpy.__version__)
+ pddir = os.path.dirname(pandas.__file__)
+ print("pandas is installed in %s" % pddir)
+
+ pyversion = sys.version.replace('\n', '')
+ print("Python version %s" % pyversion)
+ print("nose version %d.%d.%d" % nose.__versioninfo__)
+
+ def _get_custom_doctester(self):
+ """ Return instantiated plugin for doctests
+
+ Allows subclassing of this class to override doctester
+
+ A return value of None means use the nose builtin doctest plugin
+ """
+ return None
+
+ def test(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, raise_warnings=None):
+ """
+ Run tests for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the tests to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow tests as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ attribute_identifier - string passed directly to nosetests as '-A'.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+ doctests : bool, optional
+ If True, run doctests in module. Default is False.
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ (This requires the `coverage module:
+ <http://nedbatchelder.com/code/modules/coverage.html>`_).
+ raise_warnings : str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of 'warn' during the test execution. Valid strings are:
+
+ - "develop" : equals ``(DeprecationWarning, RuntimeWarning)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ Returns
+ -------
+ result : object
+ Returns the result of running the tests as a
+ ``nose.result.TextTestResult`` object.
+ """
+
+ # cap verbosity at 3 because nose becomes *very* verbose beyond that
+ verbose = min(verbose, 3)
+
+ if doctests:
+ print("Running unit tests and doctests for %s" % self.package_name)
+ else:
+ print("Running unit tests for %s" % self.package_name)
+
+ self._show_system_info()
+
+ # reset doctest state on every run
+ import doctest
+ doctest.master = None
+
+ if raise_warnings is None:
+ raise_warnings = 'release'
+
+ _warn_opts = dict(develop=(DeprecationWarning, RuntimeWarning),
+ release=())
+ if isinstance(raise_warnings, string_types):
+ raise_warnings = _warn_opts[raise_warnings]
+
+ with warnings.catch_warnings():
+ # Reset the warning filters to the default state,
+ # so that running the tests is more repeatable.
+ warnings.resetwarnings()
+ # Set all warnings to 'warn', this is because the default 'once'
+ # has the bad property of possibly shadowing later warnings.
+ warnings.filterwarnings('always')
+ # Force the requested warnings to raise
+ for warningtype in raise_warnings:
+ warnings.filterwarnings('error', category=warningtype)
+ # Filter out annoying import messages.
+ warnings.filterwarnings("ignore", category=FutureWarning)
+
+ from numpy.testing.noseclasses import NumpyTestProgram
+
+ argv, plugins = self.prepare_test_args(
+ label, verbose, extra_argv, doctests, coverage)
+ t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
+
+ return t.result
| closes #4327
copied from `numpy` nosetester.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11913 | 2015-12-27T15:44:51Z | 2016-01-08T14:07:46Z | 2016-01-08T14:07:46Z | 2016-03-10T00:52:08Z |
DEPR: pandas.stats.var, pandas.stats.plm, pandas.stats.ols, pandas.stats.fama_macbeth | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index f9bb37359235c..b443bb74e98ea 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -253,6 +253,8 @@ Deprecations
For example, instead of ``s.rolling(window=5,freq='D').max()`` to get the max value on a rolling 5 Day window, one could use ``s.resample('D',how='max').rolling(window=5).max()``, which first resamples the data to daily data, then provides a rolling 5 day window.
- ``pd.tseries.frequencies.get_offset_name`` function is deprecated. Use offset's ``.freqstr`` property as alternative (:issue:`11192`)
+- ``pandas.stats.fama_macbeth`` routines are deprecated and will be removed in a future version (:issue:`6077`)
+- ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var`` routines are deprecated and will be removed in a future version (:issue:`6077`)
.. _whatsnew_0180.prior_deprecations:
@@ -346,4 +348,4 @@ Bug Fixes
- Bug in ``read_sql`` with pymysql connections failing to return chunked data (:issue:`11522`)
-- Bug in ``DataFrame`` when masking an empty ``DataFrame`` (:issue:`11859`)
\ No newline at end of file
+- Bug in ``DataFrame`` when masking an empty ``DataFrame`` (:issue:`11859`)
diff --git a/pandas/stats/fama_macbeth.py b/pandas/stats/fama_macbeth.py
index 30726d82e1aa9..01e68be273226 100644
--- a/pandas/stats/fama_macbeth.py
+++ b/pandas/stats/fama_macbeth.py
@@ -17,7 +17,7 @@ def fama_macbeth(**kwargs):
nw_lags_beta: int
Newey-West adjusts the betas by the given lags
- """
+ """
window_type = kwargs.get('window_type')
if window_type is None:
klass = FamaMacBeth
@@ -32,6 +32,12 @@ def __init__(self, y, x, intercept=True, nw_lags=None,
nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False):
+ import warnings
+ warnings.warn("The pandas.stats.fama_macbeth module is deprecated and will be "
+ "removed in a future version. We refer to external packages "
+ "like statsmodels, see here: http://statsmodels.sourceforge.net/stable/index.html",
+ FutureWarning, stacklevel=4)
+
if dropped_dummies is None:
dropped_dummies = {}
self._nw_lags_beta = nw_lags_beta
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py
index d1d74442d8961..7031d55c0f682 100644
--- a/pandas/stats/ols.py
+++ b/pandas/stats/ols.py
@@ -47,6 +47,12 @@ class OLS(StringMixin):
def __init__(self, y, x, intercept=True, weights=None, nw_lags=None,
nw_overlap=False):
+ import warnings
+ warnings.warn("The pandas.stats.ols module is deprecated and will be "
+ "removed in a future version. We refer to external packages "
+ "like statsmodels, see some examples here: http://statsmodels.sourceforge.net/stable/regression.html",
+ FutureWarning, stacklevel=4)
+
try:
import statsmodels.api as sm
except ImportError:
@@ -1197,8 +1203,11 @@ def _results(self):
@cache_readonly
def _window_time_obs(self):
- window_obs = moments.rolling_sum(self._time_obs_count > 0,
- self._window, min_periods=1)
+ window_obs = (Series(self._time_obs_count > 0)
+ .rolling(self._window, min_periods=1)
+ .sum()
+ .values
+ )
window_obs[np.isnan(window_obs)] = 0
return window_obs.astype(int)
@@ -1211,8 +1220,7 @@ def _nobs_raw(self):
# expanding case
window = len(self._index)
- result = moments.rolling_sum(self._time_obs_count, window,
- min_periods=1)
+ result = Series(self._time_obs_count).rolling(window, min_periods=1).sum().values
return result.astype(int)
diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py
index 53b8cce64b74a..177452476b875 100644
--- a/pandas/stats/plm.py
+++ b/pandas/stats/plm.py
@@ -34,6 +34,11 @@ def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False,
nw_overlap=False):
+ import warnings
+ warnings.warn("The pandas.stats.plm module is deprecated and will be "
+ "removed in a future version. We refer to external packages "
+ "like statsmodels, see some examples here: http://statsmodels.sourceforge.net/stable/mixed_linear.html",
+ FutureWarning, stacklevel=4)
self._x_orig = x
self._y_orig = y
self._weights = weights
@@ -732,6 +737,12 @@ def __init__(self, y, x, window_type='full_sample', window=None,
min_periods=None, intercept=True, nw_lags=None,
nw_overlap=False):
+ import warnings
+ warnings.warn("The pandas.stats.plm module is deprecated and will be "
+ "removed in a future version. We refer to external packages "
+ "like statsmodels, see some examples here: http://statsmodels.sourceforge.net/stable/mixed_linear.html",
+ FutureWarning, stacklevel=4)
+
for attr in self.ATTRIBUTES:
setattr(self.__class__, attr, create_ols_attr(attr))
diff --git a/pandas/stats/tests/test_fama_macbeth.py b/pandas/stats/tests/test_fama_macbeth.py
index dd2f196361226..05849bd80c7a8 100644
--- a/pandas/stats/tests/test_fama_macbeth.py
+++ b/pandas/stats/tests/test_fama_macbeth.py
@@ -4,6 +4,7 @@
from pandas.compat import range
from pandas import compat
+import pandas.util.testing as tm
import numpy as np
@@ -23,8 +24,9 @@ def testFamaMacBethRolling(self):
def checkFamaMacBethExtended(self, window_type, x, y, **kwds):
window = 25
- result = fama_macbeth(y=y, x=x, window_type=window_type, window=window,
- **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = fama_macbeth(y=y, x=x, window_type=window_type, window=window,
+ **kwds)
self._check_stuff_works(result)
index = result._index
@@ -43,10 +45,12 @@ def checkFamaMacBethExtended(self, window_type, x, y, **kwds):
x2[k] = v.truncate(start, end)
y2 = y.truncate(start, end)
- reference = fama_macbeth(y=y2, x=x2, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ reference = fama_macbeth(y=y2, x=x2, **kwds)
assert_almost_equal(reference._stats, result._stats[:, i])
- static = fama_macbeth(y=y2, x=x2, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ static = fama_macbeth(y=y2, x=x2, **kwds)
self._check_stuff_works(static)
def _check_stuff_works(self, result):
diff --git a/pandas/stats/tests/test_math.py b/pandas/stats/tests/test_math.py
index 1d1288e126418..628a37006cfeb 100644
--- a/pandas/stats/tests/test_math.py
+++ b/pandas/stats/tests/test_math.py
@@ -52,7 +52,8 @@ def test_solve_rect(self):
b = Series(np.random.randn(N), self.frame.index)
result = pmath.solve(self.frame, b)
- expected = ols(y=b, x=self.frame, intercept=False).beta
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ expected = ols(y=b, x=self.frame, intercept=False).beta
self.assertTrue(np.allclose(result, expected))
def test_inv_illformed(self):
diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py
index 60e976f09365b..01095ab2336ce 100644
--- a/pandas/stats/tests/test_ols.py
+++ b/pandas/stats/tests/test_ols.py
@@ -115,7 +115,8 @@ def testWLS(self):
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
- result = ols(y=y, x=x, weights=1 / weights)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=y, x=x, weights=1 / weights)
combined = x.copy()
combined['__y__'] = y
@@ -153,10 +154,12 @@ def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
- result = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=y, x=x)
# check that sparse version is the same
- sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
@@ -175,16 +178,18 @@ def checkOLS(self, exog, endog, x, y):
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
- window = sm.tools.tools.rank(x.values) * 2
+ window = np.linalg.matrix_rank(x.values) * 2
- moving = ols(y=y, x=x, weights=weights, window_type=window_type,
- window=window, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ moving = ols(y=y, x=x, weights=weights, window_type=window_type,
+ window=window, **kwds)
# check that sparse version is the same
- sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
- weights=weights,
- window_type=window_type,
- window=window, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
+ weights=weights,
+ window_type=window_type,
+ window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
@@ -202,7 +207,8 @@ def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
- static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
@@ -248,7 +254,8 @@ def compare(self, static, moving, event_index=None,
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
- model = ols(y=df[0], x=df[1])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=df[0], x=df[1])
summary = repr(model)
@@ -269,7 +276,8 @@ def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
- model = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
@@ -289,8 +297,10 @@ def test_r2_no_intercept(self):
x_with = x.copy()
x_with['intercept'] = 1.
- model1 = ols(y=y, x=x)
- model2 = ols(y=y, x=x_with, intercept=False)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model1 = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
@@ -298,28 +308,33 @@ def test_r2_no_intercept(self):
# rolling
- model1 = ols(y=y, x=x, window=20)
- model2 = ols(y=y, x=x_with, window=20, intercept=False)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model1 = ols(y=y, x=x, window=20)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assertTrue((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
- model = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
- model1 = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
- model1 = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
@@ -358,7 +373,8 @@ def test_predict_longer_exog(self):
endog = Series(endogenous)
exog = Series(exogenous)
- model = ols(y=endog, x=exog)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=endog, x=exog)
pred = model.y_predict
self.assertTrue(pred.index.equals(exog.index))
@@ -368,7 +384,8 @@ def test_longpanel_series_combo(self):
lp = wp.to_frame()
y = lp.pop('ItemA')
- model = ols(y=y, x=lp, entity_effects=True, window=20)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assertTrue(notnull(model.beta.values).all())
tm.assertIsInstance(model, PanelOLS)
model.summary
@@ -376,8 +393,10 @@ def test_longpanel_series_combo(self):
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
- model = ols(y=y, x=x)
- expected = ols(y=y, x={'x': x})
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ expected = ols(y=y, x={'x': x})
assert_series_equal(model.beta, expected.beta)
# GH 5233/5250
@@ -388,7 +407,8 @@ def test_various_attributes(self):
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
- model = ols(y=y, x=x, window=20)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
@@ -405,17 +425,22 @@ def test_catch_regressor_overlap(self):
y = tm.makeTimeSeries()
data = {'foo': df1, 'bar': df2}
- self.assertRaises(Exception, ols, y=y, x=data)
+ def f():
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ ols(y=y, x=data)
+ self.assertRaises(Exception, f)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
- model = ols(y=y, x=x, intercept=False)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x, intercept=False)
model.summary
- model = ols(y=y, x=Panel(x))
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
@@ -423,8 +448,10 @@ def test_plm_attrs(self):
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
- rmodel = ols(y=y, x=x, window=10)
- model = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ rmodel = ols(y=y, x=x, window=10)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x)
model.resid
rmodel.resid
@@ -433,7 +460,8 @@ def test_plm_lagged_y_predict(self):
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
- model = ols(y=y, x=x, window=10)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
@@ -441,7 +469,8 @@ def test_plm_f_test(self):
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
- model = ols(y=y, x=x)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
@@ -456,12 +485,16 @@ def test_plm_exclude_dummy_corner(self):
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
- model = ols(
- y=y, x=x, entity_effects=True, dropped_dummies={'entity': 'D'})
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(
+ y=y, x=x, entity_effects=True, dropped_dummies={'entity': 'D'})
model.summary
- self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,
- dropped_dummies={'entity': 'E'})
+ def f():
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ ols(y=y, x=x, entity_effects=True,
+ dropped_dummies={'entity': 'E'})
+ self.assertRaises(Exception, f)
def test_columns_tuples_summary(self):
# #1837
@@ -469,7 +502,8 @@ def test_columns_tuples_summary(self):
Y = Series(np.random.randn(10))
# it works!
- model = ols(y=Y, x=X)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ model = ols(y=Y, x=X)
model.summary
@@ -484,7 +518,8 @@ class TestPanelOLS(BaseTest):
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
- result = ols(y=self.panel_y2, x=self.panel_x2)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
@@ -544,8 +579,10 @@ def test_wls_panel(self):
stack_x.index = stack_x.index._tuple_index
stack_weights.index = stack_weights.index._tuple_index
- result = ols(y=y, x=x, weights=1 / weights)
- expected = ols(y=stack_y, x=stack_x, weights=1 / stack_weights)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=y, x=x, weights=1 / weights)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ expected = ols(y=stack_y, x=stack_x, weights=1 / stack_weights)
assert_almost_equal(result.beta, expected.beta)
@@ -555,7 +592,8 @@ def test_wls_panel(self):
assert_almost_equal(rvals, evals)
def testWithTimeEffects(self):
- result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])
@@ -565,7 +603,8 @@ def testWithTimeEffects(self):
# _check_non_raw_results(result)
def testWithEntityEffects(self):
- result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
@@ -577,8 +616,9 @@ def testWithEntityEffects(self):
# _check_non_raw_results(result)
def testWithEntityEffectsAndDroppedDummies(self):
- result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
- dropped_dummies={'entity': 'B'})
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
+ dropped_dummies={'entity': 'B'})
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 6., 14., 1.], [1, 9, 17, 1], [0, 30, 48, 1]],
@@ -589,7 +629,8 @@ def testWithEntityEffectsAndDroppedDummies(self):
# _check_non_raw_results(result)
def testWithXEffects(self):
- result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
assert_almost_equal(result._y.values.flat, [1, 4, 5])
@@ -600,8 +641,9 @@ def testWithXEffects(self):
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndDroppedDummies(self):
- result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
- dropped_dummies={'x1': 30})
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
+ dropped_dummies={'x1': 30})
res = result._x
assert_almost_equal(result._y.values.flat, [1, 4, 5])
@@ -612,7 +654,8 @@ def testWithXEffectsAndDroppedDummies(self):
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndConversion(self):
- result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],
@@ -625,8 +668,9 @@ def testWithXEffectsAndConversion(self):
# _check_non_raw_results(result)
def testWithXEffectsAndConversionAndDroppedDummies(self):
- result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
- dropped_dummies={'x2': 'foo'})
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
+ dropped_dummies={'x2': 'foo'})
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],
@@ -707,7 +751,8 @@ def testUnknownWindowType(self):
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
- result = ols(y=y, x=x, pool=False, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=y, x=x, pool=False, **kwds)
_check_repr(result)
for attr in NonPooledPanelOLS.ATTRIBUTES:
@@ -716,8 +761,9 @@ def checkNonPooled(self, x, y, **kwds):
def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
window = 25 # must be larger than rank of x
- moving = ols(y=y, x=x, window_type=window_type,
- window=window, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ moving = ols(y=y, x=x, window_type=window_type,
+ window=window, **kwds)
index = moving._index
@@ -734,7 +780,8 @@ def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
- static = ols(y=y_iter, x=x_iter, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ static = ols(y=y_iter, x=x_iter, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
@@ -743,8 +790,10 @@ def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
def checkForSeries(self, x, y, series_x, series_y, **kwds):
# Consistency check with simple OLS.
- result = ols(y=y, x=x, **kwds)
- reference = ols(y=series_y, x=series_x, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ols(y=y, x=x, **kwds)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ reference = ols(y=series_y, x=series_x, **kwds)
self.compare(reference, result)
@@ -783,9 +832,11 @@ def test_auto_rolling_window_type(self):
data = tm.makeTimeDataFrame()
y = data.pop('A')
- window_model = ols(y=y, x=data, window=20, min_periods=10)
- rolling_model = ols(y=y, x=data, window=20, min_periods=10,
- window_type='rolling')
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ window_model = ols(y=y, x=data, window=20, min_periods=10)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ rolling_model = ols(y=y, x=data, window=20, min_periods=10,
+ window_type='rolling')
assert_frame_equal(window_model.beta, rolling_model.beta)
diff --git a/pandas/stats/var.py b/pandas/stats/var.py
index be55507f976cb..b06e2f3181496 100644
--- a/pandas/stats/var.py
+++ b/pandas/stats/var.py
@@ -26,6 +26,12 @@ class VAR(StringMixin):
"""
def __init__(self, data, p=1, intercept=True):
+ import warnings
+ warnings.warn("The pandas.stats.var module is deprecated and will be "
+ "removed in a future version. We refer to external packages "
+ "like statsmodels, see some examples here: http://statsmodels.sourceforge.net/stable/vector_ar.html#var",
+ FutureWarning, stacklevel=4)
+
try:
import statsmodels.tsa.vector_ar.api as sm_var
except ImportError:
| closes #6077
this cleans ups the remainder of the `pandas.stats` modules
we are directing:
- `OLS` -> `http://statsmodels.sourceforge.net/stable/regression.html`
- `PanelOLS/MovingOLS/PooledOLS` to `http://statsmodels.sourceforge.net/stable/mixed_linear.html`
- `VAR` to `http://statsmodels.sourceforge.net/stable/vector_ar.html#var`
- `fama_macbeth` to `http://statsmodels.sourceforge.net/stable/index.html` (as not implemented in statsmodell, but good place to start)
| https://api.github.com/repos/pandas-dev/pandas/pulls/11898 | 2015-12-24T21:29:09Z | 2015-12-26T13:08:42Z | 2015-12-26T13:08:42Z | 2015-12-26T13:08:42Z |
BUG: masking empty DataFrame | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 733e86e38e47a..9cb0339f60cd7 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -345,3 +345,4 @@ Bug Fixes
- Bug in ``read_sql`` with pymysql connections failing to return chunked data (:issue:`11522`)
+- Bug in ``DataFrame`` when masking an empty ``DataFrame`` (:issue:`11859`)
\ No newline at end of file
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5cb31717394c6..781d4b8bddf12 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2047,7 +2047,7 @@ def _getitem_multilevel(self, key):
return self._get_item_cache(key)
def _getitem_frame(self, key):
- if key.values.dtype != np.bool_:
+ if key.values.size and not com.is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 45f528ceec02b..aa18c7826e544 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -826,6 +826,13 @@ def test_setitem_empty_frame_with_boolean(self):
df[df > df2] = 47
assert_frame_equal(df, df2)
+ def test_getitem_empty_frame_with_boolean(self):
+ # Test for issue #11859
+
+ df = pd.DataFrame()
+ df2 = df[df>0]
+ assert_frame_equal(df, df2)
+
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
| closes #11859
A `ValueError` was raised when trying to mask an empty DataFrame. In addition to solve the bug I've also added a simple new unit test in `test_frame.py`. I didn't add a line in `whatsnew` because this was already done in #10126
| https://api.github.com/repos/pandas-dev/pandas/pulls/11895 | 2015-12-24T12:00:53Z | 2015-12-26T00:29:36Z | 2015-12-26T00:29:36Z | 2015-12-26T00:29:40Z |
DOC: Put MultiIndex into the main API Reference | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 3c7ca6d5c2326..1b7c7916c33a1 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -572,6 +572,7 @@ strings and apply several methods to it. These can be accessed like
Series.dt
Index.str
CategoricalIndex.str
+ MultiIndex.str
DatetimeIndex.str
TimedeltaIndex.str
@@ -1396,6 +1397,33 @@ Categorical Components
CategoricalIndex.as_ordered
CategoricalIndex.as_unordered
+.. _api.multiindex:
+
+MultiIndex
+----------
+
+.. autosummary::
+ :toctree: generated/
+
+ MultiIndex
+
+MultiIndex Components
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ MultiIndex.from_arrays
+ MultiIndex.from_tuples
+ MultiIndex.from_product
+ MultiIndex.set_levels
+ MultiIndex.set_labels
+ MultiIndex.to_hierarchical
+ MultiIndex.is_lexsorted
+ MultiIndex.droplevel
+ MultiIndex.swaplevel
+ MultiIndex.reorder_levels
+
.. _api.datetimeindex:
DatetimeIndex
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 552eb7fb81180..ad1d36a922c15 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -4022,8 +4022,7 @@ def isin(self, values, level=None):
class MultiIndex(Index):
"""
- Implements multi-level, a.k.a. hierarchical, index object for pandas
- objects
+ A multi-level, or hierarchical, index object for pandas objects
Parameters
----------
| Doc changes only.
closes #11890
| https://api.github.com/repos/pandas-dev/pandas/pulls/11893 | 2015-12-23T23:09:32Z | 2016-01-11T13:39:53Z | 2016-01-11T13:39:53Z | 2016-01-22T19:08:24Z |
ENH: RangeIndex redux | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index d976b0c8c21a5..465fbf483b1cc 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -617,10 +617,20 @@ faster than fancy indexing.
timeit ser.ix[indexer]
timeit ser.take(indexer)
+.. _indexing.index_types:
+
+Index Types
+-----------
+
+We have discussed ``MultiIndex`` in the previous sections pretty extensively. ``DatetimeIndex`` and ``PeriodIndex``
+are shown :ref:`here <timeseries.overview>`. ``TimedeltaIndex`` are :ref:`here <timedeltas.timedeltas>`.
+
+In the following sub-sections we will highlite some other index types.
+
.. _indexing.categoricalindex:
CategoricalIndex
-----------------
+~~~~~~~~~~~~~~~~
.. versionadded:: 0.16.1
@@ -702,10 +712,21 @@ values NOT in the categories, similarly to how you can reindex ANY pandas index.
In [12]: pd.concat([df2, df3]
TypeError: categories must match existing categories when appending
+.. _indexing.rangeindex:
+
+Int64Index and RangeIndex
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``Int64Index`` is a fundamental basic index in *pandas*. This is an Immutable array implementing an ordered, sliceable set.
+Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``NDFrame`` objects.
+
+``RangeIndex`` is a sub-class of ``Int64Index`` added in version 0.18.0, now providing the default index for all ``NDFrame`` objects.
+``RangeIndex`` is an optimized version of ``Int64Index`` that can represent a monotonic ordered set. These are analagous to python :ref:`range types <https://docs.python.org/3/library/stdtypes.html#typesseq-range>`.
+
.. _indexing.float64index:
Float64Index
-------------
+~~~~~~~~~~~~
.. note::
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index b5be9cf395feb..80a4774e02e69 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1091,7 +1091,7 @@ An example of how holidays and holiday calendars are defined:
Using this calendar, creating an index or doing offset arithmetic skips weekends
and holidays (i.e., Memorial Day/July 4th). For example, the below defines
a custom business day offset using the ``ExampleCalendar``. Like any other offset,
-it can be used to create a ``DatetimeIndex`` or added to ``datetime``
+it can be used to create a ``DatetimeIndex`` or added to ``datetime``
or ``Timestamp`` objects.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index c1f14ce6703a0..05a9d3ac0c861 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -19,6 +19,7 @@ Highlights include:
- Window functions are now methods on ``.groupby`` like objects, see :ref:`here <whatsnew_0180.enhancements.moments>`.
- ``pd.test()`` top-level nose test runner is available (:issue:`4327`)
+- Adding support for a ``RangeIndex`` as a specialized form of the ``Int64Index`` for memory savings, see :ref:`here <whatsnew_0180.enhancements.rangeindex>`.
Check the :ref:`API Changes <whatsnew_0180.api>` and :ref:`deprecations <whatsnew_0180.deprecations>` before updating.
@@ -102,6 +103,39 @@ And multiple aggregations
r.agg({'A' : ['mean','std'],
'B' : ['mean','std']})
+.. _whatsnew_0180.enhancements.rangeindex:
+
+Range Index
+^^^^^^^^^^^
+
+A ``RangeIndex`` has been added to the ``Int64Index`` sub-classes to support a memory saving alternative for common use cases. This has a similar implementation to the python ``range`` object (``xrange`` in python 2), in that it only stores the start, stop, and step values for the index. It will transparently interact with the user API, converting to ``Int64Index`` if needed.
+
+This will now be the default constructed index for ``NDFrame`` objects, rather than previous an ``Int64Index``. (:issue:`939`)
+
+Previous Behavior:
+
+.. code-block:: python
+
+ In [3]: s = Series(range(1000))
+
+ In [4]: s.index
+ Out[4]:
+ Int64Index([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ ...
+ 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], dtype='int64', length=1000)
+
+ In [6]: s.index.nbytes
+ Out[6]: 8000
+
+
+New Behavior:
+
+.. ipython:: python
+
+ s = Series(range(1000))
+ s.index
+ s.index.nbytes
+
.. _whatsnew_0180.enhancements.other:
Other enhancements
diff --git a/pandas/core/api.py b/pandas/core/api.py
index e2ac57e37cba6..0c463d1a201b9 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -8,7 +8,8 @@
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.core.format import set_eng_float_format
-from pandas.core.index import Index, CategoricalIndex, Int64Index, Float64Index, MultiIndex
+from pandas.core.index import (Index, CategoricalIndex, Int64Index,
+ RangeIndex, Float64Index, MultiIndex)
from pandas.core.series import Series, TimeSeries
from pandas.core.frame import DataFrame
diff --git a/pandas/core/common.py b/pandas/core/common.py
index b80b7eecaeb11..7f955002a2c68 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -84,6 +84,8 @@ def _check(cls, inst):
ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", ))
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ",
("int64index", ))
+ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ",
+ ("rangeindex", ))
ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ",
("float64index", ))
ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ",
@@ -97,7 +99,8 @@ def _check(cls, inst):
ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ",
("categoricalindex", ))
ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ",
- ("index", "int64index", "float64index",
+ ("index", "int64index", "rangeindex",
+ "float64index",
"multiindex", "datetimeindex",
"timedeltaindex", "periodindex",
"categoricalindex"))
@@ -1796,11 +1799,8 @@ def is_bool_indexer(key):
def _default_index(n):
- from pandas.core.index import Int64Index
- values = np.arange(n, dtype=np.int64)
- result = Int64Index(values, name=None)
- result.is_unique = True
- return result
+ from pandas.core.index import RangeIndex
+ return RangeIndex(0, n, name=None)
def ensure_float(arr):
diff --git a/pandas/core/dtypes.py b/pandas/core/dtypes.py
index 1e358694de63e..e6adbc8500117 100644
--- a/pandas/core/dtypes.py
+++ b/pandas/core/dtypes.py
@@ -214,5 +214,6 @@ def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
- return isinstance(other, DatetimeTZDtype) and self.unit == other.unit \
- and self.tz == other.tz
+ return isinstance(other, DatetimeTZDtype) and \
+ self.unit == other.unit and \
+ str(self.tz) == str(other.tz)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 273166db12142..7f53e08b7c38b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5325,7 +5325,7 @@ def extract_index(data):
(lengths[0], len(index)))
raise ValueError(msg)
else:
- index = Index(np.arange(lengths[0]))
+ index = _default_index(lengths[0])
return _ensure_index(index)
@@ -5538,11 +5538,11 @@ def convert(arr):
def _get_names_from_index(data):
- index = lrange(len(data))
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
- return index
+ return _default_index(len(data))
+ index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 3832d0c69ed0e..63b748ada6afa 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -3,6 +3,8 @@
import warnings
import operator
from functools import partial
+from math import ceil, floor
+
from sys import getsizeof
import numpy as np
@@ -15,18 +17,23 @@
from pandas.compat import range, zip, lrange, lzip, u, map
from pandas import compat
from pandas.core import algorithms
-from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, PandasDelegate
+from pandas.core.base import (PandasObject, FrozenList, FrozenNDArray,
+ IndexOpsMixin, PandasDelegate)
import pandas.core.base as base
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
from pandas.core.missing import _clean_reindex_fill_method
-from pandas.core.common import (isnull, array_equivalent, is_dtype_equal, is_object_dtype,
- is_datetimetz, ABCSeries, ABCCategorical, ABCPeriodIndex,
- _values_from_object, is_float, is_integer, is_iterator, is_categorical_dtype,
+from pandas.core.common import (isnull, array_equivalent, is_dtype_equal,
+ is_object_dtype, is_datetimetz, ABCSeries,
+ ABCCategorical, ABCPeriodIndex,
+ _values_from_object, is_float, is_integer,
+ is_iterator, is_categorical_dtype,
_ensure_object, _ensure_int64, is_bool_indexer,
- is_list_like, is_bool_dtype, is_null_slice, is_integer_dtype)
+ is_list_like, is_bool_dtype, is_null_slice,
+ is_integer_dtype, is_int64_dtype)
from pandas.core.strings import StringAccessorMixin
+
from pandas.core.config import get_option
from pandas.io.common import PerformanceWarning
@@ -123,22 +130,33 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
if name is None and hasattr(data, 'name'):
name = data.name
- # no class inference!
if fastpath:
return cls._simple_new(data, name)
+ # range
+ if isinstance(data, RangeIndex):
+ return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
+ elif isinstance(data, range):
+ return RangeIndex.from_range(data, copy=copy, dtype=dtype,
+ name=name)
+
+ # categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
return CategoricalIndex(data, copy=copy, name=name, **kwargs)
- if isinstance(data, (np.ndarray, Index, ABCSeries)):
+ # index-like
+ elif isinstance(data, (np.ndarray, Index, ABCSeries)):
+
+ if issubclass(data.dtype.type,
+ np.datetime64) or is_datetimetz(data):
- if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
+
elif issubclass(data.dtype.type, np.timedelta64):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
@@ -327,7 +345,8 @@ def is_(self, other):
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
- return self._id is getattr(other, '_id', Ellipsis)
+ return self._id is getattr(
+ other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
@@ -455,14 +474,14 @@ def _coerce_scalar_to_index(self, item):
"""
return Index([item], dtype=self.dtype, **self._get_attributes_dict())
- def copy(self, names=None, name=None, dtype=None, deep=False):
- """
+ _index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
+ deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
@@ -474,6 +493,10 @@ def copy(self, names=None, name=None, dtype=None, deep=False):
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
+
+ @Appender(_index_shared_docs['copy'])
+ def copy(self, name=None, deep=False, dtype=None, **kwargs):
+ names = kwargs.get('names')
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if deep:
@@ -1060,9 +1083,9 @@ def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(form=form,
- klass=type(self),
- key=key,
- kind=type(key)))
+ klass=type(self),
+ key=key,
+ kind=type(key)))
def get_duplicates(self):
from collections import defaultdict
@@ -1076,6 +1099,10 @@ def get_duplicates(self):
def _cleanup(self):
self._engine.clear_mapping()
+ @cache_readonly
+ def _constructor(self):
+ return type(self)
+
@cache_readonly
def _engine(self):
# property, for now, slow to look up
@@ -1639,7 +1666,7 @@ def union(self, other):
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
- return self.__class__(data=result, name=name)
+ return self.__class__(result, name=name)
def intersection(self, other):
"""
@@ -2158,6 +2185,7 @@ def reindex(self, target, method=None, level=None, limit=None,
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
+
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
@@ -2221,9 +2249,9 @@ def _reindex_non_unique(self, target):
missing = com._ensure_platform_int(missing)
missing_labels = target.take(missing)
- missing_indexer = com._ensure_int64(l[~check])
+ missing_indexer = _ensure_int64(l[~check])
cur_labels = self.take(indexer[check])._values
- cur_indexer = com._ensure_int64(l[check])
+ cur_indexer = _ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
@@ -2442,7 +2470,7 @@ def _get_leaf_sorter(labels):
return np.empty(0, dtype='int64')
if len(labels) == 1:
- lab = com._ensure_int64(labels[0])
+ lab = _ensure_int64(labels[0])
sorter, _ = groupsort_indexer(lab, 1 + lab.max())
return sorter
@@ -2453,8 +2481,8 @@ def _get_leaf_sorter(labels):
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
- lab = com._ensure_int64(labels[-1])
- return lib.get_level_sorter(lab, com._ensure_int64(starts))
+ lab = _ensure_int64(labels[-1])
+ return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
@@ -2486,7 +2514,7 @@ def _get_leaf_sorter(labels):
join_index = left[left_indexer]
else:
- left_lev_indexer = com._ensure_int64(left_lev_indexer)
+ left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
@@ -2956,6 +2984,7 @@ def invalid_op(self, other=None):
invalid_op.__name__ = name
return invalid_op
+ cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__')
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
@@ -2970,40 +2999,82 @@ def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
return attrs
+ def _validate_for_numeric_unaryop(self, op, opstr):
+ """ validate if we can perform a numeric unary operation """
+
+ if not self._is_numeric_dtype:
+ raise TypeError("cannot evaluate a numeric op "
+ "{opstr} for type: {typ}".format(
+ opstr=opstr,
+ typ=type(self))
+ )
+
+ def _validate_for_numeric_binop(self, other, op, opstr):
+ """
+ return valid other, evaluate or raise TypeError
+ if we are not of the appropriate type
+
+ internal method called by ops
+ """
+ from pandas.tseries.offsets import DateOffset
+
+ # if we are an inheritor of numeric,
+ # but not actually numeric (e.g. DatetimeIndex/PeriodInde)
+ if not self._is_numeric_dtype:
+ raise TypeError("cannot evaluate a numeric op {opstr} "
+ "for type: {typ}".format(
+ opstr=opstr,
+ typ=type(self))
+ )
+
+ if isinstance(other, Index):
+ if not other._is_numeric_dtype:
+ raise TypeError("cannot evaluate a numeric op "
+ "{opstr} with type: {typ}".format(
+ opstr=type(self),
+ typ=type(other))
+ )
+ elif isinstance(other, np.ndarray) and not other.ndim:
+ other = other.item()
+
+ if isinstance(other, (Index, ABCSeries, np.ndarray)):
+ if len(self) != len(other):
+ raise ValueError("cannot evaluate a numeric op with "
+ "unequal lengths")
+ other = _values_from_object(other)
+ if other.dtype.kind not in ['f', 'i']:
+ raise TypeError("cannot evaluate a numeric op "
+ "with a non-numeric dtype")
+ elif isinstance(other, (DateOffset, np.timedelta64,
+ Timedelta, datetime.timedelta)):
+ # higher up to handle
+ pass
+ elif isinstance(other, (Timestamp, np.datetime64)):
+ # higher up to handle
+ pass
+ else:
+ if not (is_float(other) or is_integer(other)):
+ raise TypeError("can only perform ops with scalar values")
+
+ return other
+
@classmethod
- def _add_numeric_methods(cls):
+ def _add_numeric_methods_binary(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False):
def _evaluate_numeric_binop(self, other):
- import pandas.tseries.offsets as offsets
-
- # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodInde)
- if not self._is_numeric_dtype:
- raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
- typ=type(self)))
-
- if isinstance(other, Index):
- if not other._is_numeric_dtype:
- raise TypeError("cannot evaluate a numeric op {opstr} with type: {typ}".format(opstr=type(self),
- typ=type(other)))
- elif isinstance(other, np.ndarray) and not other.ndim:
- other = other.item()
- if isinstance(other, (Index, ABCSeries, np.ndarray)):
- if len(self) != len(other):
- raise ValueError("cannot evaluate a numeric op with unequal lengths")
- other = _values_from_object(other)
- if other.dtype.kind not in ['f','i']:
- raise TypeError("cannot evaluate a numeric op with a non-numeric dtype")
- elif isinstance(other, (offsets.DateOffset, np.timedelta64, Timedelta, datetime.timedelta)):
+ from pandas.tseries.offsets import DateOffset
+ other = self._validate_for_numeric_binop(other, op, opstr)
+
+ # handle time-based others
+ if isinstance(other, (DateOffset, np.timedelta64,
+ Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
- else:
- if not (is_float(other) or is_integer(other)):
- raise TypeError("can only perform ops with scalar values")
# if we are a reversed non-communative op
values = self.values
@@ -3016,28 +3087,18 @@ def _evaluate_numeric_binop(self, other):
return _evaluate_numeric_binop
- def _make_evaluate_unary(op, opstr):
-
- def _evaluate_numeric_unary(self):
-
- # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodInde)
- if not self._is_numeric_dtype:
- raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
- typ=type(self)))
- attrs = self._get_attributes_dict()
- attrs = self._maybe_update_attributes(attrs)
- return Index(op(self.values), **attrs)
-
- return _evaluate_numeric_unary
-
cls.__add__ = cls.__radd__ = _make_evaluate_binop(
operator.add, '__add__')
- cls.__sub__ = _make_evaluate_binop(operator.sub, '__sub__')
+ cls.__sub__ = _make_evaluate_binop(
+ operator.sub, '__sub__')
cls.__rsub__ = _make_evaluate_binop(
operator.sub, '__sub__', reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
operator.mul, '__mul__')
- cls.__mod__ = _make_evaluate_binop(operator.mod, '__mod__')
+ cls.__pow__ = cls.__rpow__ = _make_evaluate_binop(
+ operator.pow, '__pow__')
+ cls.__mod__ = _make_evaluate_binop(
+ operator.mod, '__mod__')
cls.__floordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(
@@ -3051,11 +3112,32 @@ def _evaluate_numeric_unary(self):
operator.div, '__div__')
cls.__rdiv__ = _make_evaluate_binop(
operator.div, '__div__', reversed=True)
+
+ @classmethod
+ def _add_numeric_methods_unary(cls):
+ """ add in numeric unary methods """
+
+ def _make_evaluate_unary(op, opstr):
+
+ def _evaluate_numeric_unary(self):
+
+ self._validate_for_numeric_unaryop(op, opstr)
+ attrs = self._get_attributes_dict()
+ attrs = self._maybe_update_attributes(attrs)
+ return Index(op(self.values), **attrs)
+
+ return _evaluate_numeric_unary
+
cls.__neg__ = _make_evaluate_unary(lambda x: -x, '__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x, '__pos__')
cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
+ @classmethod
+ def _add_numeric_methods(cls):
+ cls._add_numeric_methods_unary()
+ cls._add_numeric_methods_binary()
+
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
@@ -3828,6 +3910,560 @@ def _wrap_joined_index(self, joined, other):
Int64Index._add_logical_methods()
+class RangeIndex(Int64Index):
+
+ """
+ Immutable Index implementing a monotonic range. RangeIndex is a
+ memory-saving special case of Int64Index limited to representing
+ monotonic ranges.
+
+ Parameters
+ ----------
+ start : int (default: 0)
+ stop : int (default: 0)
+ step : int (default: 1)
+ name : object, optional
+ Name to be stored in the index
+ copy : bool, default False
+ Make a copy of input if its a RangeIndex
+
+ """
+
+ _typ = 'rangeindex'
+ _engine_type = _index.Int64Engine
+
+ def __new__(cls, start=None, stop=None, step=None, name=None, dtype=None,
+ fastpath=False, copy=False, **kwargs):
+
+ if fastpath:
+ return cls._simple_new(start, stop, step, name=name)
+
+ cls._validate_dtype(dtype)
+
+ # RangeIndex
+ if isinstance(start, RangeIndex):
+ if not copy:
+ return start
+ if name is None:
+ name = getattr(start, 'name', None)
+ start, stop, step = start._start, start._stop, start._step
+
+ # validate the arguments
+ def _ensure_int(value, field):
+ try:
+ new_value = int(value)
+ except:
+ new_value = value
+
+ if not is_integer(new_value) or new_value != value:
+ raise TypeError("RangeIndex(...) must be called with integers,"
+ " {value} was passed for {field}".format(
+ value=type(value).__name__,
+ field=field)
+ )
+
+ return new_value
+
+ if start is None:
+ start = 0
+ else:
+ start = _ensure_int(start, 'start')
+ if stop is None:
+ stop = start
+ start = 0
+ else:
+ stop = _ensure_int(stop, 'stop')
+ if step is None:
+ step = 1
+ elif step == 0:
+ raise ValueError("Step must not be zero")
+ else:
+ step = _ensure_int(step, 'step')
+
+ return cls._simple_new(start, stop, step, name)
+
+ @classmethod
+ def from_range(cls, data, name=None, dtype=None, **kwargs):
+ """ create RangeIndex from a range (py3), or xrange (py2) object """
+ if not isinstance(data, range):
+ raise TypeError(
+ '{0}(...) must be called with object coercible to a '
+ 'range, {1} was passed'.format(cls.__name__, repr(data)))
+
+ if compat.PY3:
+ step = data.step
+ stop = data.stop
+ start = data.start
+ else:
+ # seems we only have indexing ops to infer
+ # rather than direct accessors
+ if len(data) > 1:
+ step = data[1] - data[0]
+ stop = data[-1] + step
+ start = data[0]
+ elif len(data):
+ start = data[0]
+ stop = data[0] + 1
+ step = 1
+ else:
+ start = stop = 0
+ step = 1
+ return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
+
+ @classmethod
+ def _simple_new(cls, start, stop=None, step=None, name=None,
+ dtype=None, **kwargs):
+ result = object.__new__(cls)
+
+ # handle passed None, non-integers
+ if start is None or not is_integer(start):
+ try:
+ return RangeIndex(start, stop, step, name=name, **kwargs)
+ except TypeError:
+ return Index(start, stop, step, name=name, **kwargs)
+
+ result._start = start
+ result._stop = stop or 0
+ result._step = step or 1
+ result.name = name
+ for k, v in compat.iteritems(kwargs):
+ setattr(result, k, v)
+
+ result._reset_identity()
+ return result
+
+ @staticmethod
+ def _validate_dtype(dtype):
+ """ require dtype to be None or int64 """
+ if not (dtype is None or is_int64_dtype(dtype)):
+ raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')
+
+ @cache_readonly
+ def _constructor(self):
+ """ return the class to use for construction """
+ return Int64Index
+
+ @cache_readonly
+ def _data(self):
+ return np.arange(self._start, self._stop, self._step, dtype=np.int64)
+
+ @cache_readonly
+ def _int64index(self):
+ return Int64Index(self._data, name=self.name, fastpath=True)
+
+ def _get_data_as_items(self):
+ """ return a list of tuples of start, stop, step """
+ return [('start', self._start),
+ ('stop', self._stop),
+ ('step', self._step)]
+
+ def __reduce__(self):
+ d = self._get_attributes_dict()
+ d.update(dict(self._get_data_as_items()))
+ return _new_Index, (self.__class__, d), None
+
+ def _format_attrs(self):
+ """
+ Return a list of tuples of the (attr, formatted_value)
+ """
+ attrs = self._get_data_as_items()
+ if self.name is not None:
+ attrs.append(('name', default_pprint(self.name)))
+ return attrs
+
+ def _format_data(self):
+ # we are formatting thru the attributes
+ return None
+
+ @cache_readonly
+ def nbytes(self):
+ """ return the number of bytes in the underlying data """
+ return sum([getsizeof(getattr(self, v)) for v in
+ ['_start', '_stop', '_step']])
+
+ def memory_usage(self, deep=False):
+ """
+ Memory usage of my values
+
+ Parameters
+ ----------
+ deep : bool
+ Introspect the data deeply, interrogate
+ `object` dtypes for system-level memory consumption
+
+ Returns
+ -------
+ bytes used
+
+ Notes
+ -----
+ Memory usage does not include memory consumed by elements that
+ are not components of the array if deep=False
+
+ See Also
+ --------
+ numpy.ndarray.nbytes
+ """
+ return self.nbytes
+
+ @property
+ def dtype(self):
+ return np.dtype(np.int64)
+
+ @property
+ def is_unique(self):
+ """ return if the index has unique values """
+ return True
+
+ @property
+ def has_duplicates(self):
+ return False
+
+ def tolist(self):
+ return lrange(self._start, self._stop, self._step)
+
+ def _shallow_copy(self, values=None, **kwargs):
+ """ create a new Index, don't copy the data, use the same object attributes
+ with passed in attributes taking precedence """
+ if values is None:
+ return RangeIndex(name=self.name, fastpath=True,
+ **dict(self._get_data_as_items()))
+ else:
+ kwargs.setdefault('name', self.name)
+ return self._int64index._shallow_copy(values, **kwargs)
+
+ @Appender(_index_shared_docs['copy'])
+ def copy(self, name=None, deep=False, dtype=None, **kwargs):
+ self._validate_dtype(dtype)
+ if name is None:
+ name = self.name
+ return RangeIndex(name=name, fastpath=True,
+ **dict(self._get_data_as_items()))
+
+ def argsort(self, *args, **kwargs):
+ """
+ return an ndarray indexer of the underlying data
+
+ See also
+ --------
+ numpy.ndarray.argsort
+ """
+ if self._step > 0:
+ return np.arange(len(self))
+ else:
+ return np.arange(len(self) - 1, -1, -1)
+
+ def equals(self, other):
+ """
+ Determines if two Index objects contain the same elements.
+ """
+ if isinstance(other, RangeIndex):
+ ls = len(self)
+ lo = len(other)
+ return (ls == lo == 0 or
+ ls == lo == 1 and
+ self._start == other._start or
+ ls == lo and
+ self._start == other._start and
+ self._step == other._step)
+
+ return super(RangeIndex, self).equals(other)
+
+ def intersection(self, other):
+ """
+ Form the intersection of two Index objects. Sortedness of the result is
+ not guaranteed
+
+ Parameters
+ ----------
+ other : Index or array-like
+
+ Returns
+ -------
+ intersection : Index
+ """
+ if not isinstance(other, RangeIndex):
+ return super(RangeIndex, self).intersection(other)
+
+ # check whether intervals intersect
+ # deals with in- and decreasing ranges
+ int_low = max(min(self._start, self._stop + 1),
+ min(other._start, other._stop + 1))
+ int_high = min(max(self._stop, self._start + 1),
+ max(other._stop, other._start + 1))
+ if int_high <= int_low:
+ return RangeIndex()
+
+ # Method hint: linear Diophantine equation
+ # solve intersection problem
+ # performance hint: for identical step sizes, could use
+ # cheaper alternative
+ gcd, s, t = self._extended_gcd(self._step, other._step)
+
+ # check whether element sets intersect
+ if (self._start - other._start) % gcd:
+ return RangeIndex()
+
+ # calculate parameters for the RangeIndex describing the
+ # intersection disregarding the lower bounds
+ tmp_start = self._start + (other._start - self._start) * \
+ self._step // gcd * s
+ new_step = self._step * other._step // gcd
+ new_index = RangeIndex(tmp_start, int_high, new_step, fastpath=True)
+
+ # adjust index to limiting interval
+ new_index._start = new_index._min_fitting_element(int_low)
+ return new_index
+
+ def _min_fitting_element(self, lower_limit):
+ """Returns the value of the smallest element greater than the limit"""
+ round = ceil if self._step > 0 else floor
+ no_steps = round((float(lower_limit) - self._start) / self._step)
+ return self._start + self._step * no_steps
+
+ def _max_fitting_element(self, upper_limit):
+ """Returns the value of the largest element smaller than the limit"""
+ round = floor if self._step > 0 else ceil
+ no_steps = round((float(upper_limit) - self._start) / self._step)
+ return self._start + self._step * no_steps
+
+ def _extended_gcd(self, a, b):
+ """
+ Extended Euclidean algorithms to solve Bezout's identity:
+ a*x + b*y = gcd(x, y)
+ Finds one particular solution for x, y: s, t
+ Returns: gcd, s, t
+ """
+ s, old_s = 0, 1
+ t, old_t = 1, 0
+ r, old_r = b, a
+ while r:
+ quotient = old_r // r
+ old_r, r = r, old_r - quotient * r
+ old_s, s = s, old_s - quotient * s
+ old_t, t = t, old_t - quotient * t
+ return old_r, old_s, old_t
+
+ def union(self, other):
+ """
+ Form the union of two Index objects and sorts if possible
+
+ Parameters
+ ----------
+ other : Index or array-like
+
+ Returns
+ -------
+ union : Index
+ """
+ # note: could return a RangeIndex in some circumstances
+ return self._int64index.union(other)
+
+ def join(self, other, how='left', level=None, return_indexers=False):
+ """
+ *this is an internal non-public method*
+
+ Compute join_index and indexers to conform data
+ structures to the new index.
+
+ Parameters
+ ----------
+ other : Index
+ how : {'left', 'right', 'inner', 'outer'}
+ level : int or level name, default None
+ return_indexers : boolean, default False
+
+ Returns
+ -------
+ join_index, (left_indexer, right_indexer)
+ """
+ if how == 'outer' and self is not other:
+ # note: could return RangeIndex in more circumstances
+ return self._int64index.join(other, how, level, return_indexers)
+
+ return super(RangeIndex, self).join(other, how, level, return_indexers)
+
+ def __len__(self):
+ """
+ return the length of the RangeIndex
+ """
+ return max(0, -(-(self._stop - self._start) // self._step))
+
+ @property
+ def size(self):
+ return len(self)
+
+ def __getitem__(self, key):
+ """
+ Conserve RangeIndex type for scalar and slice keys.
+ """
+ super_getitem = super(RangeIndex, self).__getitem__
+
+ if np.isscalar(key):
+ n = int(key)
+ if n != key:
+ return super_getitem(key)
+ if n < 0:
+ n = len(self) + key
+ if n < 0 or n > len(self) - 1:
+ raise IndexError("index {key} is out of bounds for axis 0 "
+ "with size {size}".format(key=key,
+ size=len(self)))
+ return self._start + n * self._step
+
+ if isinstance(key, slice):
+
+ # This is basically PySlice_GetIndicesEx, but delegation to our
+ # super routines if we don't have integers
+
+ l = len(self)
+
+ # complete missing slice information
+ step = 1 if key.step is None else key.step
+ if key.start is None:
+ start = l - 1 if step < 0 else 0
+ else:
+ start = key.start
+
+ if start < 0:
+ start += l
+ if start < 0:
+ start = -1 if step < 0 else 0
+ if start >= l:
+ start = l - 1 if step < 0 else l
+
+ if key.stop is None:
+ stop = -1 if step < 0 else l
+ else:
+ stop = key.stop
+
+ if stop < 0:
+ stop += l
+ if stop < 0:
+ stop = -1
+ if stop > l:
+ stop = l
+
+ # delegate non-integer slices
+ if (start != int(start) and
+ stop != int(stop) and
+ step != int(step)):
+ return super_getitem(key)
+
+ # convert indexes to values
+ start = self._start + self._step * start
+ stop = self._start + self._step * stop
+ step = self._step * step
+
+ return RangeIndex(start, stop, step, self.name, fastpath=True)
+
+ # fall back to Int64Index
+ return super_getitem(key)
+
+ @classmethod
+ def _add_numeric_methods_binary(cls):
+ """ add in numeric methods, specialized to RangeIndex """
+
+ def _make_evaluate_binop(op, opstr, reversed=False, step=False):
+ """
+ Parameters
+ ----------
+ op : callable that accepts 2 parms
+ perform the binary op
+ opstr : string
+ string name of ops
+ reversed : boolean, default False
+ if this is a reversed op, e.g. radd
+ step : callable, optional, default to False
+ op to apply to the step parm if not None
+ if False, use the existing step
+ """
+
+ def _evaluate_numeric_binop(self, other):
+
+ other = self._validate_for_numeric_binop(other, op, opstr)
+ attrs = self._get_attributes_dict()
+ attrs = self._maybe_update_attributes(attrs)
+
+ if reversed:
+ self, other = other, self
+
+ try:
+ # alppy if we have an override
+ if step:
+ rstep = step(self._step, other)
+
+ # we don't have a representable op
+ # so return a base index
+ if not is_integer(rstep) or not rstep:
+ raise ValueError
+
+ else:
+ rstep = self._step
+
+ rstart = op(self._start, other)
+ rstop = op(self._stop, other)
+
+ result = RangeIndex(rstart,
+ rstop,
+ rstep,
+ **attrs)
+
+ # for compat with numpy / Int64Index
+ # even if we can represent as a RangeIndex, return
+ # as a Float64Index if we have float-like descriptors
+ if not all([is_integer(x) for x in
+ [rstart, rstop, rstep]]):
+ result = result.astype('float64')
+
+ return result
+
+ except (ValueError, TypeError, AttributeError):
+ pass
+
+ # convert to Int64Index ops
+ if isinstance(self, RangeIndex):
+ self = self.values
+ if isinstance(other, RangeIndex):
+ other = other.values
+
+ return Index(op(self, other), **attrs)
+
+ return _evaluate_numeric_binop
+
+ cls.__add__ = cls.__radd__ = _make_evaluate_binop(
+ operator.add, '__add__')
+ cls.__sub__ = _make_evaluate_binop(operator.sub, '__sub__')
+ cls.__rsub__ = _make_evaluate_binop(
+ operator.sub, '__sub__', reversed=True)
+ cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
+ operator.mul,
+ '__mul__',
+ step=operator.mul)
+ cls.__truediv__ = _make_evaluate_binop(
+ operator.truediv,
+ '__truediv__',
+ step=operator.truediv)
+ cls.__rtruediv__ = _make_evaluate_binop(
+ operator.truediv,
+ '__truediv__',
+ reversed=True,
+ step=operator.truediv)
+ if not compat.PY3:
+ cls.__div__ = _make_evaluate_binop(
+ operator.div,
+ '__div__',
+ step=operator.div)
+ cls.__rdiv__ = _make_evaluate_binop(
+ operator.div,
+ '__div__',
+ reversed=True,
+ step=operator.div)
+
+RangeIndex._add_numeric_methods()
+RangeIndex._add_logical_methods()
+
+
class Float64Index(NumericIndex):
"""
@@ -4658,10 +5294,12 @@ def get_level_values(self, level):
num = self._get_level_number(level)
unique = self.levels[num] # .values
labels = self.labels[num]
- filled = com.take_1d(unique._values, labels, fill_value=unique._na_value)
- values = unique._simple_new(filled, self.names[num],
- freq=getattr(unique, 'freq', None),
- tz=getattr(unique, 'tz', None))
+ filled = com.take_1d(unique.values, labels,
+ fill_value=unique._na_value)
+ _simple_new = unique._simple_new
+ values = _simple_new(filled, self.names[num],
+ freq=getattr(unique, 'freq', None),
+ tz=getattr(unique, 'tz', None))
return values
def format(self, space=2, sparsify=None, adjoin=True, names=False,
@@ -5740,7 +6378,7 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
- return convert_indexer(start.start,stop.stop,step)
+ return convert_indexer(start.start, stop.stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
# need to have like semantics here to right
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ed5b9093681f1..73e645039506f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -110,7 +110,7 @@ class Series(base.IndexOpsMixin, strings.StringAccessorMixin, generic.NDFrame,):
index : array-like or Index (1d)
Values must be unique and hashable, same length as data. Index
object (or other iterable of same length as data) Will default to
- np.arange(len(data)) if not provided. If both a dict and index
+ RangeIndex(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
@@ -920,7 +920,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
resetted : DataFrame, or Series if drop == True
"""
if drop:
- new_index = np.arange(len(self))
+ new_index = _default_index(len(self))
if level is not None and isinstance(self.index, MultiIndex):
if not isinstance(level, (tuple, list)):
level = [level]
@@ -1706,7 +1706,7 @@ def _try_kind_sort(arr):
bad = isnull(arr)
good = ~bad
- idx = np.arange(len(self))
+ idx = _default_index(len(self))
argsorted = _try_kind_sort(arr[good])
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index d5c02736a1cf5..0ba1254659540 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -49,8 +49,8 @@
from pandas.compat import u, PY3
from pandas import (
Timestamp, Period, Series, DataFrame, Panel, Panel4D,
- Index, MultiIndex, Int64Index, PeriodIndex, DatetimeIndex, Float64Index,
- NaT
+ Index, MultiIndex, Int64Index, RangeIndex, PeriodIndex,
+ DatetimeIndex, Float64Index, NaT
)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
@@ -273,7 +273,14 @@ def encode(obj):
tobj = type(obj)
if isinstance(obj, Index):
- if isinstance(obj, PeriodIndex):
+ if isinstance(obj, RangeIndex):
+ return {'typ': 'range_index',
+ 'klass': obj.__class__.__name__,
+ 'name': getattr(obj, 'name', None),
+ 'start': getattr(obj, '_start', None),
+ 'stop': getattr(obj, '_stop', None),
+ 'step': getattr(obj, '_step', None)}
+ elif isinstance(obj, PeriodIndex):
return {'typ': 'period_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
@@ -464,6 +471,11 @@ def decode(obj):
data = unconvert(obj['data'], dtype,
obj.get('compress'))
return globals()[obj['klass']](data, dtype=dtype, name=obj['name'])
+ elif typ == 'range_index':
+ return globals()[obj['klass']](obj['start'],
+ obj['stop'],
+ obj['step'],
+ name=obj['name'])
elif typ == 'multi_index':
dtype = dtype_for(obj['dtype'])
data = unconvert(obj['data'], dtype,
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index 5f41a803538e6..1690667ef743b 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -729,7 +729,7 @@ def test_misc_example(self):
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
-\\[right\\]: Int64Index\\(\\[0, 1\\], dtype='int64'\\)"""
+\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with tm.assertRaisesRegexp(AssertionError, error_msg):
assert_frame_equal(result, expected, check_index_type=False)
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 61b24c858b60d..bdbcb9c0d0d3e 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -253,6 +253,7 @@ def setUp(self):
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
+ 'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index c13afb34dfb84..38f5150516551 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -10,8 +10,10 @@
import pandas
import pandas as pd
-from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
- date_range, timedelta_range, Index, DatetimeIndex, TimedeltaIndex, isnull)
+from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
+ RangeIndex, Categorical, bdate_range,
+ date_range, timedelta_range, Index, DatetimeIndex,
+ isnull)
from pandas.compat import is_platform_windows, PY3, PY35
from pandas.io.pytables import _tables, TableIterator
@@ -1619,34 +1621,51 @@ def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
- index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
- df = DataFrame(np.arange(12).reshape(3,4), columns=index)
+ index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
+ ('B', 'a'), ('B', 'b')],
+ names=['first', 'second'])
+ df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
+ expected = df.copy()
+ if isinstance(expected.index, RangeIndex):
+ expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
- store.put('df',df)
- tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)
+ store.put('df', df)
+ tm.assert_frame_equal(store['df'], expected,
+ check_index_type=True,
+ check_column_type=True)
- store.put('df1',df,format='table')
- tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
+ store.put('df1', df, format='table')
+ tm.assert_frame_equal(store['df1'], expected,
+ check_index_type=True,
+ check_column_type=True)
- self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
- self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)
+ self.assertRaises(ValueError, store.put, 'df2', df,
+ format='table', data_columns=['A'])
+ self.assertRaises(ValueError, store.put, 'df3', df,
+ format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
- tm.assert_frame_equal(store['df2'], concat((df,df)))
+ tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
- df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))
+ df = DataFrame(np.arange(12).reshape(3, 4),
+ columns=Index(list('ABCD'), name='foo'))
+ expected = df.copy()
+ if isinstance(expected.index, RangeIndex):
+ expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
- store.put('df1',df,format='table')
- tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
+ store.put('df1', df, format='table')
+ tm.assert_frame_equal(store['df1'], expected,
+ check_index_type=True,
+ check_column_type=True)
def test_store_multiindex(self):
@@ -2478,11 +2497,6 @@ def test_backwards_compat_without_term_object(self):
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
- with assert_produces_warning(expected_warning=FutureWarning,
- check_stacklevel=False):
- result = store.select('wp', [('minor_axis', '=', ['A', 'B'])])
- expected = wp.loc[:, :, ['A', 'B']]
- assert_panel_equal(result, expected)
def test_same_name_scoping(self):
diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx
index be6e11ce70c76..892fee77eb177 100644
--- a/pandas/src/reduce.pyx
+++ b/pandas/src/reduce.pyx
@@ -179,8 +179,8 @@ cdef class SeriesBinGrouper:
if not values.flags.c_contiguous:
values = values.copy('C')
self.arr = values
- self.typ = type(series)
- self.ityp = type(series.index)
+ self.typ = series._constructor
+ self.ityp = series.index._constructor
self.index = series.index.values
self.name = getattr(series,'name',None)
@@ -306,8 +306,8 @@ cdef class SeriesGrouper:
if not values.flags.c_contiguous:
values = values.copy('C')
self.arr = values
- self.typ = type(series)
- self.ityp = type(series.index)
+ self.typ = series._constructor
+ self.ityp = series.index._constructor
self.index = series.index.values
self.name = getattr(series,'name',None)
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index a458445081be5..c5c005beeb69e 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -344,10 +344,13 @@ def test_info_memory_usage(self):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
df.columns = dtypes
+
# Ensure df size is as expected
+ # (cols * rows * bytes) + index size
df_size = df.memory_usage().sum()
- exp_size = (len(dtypes) + 1) * n * 8 # (cols + index) * rows * bytes
+ exp_size = len(dtypes) * n * 8 + df.index.nbytes
self.assertEqual(df_size, exp_size)
+
# Ensure number of cols in memory_usage is the same as df
size_df = np.size(df.columns.values) + 1 # index=True; default
self.assertEqual(size_df, np.size(df.memory_usage()))
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index d0c2d2bd15b4e..4dcc390787908 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -2,7 +2,10 @@
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta, time
-from pandas.compat import range, lrange, lzip, u, zip, PY3
+from pandas import compat
+from pandas.compat import (long, is_platform_windows, range,
+ lrange, lzip, u, zip, PY3)
+from itertools import combinations
import operator
import re
import nose
@@ -12,19 +15,18 @@
import numpy as np
from pandas import (period_range, date_range, Categorical, Series,
- Index, Float64Index, Int64Index, MultiIndex,
- CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex)
-from pandas.core.index import InvalidIndexError, NumericIndex
+ DataFrame, Index, Float64Index, Int64Index, RangeIndex,
+ MultiIndex, CategoricalIndex, DatetimeIndex,
+ TimedeltaIndex, PeriodIndex)
+from pandas.core.index import InvalidIndexError
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
-from pandas import compat
-from pandas.compat import long, is_platform_windows
+
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
-import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
@@ -90,33 +92,34 @@ def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
- lambda : idx * 1)
+ lambda: idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
- lambda : 1 * idx)
+ lambda: 1 * idx)
- div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
+ div_err = "cannot perform __truediv__" if PY3 \
+ else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
- lambda : idx / 1)
+ lambda: idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
- lambda : 1 / idx)
+ lambda: 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
- lambda : idx // 1)
+ lambda: idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
- lambda : 1 // idx)
+ lambda: 1 // idx)
def test_logical_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
'cannot perform all',
- lambda : idx.all())
+ lambda: idx.all())
tm.assertRaisesRegexp(TypeError,
'cannot perform any',
- lambda : idx.any())
+ lambda: idx.any())
def test_boolean_context_compat(self):
@@ -467,6 +470,10 @@ def test_delete_base(self):
if not len(idx):
continue
+ if isinstance(idx, RangeIndex):
+ # tested in class
+ continue
+
expected = idx[1:]
result = idx.delete(0)
self.assertTrue(result.equals(expected))
@@ -673,18 +680,19 @@ class TestIndex(Base, tm.TestCase):
def setUp(self):
self.indices = dict(
- unicodeIndex = tm.makeUnicodeIndex(100),
- strIndex = tm.makeStringIndex(100),
- dateIndex = tm.makeDateIndex(100),
- periodIndex = tm.makePeriodIndex(100),
- tdIndex = tm.makeTimedeltaIndex(100),
- intIndex = tm.makeIntIndex(100),
- floatIndex = tm.makeFloatIndex(100),
- boolIndex = Index([True,False]),
- catIndex = tm.makeCategoricalIndex(100),
- empty = Index([]),
- tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
- [1, 2, 3]))
+ unicodeIndex=tm.makeUnicodeIndex(100),
+ strIndex=tm.makeStringIndex(100),
+ dateIndex=tm.makeDateIndex(100),
+ periodIndex=tm.makePeriodIndex(100),
+ tdIndex=tm.makeTimedeltaIndex(100),
+ intIndex=tm.makeIntIndex(100),
+ rangeIndex=tm.makeIntIndex(100),
+ floatIndex=tm.makeFloatIndex(100),
+ boolIndex=Index([True, False]),
+ catIndex=tm.makeCategoricalIndex(100),
+ empty=Index([]),
+ tuples=MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
+ [1, 2, 3]))
)
self.setup_indices()
@@ -1065,7 +1073,6 @@ def test_empty_fancy(self):
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
- values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
@@ -2382,18 +2389,18 @@ def test_repr_roundtrip(self):
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
str(ci)
- tm.assert_index_equal(eval(repr(ci)),ci,exact=True)
+ tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
# formatting
- if compat.PY3:
+ if PY3:
str(ci)
else:
compat.text_type(ci)
# long format
# this is not reprable
- ci = CategoricalIndex(np.random.randint(0,5,size=100))
- if compat.PY3:
+ ci = CategoricalIndex(np.random.randint(0, 5, size=100))
+ if PY3:
str(ci)
else:
compat.text_type(ci)
@@ -2636,7 +2643,8 @@ def test_fillna_categorical(self):
self.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
- with tm.assertRaisesRegexp(ValueError, 'fill value must be in categories'):
+ with tm.assertRaisesRegexp(ValueError,
+ 'fill value must be in categories'):
idx.fillna(2.0)
@@ -2644,42 +2652,56 @@ class Numeric(Base):
def test_numeric_compat(self):
- idx = self._holder(np.arange(5,dtype='int64'))
- didx = self._holder(np.arange(5,dtype='int64')**2
- )
+ idx = self.create_index()
+ didx = idx * idx
+
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
- result = idx * idx
- tm.assert_index_equal(result, didx)
+ # in general not true for RangeIndex
+ if not isinstance(idx, RangeIndex):
+ result = idx * idx
+ tm.assert_index_equal(result, idx ** 2)
+ # truediv under PY3
result = idx / 1
- tm.assert_index_equal(result, idx)
+ expected = idx
+ if PY3:
+ expected = expected.astype('float64')
+ tm.assert_index_equal(result, expected)
+
+ result = idx / 2
+ if PY3:
+ expected = expected.astype('float64')
+ expected = Index(idx.values / 2)
+ tm.assert_index_equal(result, expected)
result = idx // 1
tm.assert_index_equal(result, idx)
- result = idx * np.array(5,dtype='int64')
- tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
+ result = idx * np.array(5, dtype='int64')
+ tm.assert_index_equal(result, idx * 5)
- result = idx * np.arange(5,dtype='int64')
+ result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
- result = idx * Series(np.arange(5,dtype='int64'))
+ result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
- result = idx * Series(np.arange(5,dtype='float64')+0.1)
- tm.assert_index_equal(result,
- Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
+ result = idx * Series(np.arange(5, dtype='float64') + 0.1)
+ expected = Float64Index(np.arange(5, dtype='float64') * (
+ np.arange(5, dtype='float64') + 0.1))
+ tm.assert_index_equal(result, expected)
# invalid
- self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
- self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
- self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
-
+ self.assertRaises(TypeError, lambda: idx * date_range('20130101',
+ periods=5)
+ )
+ self.assertRaises(ValueError, lambda: idx * idx[0:3])
+ self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_explicit_conversions(self):
@@ -2942,11 +2964,11 @@ def test_fillna_float64(self):
self.assert_index_equal(idx.fillna(0.1), exp)
# downcast
- exp = Int64Index([1, 2, 3], name='x')
+ exp = Float64Index([1.0, 2.0, 3.0], name='x')
self.assert_index_equal(idx.fillna(2), exp)
# object
- exp = Index([1, 'obj', 3], name='x')
+ exp = Index([1.0, 'obj', 3.0], name='x')
self.assert_index_equal(idx.fillna('obj'), exp)
@@ -3358,7 +3380,6 @@ def test_take_preserve_name(self):
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
- from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
@@ -3382,14 +3403,14 @@ def test_repr_roundtrip(self):
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
- if compat.PY3:
+ if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
- if compat.PY3:
+ if PY3:
bytes(idx)
else:
str(idx)
@@ -3399,40 +3420,791 @@ def test_slice_keep_name(self):
self.assertEqual(idx.name, idx[1:].name)
def test_ufunc_coercions(self):
- idx = pd.Int64Index([1, 2, 3, 4, 5], name='x')
+ idx = Int64Index([1, 2, 3, 4, 5], name='x')
result = np.sqrt(idx)
tm.assertIsInstance(result, Float64Index)
- exp = pd.Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')
+ exp = Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')
tm.assert_index_equal(result, exp)
result = np.divide(idx, 2.)
tm.assertIsInstance(result, Float64Index)
- exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
+ exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
tm.assert_index_equal(result, exp)
# _evaluate_numeric_binop
result = idx + 2.
tm.assertIsInstance(result, Float64Index)
- exp = pd.Float64Index([3., 4., 5., 6., 7.], name='x')
+ exp = Float64Index([3., 4., 5., 6., 7.], name='x')
tm.assert_index_equal(result, exp)
result = idx - 2.
tm.assertIsInstance(result, Float64Index)
- exp = pd.Float64Index([-1., 0., 1., 2., 3.], name='x')
+ exp = Float64Index([-1., 0., 1., 2., 3.], name='x')
tm.assert_index_equal(result, exp)
result = idx * 1.
tm.assertIsInstance(result, Float64Index)
- exp = pd.Float64Index([1., 2., 3., 4., 5.], name='x')
+ exp = Float64Index([1., 2., 3., 4., 5.], name='x')
tm.assert_index_equal(result, exp)
result = idx / 2.
tm.assertIsInstance(result, Float64Index)
- exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
+ exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
tm.assert_index_equal(result, exp)
+class TestRangeIndex(Numeric, tm.TestCase):
+ _holder = RangeIndex
+ _compat_props = ['shape', 'ndim', 'size', 'itemsize']
+
+ def setUp(self):
+ self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'))
+ self.setup_indices()
+
+ def create_index(self):
+ return RangeIndex(5)
+
+ def test_binops(self):
+ ops = [operator.add, operator.sub, operator.mul,
+ operator.floordiv, operator.truediv, pow]
+ scalars = [-1, 1, 2]
+ idxs = [RangeIndex(0, 10, 1),
+ RangeIndex(0, 20, 2),
+ RangeIndex(-10, 10, 2),
+ RangeIndex(5, -5, -1)]
+ for op in ops:
+ for a, b in combinations(idxs, 2):
+ result = op(a, b)
+ expected = op(Int64Index(a), Int64Index(b))
+ tm.assert_index_equal(result, expected)
+ for idx in idxs:
+ for scalar in scalars:
+ result = op(idx, scalar)
+ expected = op(Int64Index(idx), scalar)
+ tm.assert_index_equal(result, expected)
+
+ def test_too_many_names(self):
+ def testit():
+ self.index.names = ["roger", "harold"]
+ assertRaisesRegexp(ValueError, "^Length", testit)
+
+ def test_constructor(self):
+ index = RangeIndex(5)
+ expected = np.arange(5, dtype=np.int64)
+ self.assertIsInstance(index, RangeIndex)
+ self.assertEqual(index._start, 0)
+ self.assertEqual(index._stop, 5)
+ self.assertEqual(index._step, 1)
+ self.assertEqual(index.name, None)
+ tm.assert_index_equal(Index(expected), index)
+
+ index = RangeIndex(1, 5)
+ expected = np.arange(1, 5, dtype=np.int64)
+ self.assertIsInstance(index, RangeIndex)
+ self.assertEqual(index._start, 1)
+ tm.assert_index_equal(Index(expected), index)
+
+ index = RangeIndex(1, 5, 2)
+ expected = np.arange(1, 5, 2, dtype=np.int64)
+ self.assertIsInstance(index, RangeIndex)
+ self.assertEqual(index._step, 2)
+ tm.assert_index_equal(Index(expected), index)
+
+ index = RangeIndex()
+ expected = np.empty(0, dtype=np.int64)
+ self.assertIsInstance(index, RangeIndex)
+ self.assertEqual(index._start, 0)
+ self.assertEqual(index._stop, 0)
+ self.assertEqual(index._step, 1)
+ tm.assert_index_equal(Index(expected), index)
+
+ index = RangeIndex(name='Foo')
+ self.assertIsInstance(index, RangeIndex)
+ self.assertEqual(index.name, 'Foo')
+
+ # we don't allow on a bare Index
+ self.assertRaises(TypeError, lambda: Index(0, 1000))
+
+ # invalid args
+ for i in [Index(['a', 'b']),
+ Series(['a', 'b']),
+ np.array(['a', 'b']),
+ [],
+ 'foo',
+ datetime(2000, 1, 1, 0, 0),
+ np.arange(0, 10)]:
+ self.assertRaises(TypeError, lambda: RangeIndex(i))
+
+ def test_constructor_same(self):
+
+ # pass through, with and without copy
+ index = RangeIndex(1, 5, 2)
+ result = RangeIndex(index, copy=False)
+ self.assertTrue(result.identical(index))
+
+ result = RangeIndex(index, copy=True)
+ self.assertTrue(result.equals(index))
+
+ result = RangeIndex(index)
+ self.assertTrue(result.equals(index))
+
+ self.assertRaises(TypeError,
+ lambda: RangeIndex(index, dtype='float64'))
+
+ def test_constructor_range(self):
+
+ self.assertRaises(TypeError, lambda: RangeIndex(range(1, 5, 2)))
+
+ result = RangeIndex.from_range(range(1, 5, 2))
+ expected = RangeIndex(1, 5, 2)
+ self.assertTrue(result.equals(expected))
+
+ result = RangeIndex.from_range(range(5, 6))
+ expected = RangeIndex(5, 6, 1)
+ self.assertTrue(result.equals(expected))
+
+ # an invalid range
+ result = RangeIndex.from_range(range(5, 1))
+ expected = RangeIndex(0, 0, 1)
+ self.assertTrue(result.equals(expected))
+
+ result = RangeIndex.from_range(range(5))
+ expected = RangeIndex(0, 5, 1)
+ self.assertTrue(result.equals(expected))
+
+ result = Index(range(1, 5, 2))
+ expected = RangeIndex(1, 5, 2)
+ self.assertTrue(result.equals(expected))
+
+ self.assertRaises(TypeError,
+ lambda: Index(range(1, 5, 2), dtype='float64'))
+
+ def test_numeric_compat2(self):
+ # validate that we are handling the RangeIndex overrides to numeric ops
+ # and returning RangeIndex where possible
+
+ idx = RangeIndex(0, 10, 2)
+
+ result = idx * 2
+ expected = RangeIndex(0, 20, 4)
+ self.assertTrue(result.equals(expected))
+
+ result = idx + 2
+ expected = RangeIndex(2, 12, 2)
+ self.assertTrue(result.equals(expected))
+
+ result = idx - 2
+ expected = RangeIndex(-2, 8, 2)
+ self.assertTrue(result.equals(expected))
+
+ # truediv under PY3
+ result = idx / 2
+ if PY3:
+ expected = RangeIndex(0, 5, 1)
+ else:
+ expected = RangeIndex(0, 5, 1).astype('float64')
+ self.assertTrue(result.equals(expected))
+
+ result = idx / 4
+ expected = RangeIndex(0, 10, 2).values / 4
+ self.assertTrue(result.equals(expected))
+
+ result = idx // 1
+ expected = idx._int64index // 1
+ tm.assert_index_equal(result, expected, exact=True)
+
+ # __mul__
+ result = idx * idx
+ expected = Index(idx.values * idx.values)
+ tm.assert_index_equal(result, expected, exact=True)
+
+ # __pow__
+ idx = RangeIndex(0, 1000, 2)
+ result = idx ** 2
+ expected = idx._int64index ** 2
+ tm.assert_index_equal(Index(result.values), expected, exact=True)
+
+ # __floordiv__
+ idx = RangeIndex(0, 1000, 2)
+ result = idx // 2
+ expected = idx._int64index // 2
+ tm.assert_index_equal(result, expected, exact=True)
+
+ idx = RangeIndex(0, 1000, 1)
+ result = idx // 2
+ expected = idx._int64index // 2
+ tm.assert_index_equal(result, expected, exact=True)
+
+ def test_constructor_corner(self):
+ arr = np.array([1, 2, 3, 4], dtype=object)
+ index = RangeIndex(1, 5)
+ self.assertEqual(index.values.dtype, np.int64)
+ self.assertTrue(index.equals(arr))
+
+ # non-int raise Exception
+ self.assertRaises(TypeError, RangeIndex, '1', '10', '1')
+ self.assertRaises(TypeError, RangeIndex, 1.1, 10.2, 1.3)
+
+ # invalid passed type
+ self.assertRaises(TypeError,
+ lambda: RangeIndex(1, 5, dtype='float64'))
+
+ def test_copy(self):
+ i = RangeIndex(5, name='Foo')
+ i_copy = i.copy()
+ self.assertTrue(i_copy is not i)
+ self.assertTrue(i_copy.identical(i))
+ self.assertEqual(i_copy._start, 0)
+ self.assertEqual(i_copy._stop, 5)
+ self.assertEqual(i_copy._step, 1)
+ self.assertEqual(i_copy.name, 'Foo')
+
+ def test_repr(self):
+ i = RangeIndex(5, name='Foo')
+ result = repr(i)
+ if PY3:
+ expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
+ else:
+ expected = "RangeIndex(start=0, stop=5, step=1, name=u'Foo')"
+ self.assertTrue(result, expected)
+
+ result = eval(result)
+ self.assertTrue(result.equals(i))
+
+ i = RangeIndex(5, 0, -1)
+ result = repr(i)
+ expected = "RangeIndex(start=5, stop=0, step=-1)"
+ self.assertEqual(result, expected)
+
+ result = eval(result)
+ self.assertTrue(result.equals(i))
+
+ def test_insert(self):
+
+ idx = RangeIndex(5, name='Foo')
+ result = idx[1:4]
+
+ # test 0th element
+ self.assertTrue(idx[0:4].equals(
+ result.insert(0, idx[0])))
+
+ def test_delete(self):
+
+ idx = RangeIndex(5, name='Foo')
+ expected = idx[1:].astype(int)
+ result = idx.delete(0)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+
+ expected = idx[:-1].astype(int)
+ result = idx.delete(-1)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+
+ with tm.assertRaises((IndexError, ValueError)):
+ # either depending on numpy version
+ result = idx.delete(len(idx))
+
+ def test_view(self):
+ super(TestRangeIndex, self).test_view()
+
+ i = RangeIndex(name='Foo')
+ i_view = i.view()
+ self.assertEqual(i_view.name, 'Foo')
+
+ i_view = i.view('i8')
+ tm.assert_numpy_array_equal(i, i_view)
+
+ i_view = i.view(RangeIndex)
+ tm.assert_index_equal(i, i_view)
+
+ def test_dtype(self):
+ self.assertEqual(self.index.dtype, np.int64)
+
+ def test_is_monotonic(self):
+ self.assertTrue(self.index.is_monotonic)
+ self.assertTrue(self.index.is_monotonic_increasing)
+ self.assertFalse(self.index.is_monotonic_decreasing)
+
+ index = RangeIndex(4, 0, -1)
+ self.assertFalse(index.is_monotonic)
+ self.assertTrue(index.is_monotonic_decreasing)
+
+ index = RangeIndex(1, 2)
+ self.assertTrue(index.is_monotonic)
+ self.assertTrue(index.is_monotonic_increasing)
+ self.assertTrue(index.is_monotonic_decreasing)
+
+ def test_equals(self):
+
+ equiv_pairs = [(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
+ (RangeIndex(0), RangeIndex(1, -1, 3)),
+ (RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
+ (RangeIndex(0, -9, -2), RangeIndex(0, -10, -2))]
+ for left, right in equiv_pairs:
+ self.assertTrue(left.equals(right))
+ self.assertTrue(right.equals(left))
+
+ def test_logical_compat(self):
+ idx = self.create_index()
+ self.assertEqual(idx.all(), idx.values.all())
+ self.assertEqual(idx.any(), idx.values.any())
+
+ def test_identical(self):
+ i = Index(self.index.copy())
+ self.assertTrue(i.identical(self.index))
+
+ # we don't allow object dtype for RangeIndex
+ if isinstance(self.index, RangeIndex):
+ return
+
+ same_values_different_type = Index(i, dtype=object)
+ self.assertFalse(i.identical(same_values_different_type))
+
+ i = self.index.copy(dtype=object)
+ i = i.rename('foo')
+ same_values = Index(i, dtype=object)
+ self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
+
+ self.assertFalse(i.identical(self.index))
+ self.assertTrue(Index(same_values, name='foo', dtype=object
+ ).identical(i))
+
+ self.assertFalse(
+ self.index.copy(dtype=object)
+ .identical(self.index.copy(dtype='int64')))
+
+ def test_get_indexer(self):
+ target = RangeIndex(10)
+ indexer = self.index.get_indexer(target)
+ expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
+ self.assert_numpy_array_equal(indexer, expected)
+
+ def test_get_indexer_pad(self):
+ target = RangeIndex(10)
+ indexer = self.index.get_indexer(target, method='pad')
+ expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
+ self.assert_numpy_array_equal(indexer, expected)
+
+ def test_get_indexer_backfill(self):
+ target = RangeIndex(10)
+ indexer = self.index.get_indexer(target, method='backfill')
+ expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
+ self.assert_numpy_array_equal(indexer, expected)
+
+ def test_join_outer(self):
+ # join with Int64Index
+ other = Int64Index(np.arange(25, 14, -1))
+
+ res, lidx, ridx = self.index.join(other, how='outer',
+ return_indexers=True)
+ noidx_res = self.index.join(other, how='outer')
+ self.assertTrue(res.equals(noidx_res))
+
+ eres = Int64Index([0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25])
+ elidx = np.array([0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9,
+ -1, -1, -1, -1, -1, -1, -1], dtype=np.int64)
+ eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1, 0], dtype=np.int64)
+
+ self.assertIsInstance(res, Int64Index)
+ self.assertFalse(isinstance(res, RangeIndex))
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ # join with RangeIndex
+ other = RangeIndex(25, 14, -1)
+
+ res, lidx, ridx = self.index.join(other, how='outer',
+ return_indexers=True)
+ noidx_res = self.index.join(other, how='outer')
+ self.assertTrue(res.equals(noidx_res))
+
+ self.assertIsInstance(res, Int64Index)
+ self.assertFalse(isinstance(res, RangeIndex))
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ def test_join_inner(self):
+ # Join with non-RangeIndex
+ other = Int64Index(np.arange(25, 14, -1))
+
+ res, lidx, ridx = self.index.join(other, how='inner',
+ return_indexers=True)
+
+ # no guarantee of sortedness, so sort for comparison purposes
+ ind = res.argsort()
+ res = res.take(ind)
+ lidx = lidx.take(ind)
+ ridx = ridx.take(ind)
+
+ eres = Int64Index([16, 18])
+ elidx = np.array([8, 9])
+ eridx = np.array([9, 7])
+
+ self.assertIsInstance(res, Int64Index)
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ # Join two RangeIndex
+ other = RangeIndex(25, 14, -1)
+
+ res, lidx, ridx = self.index.join(other, how='inner',
+ return_indexers=True)
+
+ self.assertIsInstance(res, RangeIndex)
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ def test_join_left(self):
+ # Join with Int64Index
+ other = Int64Index(np.arange(25, 14, -1))
+
+ res, lidx, ridx = self.index.join(other, how='left',
+ return_indexers=True)
+ eres = self.index
+ eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7],
+ dtype=np.int64)
+
+ self.assertIsInstance(res, RangeIndex)
+ self.assertTrue(res.equals(eres))
+ self.assertIsNone(lidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ # Join with RangeIndex
+ other = Int64Index(np.arange(25, 14, -1))
+
+ res, lidx, ridx = self.index.join(other, how='left',
+ return_indexers=True)
+
+ self.assertIsInstance(res, RangeIndex)
+ self.assertTrue(res.equals(eres))
+ self.assertIsNone(lidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ def test_join_right(self):
+ # Join with Int64Index
+ other = Int64Index(np.arange(25, 14, -1))
+
+ res, lidx, ridx = self.index.join(other, how='right',
+ return_indexers=True)
+ eres = other
+ elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1],
+ dtype=np.int64)
+
+ self.assertIsInstance(other, Int64Index)
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assertIsNone(ridx)
+
+ # Join with RangeIndex
+ other = RangeIndex(25, 14, -1)
+
+ res, lidx, ridx = self.index.join(other, how='right',
+ return_indexers=True)
+ eres = other
+
+ self.assertIsInstance(other, RangeIndex)
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assertIsNone(ridx)
+
+ def test_join_non_int_index(self):
+ other = Index([3, 6, 7, 8, 10], dtype=object)
+
+ outer = self.index.join(other, how='outer')
+ outer2 = other.join(self.index, how='outer')
+ expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
+ 16, 18], dtype=object)
+ self.assertTrue(outer.equals(outer2))
+ self.assertTrue(outer.equals(expected))
+
+ inner = self.index.join(other, how='inner')
+ inner2 = other.join(self.index, how='inner')
+ expected = Index([6, 8, 10], dtype=object)
+ self.assertTrue(inner.equals(inner2))
+ self.assertTrue(inner.equals(expected))
+
+ left = self.index.join(other, how='left')
+ self.assertTrue(left.equals(self.index))
+
+ left2 = other.join(self.index, how='left')
+ self.assertTrue(left2.equals(other))
+
+ right = self.index.join(other, how='right')
+ self.assertTrue(right.equals(other))
+
+ right2 = other.join(self.index, how='right')
+ self.assertTrue(right2.equals(self.index))
+
+ def test_join_non_unique(self):
+ other = Index([4, 4, 3, 3])
+
+ res, lidx, ridx = self.index.join(other, return_indexers=True)
+
+ eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
+ elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.int64)
+ eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1],
+ dtype=np.int64)
+
+ self.assertTrue(res.equals(eres))
+ self.assert_numpy_array_equal(lidx, elidx)
+ self.assert_numpy_array_equal(ridx, eridx)
+
+ def test_join_self(self):
+ kinds = 'outer', 'inner', 'left', 'right'
+ for kind in kinds:
+ joined = self.index.join(self.index, how=kind)
+ self.assertIs(self.index, joined)
+
+ def test_intersection(self):
+ # intersect with Int64Index
+ other = Index(np.arange(1, 6))
+ result = self.index.intersection(other)
+ expected = np.sort(np.intersect1d(self.index.values, other.values))
+ self.assert_numpy_array_equal(result, expected)
+
+ result = other.intersection(self.index)
+ expected = np.sort(np.asarray(np.intersect1d(self.index.values,
+ other.values)))
+ self.assert_numpy_array_equal(result, expected)
+
+ # intersect with increasing RangeIndex
+ other = RangeIndex(1, 6)
+ result = self.index.intersection(other)
+ expected = np.sort(np.intersect1d(self.index.values, other.values))
+ self.assert_numpy_array_equal(result, expected)
+
+ # intersect with decreasing RangeIndex
+ other = RangeIndex(5, 0, -1)
+ result = self.index.intersection(other)
+ expected = np.sort(np.intersect1d(self.index.values, other.values))
+ self.assert_numpy_array_equal(result, expected)
+
+ def test_intersect_str_dates(self):
+ dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
+
+ i1 = Index(dt_dates, dtype=object)
+ i2 = Index(['aa'], dtype=object)
+ res = i2.intersection(i1)
+
+ self.assertEqual(len(res), 0)
+
+ def test_union_noncomparable(self):
+ from datetime import datetime, timedelta
+ # corner case, non-Int64Index
+ now = datetime.now()
+ other = Index([now + timedelta(i) for i in range(4)], dtype=object)
+ result = self.index.union(other)
+ expected = np.concatenate((self.index, other))
+ self.assert_numpy_array_equal(result, expected)
+
+ result = other.union(self.index)
+ expected = np.concatenate((other, self.index))
+ self.assert_numpy_array_equal(result, expected)
+
+ def test_nbytes(self):
+
+ # memory savings vs int index
+ i = RangeIndex(0, 1000)
+ self.assertTrue(i.nbytes < i.astype(int).nbytes / 10)
+
+ # constant memory usage
+ i2 = RangeIndex(0, 10)
+ self.assertEqual(i.nbytes, i2.nbytes)
+
+ def test_cant_or_shouldnt_cast(self):
+ # can't
+ self.assertRaises(TypeError, RangeIndex, 'foo', 'bar', 'baz')
+
+ # shouldn't
+ self.assertRaises(TypeError, RangeIndex, '0', '1', '2')
+
+ def test_view_Index(self):
+ self.index.view(Index)
+
+ def test_prevent_casting(self):
+ result = self.index.astype('O')
+ self.assertEqual(result.dtype, np.object_)
+
+ def test_take_preserve_name(self):
+ index = RangeIndex(1, 5, name='foo')
+ taken = index.take([3, 0, 1])
+ self.assertEqual(index.name, taken.name)
+
+ def test_print_unicode_columns(self):
+ df = pd.DataFrame(
+ {u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
+ repr(df.columns) # should not raise UnicodeDecodeError
+
+ def test_repr_roundtrip(self):
+ tm.assert_index_equal(eval(repr(self.index)), self.index)
+
+ def test_slice_keep_name(self):
+ idx = RangeIndex(1, 2, name='asdf')
+ self.assertEqual(idx.name, idx[1:].name)
+
+ def test_explicit_conversions(self):
+
+ # GH 8608
+ # add/sub are overridden explicitly for Float/Int Index
+ idx = RangeIndex(5)
+
+ # float conversions
+ arr = np.arange(5, dtype='int64') * 3.2
+ expected = Float64Index(arr)
+ fidx = idx * 3.2
+ tm.assert_index_equal(fidx, expected)
+ fidx = 3.2 * idx
+ tm.assert_index_equal(fidx, expected)
+
+ # interops with numpy arrays
+ expected = Float64Index(arr)
+ a = np.zeros(5, dtype='float64')
+ result = fidx - a
+ tm.assert_index_equal(result, expected)
+
+ expected = Float64Index(-arr)
+ a = np.zeros(5, dtype='float64')
+ result = a - fidx
+ tm.assert_index_equal(result, expected)
+
+ def test_duplicates(self):
+ for ind in self.indices:
+ if not len(ind):
+ continue
+ idx = self.indices[ind]
+ self.assertTrue(idx.is_unique)
+ self.assertFalse(idx.has_duplicates)
+
+ def test_ufunc_compat(self):
+ idx = RangeIndex(5)
+ result = np.sin(idx)
+ expected = Float64Index(np.sin(np.arange(5, dtype='int64')))
+ tm.assert_index_equal(result, expected)
+
+ def test_extended_gcd(self):
+ result = self.index._extended_gcd(6, 10)
+ self.assertEqual(result[0], result[1] * 6 + result[2] * 10)
+ self.assertEqual(2, result[0])
+
+ result = self.index._extended_gcd(10, 6)
+ self.assertEqual(2, result[1] * 10 + result[2] * 6)
+ self.assertEqual(2, result[0])
+
+ def test_min_fitting_element(self):
+ result = RangeIndex(0, 20, 2)._min_fitting_element(1)
+ self.assertEqual(2, result)
+
+ result = RangeIndex(1, 6)._min_fitting_element(1)
+ self.assertEqual(1, result)
+
+ result = RangeIndex(18, -2, -2)._min_fitting_element(1)
+ self.assertEqual(2, result)
+
+ result = RangeIndex(5, 0, -1)._min_fitting_element(1)
+ self.assertEqual(1, result)
+
+ def test_max_fitting_element(self):
+ result = RangeIndex(0, 20, 2)._max_fitting_element(17)
+ self.assertEqual(16, result)
+
+ result = RangeIndex(1, 6)._max_fitting_element(4)
+ self.assertEqual(4, result)
+
+ result = RangeIndex(18, -2, -2)._max_fitting_element(17)
+ self.assertEqual(16, result)
+
+ result = RangeIndex(5, 0, -1)._max_fitting_element(4)
+ self.assertEqual(4, result)
+
+ def test_pickle_compat_construction(self):
+ # RangeIndex() is a valid constructor
+ pass
+
+ def test_slice_specialised(self):
+
+ # scalar indexing
+ res = self.index[1]
+ expected = 2
+ self.assertEqual(res, expected)
+
+ res = self.index[-1]
+ expected = 18
+ self.assertEqual(res, expected)
+
+ # slicing
+ # slice value completion
+ index = self.index[:]
+ expected = self.index
+ self.assert_numpy_array_equal(index, expected)
+
+ # positive slice values
+ index = self.index[7:10:2]
+ expected = np.array([14, 18])
+ self.assert_numpy_array_equal(index, expected)
+
+ # negative slice values
+ index = self.index[-1:-5:-2]
+ expected = np.array([18, 14])
+ self.assert_numpy_array_equal(index, expected)
+
+ # stop overshoot
+ index = self.index[2:100:4]
+ expected = np.array([4, 12])
+ self.assert_numpy_array_equal(index, expected)
+
+ # reverse
+ index = self.index[::-1]
+ expected = self.index.values[::-1]
+ self.assert_numpy_array_equal(index, expected)
+
+ index = self.index[-8::-1]
+ expected = np.array([4, 2, 0])
+ self.assert_numpy_array_equal(index, expected)
+
+ index = self.index[-40::-1]
+ expected = np.array([])
+ self.assert_numpy_array_equal(index, expected)
+
+ index = self.index[40::-1]
+ expected = self.index.values[40::-1]
+ self.assert_numpy_array_equal(index, expected)
+
+ index = self.index[10::-1]
+ expected = self.index.values[::-1]
+ self.assert_numpy_array_equal(index, expected)
+
+ def test_len_specialised(self):
+
+ # make sure that our len is the same as
+ # np.arange calc
+
+ for step in np.arange(1, 6, 1):
+
+ arr = np.arange(0, 5, step)
+ i = RangeIndex(0, 5, step)
+ self.assertEqual(len(i), len(arr))
+
+ i = RangeIndex(5, 0, step)
+ self.assertEqual(len(i), 0)
+
+ for step in np.arange(-6, -1, 1):
+
+ arr = np.arange(5, 0, step)
+ i = RangeIndex(5, 0, step)
+ self.assertEqual(len(i), len(arr))
+
+ i = RangeIndex(0, 5, step)
+ self.assertEqual(len(i), 0)
+
+
class DatetimeLike(Base):
def test_shift_identity(self):
@@ -4122,24 +4894,25 @@ def test_numeric_compat(self):
result = idx // 1
tm.assert_index_equal(result, idx)
- result = idx * np.array(5,dtype='int64')
- tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
+ result = idx * np.array(5, dtype='int64')
+ tm.assert_index_equal(result,
+ self._holder(np.arange(5, dtype='int64') * 5))
- result = idx * np.arange(5,dtype='int64')
+ result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
- result = idx * Series(np.arange(5,dtype='int64'))
+ result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
- result = idx * Series(np.arange(5,dtype='float64')+0.1)
+ result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result,
- Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
-
+ self._holder(np.arange(5, dtype='float64') * (
+ np.arange(5, dtype='float64') + 0.1)))
# invalid
- self.assertRaises(TypeError, lambda : idx * idx)
- self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
- self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
+ self.assertRaises(TypeError, lambda: idx * idx)
+ self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
+ self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
@@ -4842,8 +5615,9 @@ def test_iter(self):
self.assertEqual(result, expected)
def test_legacy_pickle(self):
- if compat.PY3:
- raise nose.SkipTest("testing for legacy pickles not support on py3")
+ if PY3:
+ raise nose.SkipTest("testing for legacy pickles not "
+ "support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
@@ -5926,10 +6700,11 @@ def test_repr_with_unicode_data(self):
def test_repr_roundtrip(self):
- mi = MultiIndex.from_product([list('ab'),range(3)],names=['first','second'])
+ mi = MultiIndex.from_product([list('ab'), range(3)],
+ names=['first', 'second'])
str(mi)
- if compat.PY3:
+ if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
@@ -5943,16 +6718,17 @@ def test_repr_roundtrip(self):
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
- if compat.PY3:
+ if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
- mi = MultiIndex.from_product([list('abcdefg'),range(10)],names=['first','second'])
+ mi = MultiIndex.from_product([list('abcdefg'), range(10)],
+ names=['first', 'second'])
result = str(mi)
- if compat.PY3:
+ if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
@@ -5973,7 +6749,7 @@ def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
- if compat.PY3:
+ if PY3:
str(idx)
else:
compat.text_type(idx)
@@ -5982,7 +6758,7 @@ def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
- if compat.PY3:
+ if PY3:
bytes(idx)
else:
str(idx)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index c6d80a08ad61a..5c3e4c01a965a 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -4352,25 +4352,29 @@ def check_invalid(index, loc=None, iloc=None, ix=None, getitem=None):
# related 236/4850
# trying to access with a float index
- s = Series(np.arange(len(index)),index=index)
+ s = Series(np.arange(len(index)), index=index)
if iloc is None:
iloc = TypeError
- self.assertRaises(iloc, lambda : s.iloc[3.5])
+ self.assertRaises(iloc, lambda: s.iloc[3.5])
if loc is None:
loc = TypeError
- self.assertRaises(loc, lambda : s.loc[3.5])
+ self.assertRaises(loc, lambda: s.loc[3.5])
if ix is None:
ix = TypeError
- self.assertRaises(ix, lambda : s.ix[3.5])
+ self.assertRaises(ix, lambda: s.ix[3.5])
if getitem is None:
getitem = TypeError
- self.assertRaises(getitem, lambda : s[3.5])
+ self.assertRaises(getitem, lambda: s[3.5])
- for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
- tm.makeDateIndex, tm.makePeriodIndex ]:
- check_invalid(index())
- check_invalid(Index(np.arange(5) * 2.5),loc=KeyError, ix=KeyError, getitem=KeyError)
+ for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
+ tm.makeIntIndex, tm.makeRangeIndex,
+ tm.makeDateIndex, tm.makePeriodIndex]:
+ check_invalid(index())
+ check_invalid(Index(np.arange(5) * 2.5),
+ loc=KeyError,
+ ix=KeyError,
+ getitem=KeyError)
def check_index(index, error):
index = index()
@@ -4472,37 +4476,38 @@ def check_slicing_positional(index):
############
# IntIndex #
############
- index = tm.makeIntIndex()
- s = Series(np.arange(len(index),dtype='int64')+10,index+5)
+ for index in [tm.makeIntIndex(), tm.makeRangeIndex()]:
- # this is positional
- result1 = s[2:5]
- result4 = s.iloc[2:5]
- assert_series_equal(result1, result4)
+ s = Series(np.arange(len(index), dtype='int64') + 10, index + 5)
- # these are all label based
- result2 = s.ix[2:5]
- result3 = s.loc[2:5]
- assert_series_equal(result2, result3)
+ # this is positional
+ result1 = s[2:5]
+ result4 = s.iloc[2:5]
+ assert_series_equal(result1, result4)
+
+ # these are all label based
+ result2 = s.ix[2:5]
+ result3 = s.loc[2:5]
+ assert_series_equal(result2, result3)
- # float slicers on an int index
- expected = Series([11,12,13],index=[6,7,8])
- for method in [lambda x: x.loc, lambda x: x.ix]:
- result = method(s)[6.0:8.5]
- assert_series_equal(result, expected)
+ # float slicers on an int index
+ expected = Series([11, 12, 13], index=[6, 7, 8])
+ for method in [lambda x: x.loc, lambda x: x.ix]:
+ result = method(s)[6.0:8.5]
+ assert_series_equal(result, expected)
- result = method(s)[5.5:8.5]
- assert_series_equal(result, expected)
+ result = method(s)[5.5:8.5]
+ assert_series_equal(result, expected)
- result = method(s)[5.5:8.0]
- assert_series_equal(result, expected)
+ result = method(s)[5.5:8.0]
+ assert_series_equal(result, expected)
- # make all float slicing fail for [] with an int index
- self.assertRaises(TypeError, lambda : s[6.0:8])
- self.assertRaises(TypeError, lambda : s[6.0:8.0])
- self.assertRaises(TypeError, lambda : s[6:8.0])
+ # make all float slicing fail for [] with an int index
+ self.assertRaises(TypeError, lambda: s[6.0:8])
+ self.assertRaises(TypeError, lambda: s[6.0:8.0])
+ self.assertRaises(TypeError, lambda: s[6:8.0])
- check_iloc_compat(s)
+ check_iloc_compat(s)
##############
# FloatIndex #
@@ -4658,19 +4663,20 @@ def f():
self.assertRaises(FutureWarning, f)
# slices
- for index in [ tm.makeIntIndex, tm.makeFloatIndex,
- tm.makeStringIndex, tm.makeUnicodeIndex,
- tm.makeDateIndex, tm.makePeriodIndex ]:
+ for index in [tm.makeIntIndex, tm.makeRangeIndex, tm.makeFloatIndex,
+ tm.makeStringIndex, tm.makeUnicodeIndex,
+ tm.makeDateIndex, tm.makePeriodIndex]:
index = index(5)
- for s in [ Series(range(5),index=index), DataFrame(np.random.randn(5,2),index=index) ]:
+ for s in [Series(range(5), index=index),
+ DataFrame(np.random.randn(5, 2), index=index)]:
# getitem
- self.assertRaises(FutureWarning, lambda :
+ self.assertRaises(FutureWarning, lambda:
s.iloc[3.0:4])
- self.assertRaises(FutureWarning, lambda :
+ self.assertRaises(FutureWarning, lambda:
s.iloc[3.0:4.0])
- self.assertRaises(FutureWarning, lambda :
+ self.assertRaises(FutureWarning, lambda:
s.iloc[3:4.0])
# setitem
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index d37ac530d02e8..a2b1a84e78f22 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -826,6 +826,9 @@ def test_constructor(self):
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
+
+ # they are Index() and RangeIndex() which don't compare type equal
+ # but are just .equals
assert_series_equal(empty, empty2, check_index_type=False)
empty = Series(index=lrange(10))
@@ -1226,7 +1229,7 @@ def test_constructor_dict(self):
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_series_equal(
- result, expected, check_dtype=True, check_index_type=True,
+ result, expected, check_dtype=True,
check_series_type=True)
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
@@ -7418,6 +7421,7 @@ def test_reindex_nan(self):
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
+
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 0013a6579718a..269d272525ce6 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1324,7 +1324,7 @@ def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
- tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
@@ -1393,7 +1393,7 @@ def test_split_to_dataframe_expand(self):
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
- exp = Index([np.array(['nosplit']), np.array(['alsonosplit'])])
+ exp = idx
tm.assert_index_equal(result, exp)
self.assertEqual(result.nlevels, 1)
@@ -1446,7 +1446,7 @@ def test_rsplit_to_dataframe_expand(self):
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
- exp = Index([np.array(['nosplit']), np.array(['alsonosplit'])])
+ exp = idx
tm.assert_index_equal(result, exp)
self.assertEqual(result.nlevels, 1)
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 13c0b6a08f6e7..58c4285b8394e 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -283,9 +283,8 @@ def test_index_equal_message(self):
\\[right\\]: 2, MultiIndex\\(levels=\\[\\[u?'A', u?'B'\\], \\[1, 2, 3, 4\\]\\],
labels=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)"""
idx1 = pd.Index([1, 2, 3])
- idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4)])
- with assertRaisesRegexp(AssertionError, expected):
- assert_index_equal(idx1, idx2)
+ idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
+ ('B', 3), ('B', 4)])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=False)
@@ -471,8 +470,8 @@ def test_series_equal_message(self):
expected = """Series are different
Series length are different
-\\[left\\]: 3, Int64Index\\(\\[0, 1, 2\\], dtype='int64'\\)
-\\[right\\]: 4, Int64Index\\(\\[0, 1, 2, 3\\], dtype='int64'\\)"""
+\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
+\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3, 4]))
@@ -526,12 +525,11 @@ def test_frame_equal_message(self):
expected = """DataFrame are different
DataFrame shape \\(number of rows\\) are different
-\\[left\\]: 3, Int64Index\\(\\[0, 1, 2\\], dtype='int64'\\)
-\\[right\\]: 4, Int64Index\\(\\[0, 1, 2, 3\\], dtype='int64'\\)"""
+\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
+\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
with assertRaisesRegexp(AssertionError, expected):
- assert_frame_equal(pd.DataFrame({'A':[1, 2, 3]}),
- pd.DataFrame({'A':[1, 2, 3, 4]}))
-
+ assert_frame_equal(pd.DataFrame({'A': [1, 2, 3]}),
+ pd.DataFrame({'A': [1, 2, 3, 4]}))
expected = """DataFrame are different
@@ -539,9 +537,8 @@ def test_frame_equal_message(self):
\\[left\\]: 2, Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
\\[right\\]: 1, Index\\(\\[u?'A'\\], dtype='object'\\)"""
with assertRaisesRegexp(AssertionError, expected):
- assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]}),
- pd.DataFrame({'A':[1, 2, 3]}))
-
+ assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
+ pd.DataFrame({'A': [1, 2, 3]}))
expected = """DataFrame\\.index are different
@@ -549,10 +546,10 @@ def test_frame_equal_message(self):
\\[left\\]: Index\\(\\[u?'a', u?'b', u?'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'a', u?'b', u?'d'\\], dtype='object'\\)"""
with assertRaisesRegexp(AssertionError, expected):
- assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]},
- index=['a', 'b', 'c']),
- pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]},
- index=['a', 'b', 'd']))
+ assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
+ index=['a', 'b', 'c']),
+ pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
+ index=['a', 'b', 'd']))
expected = """DataFrame\\.columns are different
@@ -560,11 +557,10 @@ def test_frame_equal_message(self):
\\[left\\]: Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'A', u?'b'\\], dtype='object'\\)"""
with assertRaisesRegexp(AssertionError, expected):
- assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]},
- index=['a', 'b', 'c']),
- pd.DataFrame({'A':[1, 2, 3], 'b':[4, 5, 6]},
- index=['a', 'b', 'c']))
-
+ assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
+ index=['a', 'b', 'c']),
+ pd.DataFrame({'A': [1, 2, 3], 'b': [4, 5, 6]},
+ index=['a', 'b', 'c']))
expected = """DataFrame\\.iloc\\[:, 1\\] are different
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index bf37bd4afe1da..2a1e59154f3d1 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -731,13 +731,13 @@ def test_ops_compat(self):
# multiply
for offset in offsets:
- self.assertRaises(TypeError, lambda : rng * offset)
+ self.assertRaises(TypeError, lambda: rng * offset)
# divide
- expected = Int64Index((np.arange(10)+1)*12,name='foo')
+ expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
- tm.assert_index_equal(result,expected)
+ tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 1c21863415c62..685d89fee53b5 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -36,8 +36,9 @@
from pandas.computation import expressions as expr
-from pandas import (bdate_range, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
- Index, MultiIndex, Series, DataFrame, Panel, Panel4D)
+from pandas import (bdate_range, CategoricalIndex, DatetimeIndex,
+ TimedeltaIndex, PeriodIndex, RangeIndex, Index, MultiIndex,
+ Series, DataFrame, Panel, Panel4D)
from pandas.util.decorators import deprecate
from pandas import _testing
from pandas.io.common import urlopen
@@ -599,19 +600,22 @@ def assert_equal(a, b, msg=""):
...
AssertionError: 5.2 was really a dead parrot: 5.2 != 1.2
"""
- assert a == b, "%s: %r != %r" % (msg.format(a,b), a, b)
+ assert a == b, "%s: %r != %r" % (msg.format(a, b), a, b)
-def assert_index_equal(left, right, exact=False, check_names=True,
- check_less_precise=False, check_exact=True, obj='Index'):
+def assert_index_equal(left, right, exact='equiv', check_names=True,
+ check_less_precise=False, check_exact=True,
+ obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
- exact : bool, default False
- Whether to check the Index class, dtype and inferred_type are identical.
+ exact : bool / string {'equiv'}, default False
+ Whether to check the Index class, dtype and inferred_type
+ are identical. If 'equiv', then RangeIndex can be substitued for
+ Int64Index as well
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool, default False
@@ -626,9 +630,19 @@ def assert_index_equal(left, right, exact=False, check_names=True,
def _check_types(l, r, obj='Index'):
if exact:
- if type(l) != type(r):
- msg = '{0} classes are different'.format(obj)
- raise_assert_detail(obj, msg, l, r)
+
+ if exact == 'equiv':
+ if type(l) != type(r):
+ # allow equivalence of Int64Index/RangeIndex
+ types = set([type(l).__name__, type(r).__name__])
+ if len(types - set(['Int64Index', 'RangeIndex'])):
+ msg = '{0} classes are not equivalent'.format(obj)
+ raise_assert_detail(obj, msg, l, r)
+ else:
+ if type(l) != type(r):
+ msg = '{0} classes are different'.format(obj)
+ raise_assert_detail(obj, msg, l, r)
+
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
@@ -642,7 +656,8 @@ def _get_ilevel_values(index, level):
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
- values = unique._simple_new(filled, index.names[level],
+ values = unique._simple_new(filled,
+ name=index.names[level],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
return values
@@ -652,7 +667,7 @@ def _get_ilevel_values(index, level):
assertIsInstance(right, Index, '[index] ')
# class / dtype comparison
- _check_types(left, right)
+ _check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
@@ -876,7 +891,7 @@ def assert_numpy_array_equal(left, right,
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
- check_index_type=True,
+ check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
@@ -892,8 +907,9 @@ def assert_series_equal(left, right, check_dtype=True,
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
- check_index_type : bool, default False
- Whether to check the Index class, dtype and inferred_type are identical.
+ check_index_type : bool / string {'equiv'}, default False
+ Whether to check the Index class, dtype and inferred_type
+ are identical.
check_series_type : bool, default False
Whether to check the Series class is identical.
check_less_precise : bool, default False
@@ -958,8 +974,8 @@ def assert_series_equal(left, right, check_dtype=True,
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
- check_index_type=True,
- check_column_type=True,
+ check_index_type='equiv',
+ check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
@@ -976,10 +992,12 @@ def assert_frame_equal(left, right, check_dtype=True,
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
- check_index_type : bool, default False
- Whether to check the Index class, dtype and inferred_type are identical.
- check_column_type : bool, default False
- Whether to check the columns class, dtype and inferred_type are identical.
+ check_index_type : bool / string {'equiv'}, default False
+ Whether to check the Index class, dtype and inferred_type
+ are identical.
+ check_column_type : bool / string {'equiv'}, default False
+ Whether to check the columns class, dtype and inferred_type
+ are identical.
check_frame_type : bool, default False
Whether to check the DataFrame class is identical.
check_less_precise : bool, default False
@@ -1106,6 +1124,7 @@ def assert_copy(iter1, iter2, **eql_kwargs):
def getCols(k):
return string.ascii_uppercase[:k]
+
def getArangeMat():
return np.arange(N * K).reshape((N, K))
@@ -1118,38 +1137,50 @@ def makeStringIndex(k=10, name=None):
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k))
+
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
- return CategoricalIndex(np.random.choice(x,k), name=name)
+ return CategoricalIndex(np.random.choice(x, k), name=name)
+
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
- return Index([False,True], name=name)
- return Index([False,True] + [False]*(k-2), name=name)
+ return Index([False, True], name=name)
+ return Index([False, True] + [False] * (k - 2), name=name)
+
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
+
+def makeRangeIndex(k=10, name=None):
+ return RangeIndex(0, k, 1, name=name)
+
+
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
+
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
+
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
+
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
+
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
@@ -1165,6 +1196,7 @@ def all_index_generator(k=10):
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
+
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time-seires.
| closes #939
replaces #9977
ToDo:
- [x] test for packers.py
- [x] more code review
Much commentary on the original issue #9977
but in essence `RangeIndex` is a complete replacement for `Int64Index`, which all indexing semantics and interop. This is now the default indexer upon construction. It should be completely transparent to the end user.
It provides a constant memory footprint for any size of index. Their is a tiny penalty for < about 10 elements (which is actually trivial to fix, e.g. we could simply instantiate an `Int64Index` for these cases). But I think it is more natural to _always_ get a `RangeIndex`.
One other change here is to `assert_index_equal` the `exact` kw now takes `equiv` as the default (in addition to a boolean) to allow for exact comparisions except for `Int64Index`/`RangeIndex` are considered equivalent (as are string/unicode as inferred types, this was pre-existing).
```
In [1]: s = Series(range(5))
In [2]: s.index
Out[2]: RangeIndex(start=0, stop=5, step=1)
In [3]: s.nbytes
Out[3]: 40
In [4]: s.index.nbytes
Out[4]: 72
In [5]: s.index.astype(int).nbytes
Out[5]: 40
In [6]: s = Series(range(100))
In [7]: s.index.astype(int).nbytes
Out[7]: 800
In [8]: s.index.nbytes
Out[8]: 72
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11892 | 2015-12-23T22:10:54Z | 2016-01-16T17:36:53Z | 2016-01-16T17:36:53Z | 2016-01-17T01:56:47Z |
Fix passing args in groupby plot (GH11805) | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 4614ce9acf3d5..89ffd0b015846 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -330,8 +330,9 @@ Bug Fixes
- Bug in ``to_numeric`` where it does not raise if input is more than one dimension (:issue:`11776`)
- Bug in parsing timezone offset strings with non-zero minutes (:issue:`11708`)
-
- Bug in ``df.plot`` using incorrect colors for bar plots under matplotlib 1.5+ (:issue:`11614`)
+- Bug in the ``groupby`` ``plot`` method when using keyword arguments (:issue:`11805`).
+
- Bug in ``.loc`` result with duplicated key may have ``Index`` with incorrect dtype (:issue:`11497`)
- Bug in ``pd.rolling_median`` where memory allocation failed even with sufficient memory (:issue:`11696`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 5428ee5484bfa..31ed2c38cd29f 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -287,7 +287,7 @@ def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
- def f(self, *args, **kwargs):
+ def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 27ca4c157f594..ff69068a3495c 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -3718,7 +3718,7 @@ def test_plain_axes(self):
def test_passed_bar_colors(self):
import matplotlib as mpl
- color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1,2,3]]).plot(kind="bar", cmap=colormap)
self.assertEqual(color_tuples, [c.get_facecolor() for c in barplot.patches])
@@ -3781,6 +3781,21 @@ def test_plot_submethod_works(self):
df.groupby('z')['x'].plot.line()
tm.close()
+ def test_plot_kwargs(self):
+
+ df = DataFrame({'x': [1, 2, 3, 4, 5],
+ 'y': [1, 2, 3, 2, 1],
+ 'z': list('ababa')})
+
+ res = df.groupby('z').plot(kind='scatter', x='x', y='y')
+ # check that a scatter plot is effectively plotted: the axes should
+ # contain a PathCollection from the scatter plot (GH11805)
+ self.assertEqual(len(res['a'].collections), 1)
+
+ res = df.groupby('z').plot.scatter(x='x', y='y')
+ self.assertEqual(len(res['a'].collections), 1)
+
+
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, np.ndarray):
| Closes https://github.com/pydata/pandas/issues/11805
So probably all groupby plot calls that use kwargs are broken in 0.17.x.
@shoyer it was only a very small change actually!
Still need to add tests.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11891 | 2015-12-23T21:18:33Z | 2015-12-28T10:08:29Z | 2015-12-28T10:08:29Z | 2015-12-28T10:08:29Z |
DOC: add examples of database drivers (GH11686) | diff --git a/doc/source/install.rst b/doc/source/install.rst
index b099186f208ae..0f2f7e6f83d78 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -247,6 +247,13 @@ Optional Dependencies
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended.
+ * Besides SQLAlchemy, you also need a database specific driver.
+ Examples of such drivers are `psycopg2 <http://initd.org/psycopg/>`__ for PostgreSQL
+ or `pymysql <https://github.com/PyMySQL/PyMySQL>`__ for MySQL. For
+ `SQLite <https://docs.python.org/3.5/library/sqlite3.html>`__ this is
+ included in Python's standard library by default.
+ You can find an overview of supported drivers for each SQL dialect in the
+ `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__.
* `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
* `statsmodels <http://statsmodels.sourceforge.net/>`__
* Needed for parts of :mod:`pandas.stats`
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 807838edb6bc4..041daaeb3b12f 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3741,8 +3741,13 @@ SQL Queries
The :mod:`pandas.io.sql` module provides a collection of query wrappers to both
facilitate data retrieval and to reduce dependency on DB-specific API. Database abstraction
-is provided by SQLAlchemy if installed, in addition you will need a driver library for
-your database.
+is provided by SQLAlchemy if installed. In addition you will need a driver library for
+your database. Examples of such drivers are `psycopg2 <http://initd.org/psycopg/>`__
+for PostgreSQL or `pymysql <https://github.com/PyMySQL/PyMySQL>`__ for MySQL.
+For `SQLite <https://docs.python.org/3.5/library/sqlite3.html>`__ this is
+included in Python's standard library by default.
+You can find an overview of supported drivers for each SQL dialect in the
+`SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__.
.. versionadded:: 0.14.0
@@ -3780,7 +3785,7 @@ To connect with SQLAlchemy you use the :func:`create_engine` function to create
object from database URI. You only need to create the engine once per database you are
connecting to.
For more information on :func:`create_engine` and the URI formatting, see the examples
-below and the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__
+below and the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/latest/core/engines.html>`__
.. ipython:: python
@@ -3994,7 +3999,7 @@ connecting to.
# or absolute, starting with a slash:
engine = create_engine('sqlite:////absolute/path/to/foo.db')
-For more information see the examples the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__
+For more information see the examples the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/latest/core/engines.html>`__
Advanced SQLAlchemy queries
| Closes #11686
| https://api.github.com/repos/pandas-dev/pandas/pulls/11888 | 2015-12-23T13:45:23Z | 2015-12-23T19:09:00Z | 2015-12-23T19:09:00Z | 2015-12-23T19:09:01Z |
Add examples for pandas.*.sample | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d3cd0840782b4..85f23b988778f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -748,10 +748,10 @@ def bool(self):
def __abs__(self):
return self.abs()
-
+
def __round__(self,decimals=0):
return self.round(decimals)
-
+
#----------------------------------------------------------------------
# Array Interface
@@ -2189,6 +2189,48 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
Returns
-------
A new object of same type as caller.
+
+ Examples
+ --------
+
+ Generate an example ``Series`` and ``DataFrame``:
+
+ >>> s = pd.Series(np.random.randn(50))
+ >>> s.head()
+ 0 -0.038497
+ 1 1.820773
+ 2 -0.972766
+ 3 -1.598270
+ 4 -1.095526
+ dtype: float64
+ >>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD'))
+ >>> df.head()
+ A B C D
+ 0 0.016443 -2.318952 -0.566372 -1.028078
+ 1 -1.051921 0.438836 0.658280 -0.175797
+ 2 -1.243569 -0.364626 -0.215065 0.057736
+ 3 1.768216 0.404512 -0.385604 -1.457834
+ 4 1.072446 -1.137172 0.314194 -0.046661
+
+ Next extract a random sample from both of these objects...
+
+ 3 random elements from the ``Series``:
+
+ >>> s.sample(n=3)
+ 27 -0.994689
+ 55 -1.049016
+ 67 -0.224565
+ dtype: float64
+
+ And a random 10% of the ``DataFrame`` with replacement:
+
+ >>> df.sample(frac=0.1, replace=True)
+ A B C D
+ 35 1.981780 0.142106 1.817165 -0.290805
+ 49 -1.336199 -0.448634 -0.789640 0.217116
+ 40 0.823173 -0.078816 1.009536 1.015108
+ 15 1.421154 -0.055301 -1.922594 -0.019696
+ 6 -0.148339 0.832938 1.787600 -1.383767
"""
if axis is None:
| The following doc files are generated from the sample() function from
pandas/core/generic.py:
pandas.DataFrame.sample.html
pandas.Panel.sample.html
pandas.Panel4D.sample.html
pandas.Series.sample.html
Examples for each have been added to the sample() function docstring.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11883 | 2015-12-22T05:54:01Z | 2015-12-23T14:27:42Z | 2015-12-23T14:27:42Z | 2015-12-23T15:29:25Z |
Doc: added names parameter in read_excel | diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 2b9ba56447dee..106d263f56093 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -71,7 +71,7 @@ def get_writer(engine_name):
raise ValueError("No Excel writer '%s'" % engine_name)
def read_excel(io, sheetname=0, header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
+ index_col=None, names=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None,
convert_float=True, has_index_names=None, converters=None,
engine=None, **kwds):
@@ -116,6 +116,9 @@ def read_excel(io, sheetname=0, header=0, skiprows=None, skip_footer=0,
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``
+ names : array-like, default None
+ List of column names to use. If file contains no header row,
+ then you should explicitly pass header=None
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
| closes #11468
Please Review.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11874 | 2015-12-21T00:28:23Z | 2015-12-23T14:50:41Z | 2015-12-23T14:50:41Z | 2015-12-23T14:50:55Z |
raise NotImplemented for date parsing args in read_excel #11544 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 21e3e86e07f37..c8a83029bed9c 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -340,4 +340,8 @@ Bug Fixes
- Bug in ``Index`` prevents copying name of passed ``Index``, when a new name is not provided (:issue:`11193`)
- Bug in ``read_excel`` failing to read any non-empty sheets when empty sheets exist and ``sheetname=None`` (:issue:`11711`)
+
+- Bug in ``read_excel`` failing to raise ``NotImplemented`` error when keywords `parse_dates` and `date_parser` are provided (:issue:`11544`)
+
- Bug in ``read_sql`` with pymysql connections failing to return chunked data (:issue:`11522`)
+
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index dec7da513fb42..2b9ba56447dee 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -293,7 +293,14 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
stacklevel=3)
if 'chunksize' in kwds:
- raise NotImplementedError("Reading an Excel file in chunks "
+ raise NotImplementedError("chunksize keyword of read_excel "
+ "is not implemented")
+ if parse_dates:
+ raise NotImplementedError("parse_dates keyword of read_excel "
+ "is not implemented")
+
+ if date_parser is not None:
+ raise NotImplementedError("date_parser keyword of read_excel "
"is not implemented")
import xlrd
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index cc9f8c564ebf5..8023c25cdd660 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -167,10 +167,9 @@ def test_parse_cols_int(self):
dfref = self.get_csv_refdf('test1')
dfref = dfref.reindex(columns=['A', 'B', 'C'])
- df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
- parse_cols=3)
+ df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols=3)
df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
- parse_dates=True, parse_cols=3)
+ parse_cols=3)
# TODO add index to xls file)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
@@ -179,10 +178,9 @@ def test_parse_cols_list(self):
dfref = self.get_csv_refdf('test1')
dfref = dfref.reindex(columns=['B', 'C'])
- df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
+ df1 = self.get_exceldf('test1', 'Sheet1', index_col=0,
parse_cols=[0, 2, 3])
df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
- parse_dates=True,
parse_cols=[0, 2, 3])
# TODO add index to xls file)
tm.assert_frame_equal(df1, dfref, check_names=False)
@@ -193,28 +191,28 @@ def test_parse_cols_str(self):
dfref = self.get_csv_refdf('test1')
df1 = dfref.reindex(columns=['A', 'B', 'C'])
- df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
+ df2 = self.get_exceldf('test1', 'Sheet1', index_col=0,
parse_cols='A:D')
df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
- parse_dates=True, parse_cols='A:D')
+ parse_cols='A:D')
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
- df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
+ df2 = self.get_exceldf('test1', 'Sheet1', index_col=0,
parse_cols='A,C,D')
df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
- parse_dates=True, parse_cols='A,C,D')
+ parse_cols='A,C,D')
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
- df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
+ df2 = self.get_exceldf('test1', 'Sheet1', index_col=0,
parse_cols='A,C:D')
df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
- parse_dates=True, parse_cols='A,C:D')
+ parse_cols='A,C:D')
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@@ -251,23 +249,23 @@ def test_excel_table_sheet_by_index(self):
excel = self.get_excelfile('test1')
dfref = self.get_csv_refdf('test1')
- df1 = read_excel(excel, 0, index_col=0, parse_dates=True)
- df2 = read_excel(excel, 1, skiprows=[1], index_col=0, parse_dates=True)
+ df1 = read_excel(excel, 0, index_col=0)
+ df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
- df1 = excel.parse(0, index_col=0, parse_dates=True)
- df2 = excel.parse(1, skiprows=[1], index_col=0, parse_dates=True)
+ df1 = excel.parse(0, index_col=0)
+ df2 = excel.parse(1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
- df3 = read_excel(excel, 0, index_col=0, parse_dates=True, skipfooter=1)
- df4 = read_excel(excel, 0, index_col=0, parse_dates=True, skip_footer=1)
+ df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
+ df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
tm.assert_frame_equal(df3, df1.ix[:-1])
tm.assert_frame_equal(df3, df4)
- df3 = excel.parse(0, index_col=0, parse_dates=True, skipfooter=1)
- df4 = excel.parse(0, index_col=0, parse_dates=True, skip_footer=1)
+ df3 = excel.parse(0, index_col=0, skipfooter=1)
+ df4 = excel.parse(0, index_col=0, skip_footer=1)
tm.assert_frame_equal(df3, df1.ix[:-1])
tm.assert_frame_equal(df3, df4)
@@ -279,16 +277,15 @@ def test_excel_table(self):
dfref = self.get_csv_refdf('test1')
- df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True)
- df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
- parse_dates=True)
+ df1 = self.get_exceldf('test1', 'Sheet1', index_col=0)
+ df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
- df3 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
+ df3 = self.get_exceldf('test1', 'Sheet1', index_col=0,
skipfooter=1)
- df4 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_dates=True,
+ df4 = self.get_exceldf('test1', 'Sheet1', index_col=0,
skip_footer=1)
tm.assert_frame_equal(df3, df1.ix[:-1])
tm.assert_frame_equal(df3, df4)
@@ -389,7 +386,7 @@ def test_reading_all_sheets_with_blank(self):
basename = 'blank_with_header'
dfs = self.get_exceldf(basename, sheetname=None)
expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
- tm.assert_contains_all(expected_keys, dfs.keys())
+ tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self):
@@ -411,14 +408,14 @@ class XlrdTests(ReadingTestsBase):
def test_excel_read_buffer(self):
pth = os.path.join(self.dirpath, 'test1' + self.ext)
- expected = read_excel(pth, 'Sheet1', index_col=0, parse_dates=True)
+ expected = read_excel(pth, 'Sheet1', index_col=0)
with open(pth, 'rb') as f:
- actual = read_excel(f, 'Sheet1', index_col=0, parse_dates=True)
+ actual = read_excel(f, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
with open(pth, 'rb') as f:
xls = ExcelFile(f)
- actual = read_excel(xls, 'Sheet1', index_col=0, parse_dates=True)
+ actual = read_excel(xls, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_read_xlrd_Book(self):
@@ -680,7 +677,7 @@ def test_excel_oldindex_format(self):
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self):
- #GH 6114
+ # GH 6114
for arg in [True, False]:
with tm.assertRaises(TypeError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
@@ -692,6 +689,19 @@ def test_read_excel_chunksize(self):
pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
chunksize=100)
+ def test_read_excel_parse_dates(self):
+ # GH 11544
+ with tm.assertRaises(NotImplementedError):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ parse_dates=True)
+
+ def test_read_excel_date_parser(self):
+ # GH 11544
+ with tm.assertRaises(NotImplementedError):
+ dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ date_parser=dateparse)
+
def test_read_excel_skiprows_list(self):
#GH 4903
actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext),
@@ -1093,7 +1103,7 @@ def test_to_excel_periodindex(self):
xp.to_excel(path, 'sht1')
reader = ExcelFile(path)
- rs = read_excel(reader, 'sht1', index_col=0, parse_dates=True)
+ rs = read_excel(reader, 'sht1', index_col=0)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self):
| Fixes #11544
The `parse_dates` and `date_parser` args are passed to `TextReader` and then to `TextFileReader` where they don't seem to have an effect. It was decided to raise the exception at the `_parse_excel` level however, following suit with the handling of `chunksize` args.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11870 | 2015-12-19T16:21:49Z | 2015-12-19T20:03:33Z | 2015-12-19T20:03:33Z | 2016-01-15T19:05:45Z |
DOC: paramaters -> parameters | diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt
index 6a14a4024ba5a..a2597757c3353 100644
--- a/doc/source/whatsnew/v0.15.2.txt
+++ b/doc/source/whatsnew/v0.15.2.txt
@@ -194,7 +194,7 @@ Bug Fixes
- Bug in Timestamp-Timestamp not returning a Timedelta type and datelike-datelike ops with timezones (:issue:`8865`)
- Made consistent a timezone mismatch exception (either tz operated with None or incompatible timezone), will now return ``TypeError`` rather than ``ValueError`` (a couple of edge cases only), (:issue:`8865`)
- Bug in using a ``pd.Grouper(key=...)`` with no level/axis or level only (:issue:`8795`, :issue:`8866`)
-- Report a ``TypeError`` when invalid/no paramaters are passed in a groupby (:issue:`8015`)
+- Report a ``TypeError`` when invalid/no parameters are passed in a groupby (:issue:`8015`)
- Bug in packaging pandas with ``py2app/cx_Freeze`` (:issue:`8602`, :issue:`8831`)
- Bug in ``groupby`` signatures that didn't include \*args or \*\*kwargs (:issue:`8733`).
- ``io.data.Options`` now raises ``RemoteDataError`` when no expiry dates are available from Yahoo and when it receives no data from Yahoo (:issue:`8761`), (:issue:`8783`).
@@ -238,4 +238,3 @@ Bug Fixes
- Bug in plotting if sharex was enabled and index was a timeseries, would show labels on multiple axes (:issue:`3964`).
- Bug where passing a unit to the TimedeltaIndex constructor applied the to nano-second conversion twice. (:issue:`9011`).
- Bug in plotting of a period-like array (:issue:`9012`)
-
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 9b75153d9b84b..7c4a99cd76496 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1490,7 +1490,7 @@ def sortlevel(self, level=None, ascending=True, sort_remaining=None):
ascending : boolean, default True
False to sort in descending order
- level, sort_remaining are compat paramaters
+ level, sort_remaining are compat parameters
Returns
-------
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fb57a7f8bd838..fe063f5b4bc4d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -990,7 +990,7 @@ def append_to_multiple(self, d, value, selector, data_columns=None,
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
- Paramaters
+ Parameters
----------
key : object (the node to index)
@@ -3119,7 +3119,7 @@ def create_index(self, columns=None, optlevel=None, kind=None):
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
- Paramaters
+ Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx
index eb736e4569009..be6e11ce70c76 100644
--- a/pandas/src/reduce.pyx
+++ b/pandas/src/reduce.pyx
@@ -592,7 +592,7 @@ cdef class BlockSlider:
def reduce(arr, f, axis=0, dummy=None, labels=None):
"""
- Paramaters
+ Parameters
-----------
arr : NDFrame object
f : function
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index ed9bf8d3862ce..5f7dea43fc438 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -48,7 +48,7 @@ def round(self, freq):
"""
Round the index to the specified freq; this is a floor type of operation
- Paramaters
+ Parameters
----------
freq : freq string/object
| Typo that generates sphinx warning
| https://api.github.com/repos/pandas-dev/pandas/pulls/11869 | 2015-12-19T11:52:32Z | 2015-12-19T11:52:55Z | 2015-12-19T11:52:55Z | 2015-12-19T11:52:56Z |
look for colormap in rcParams['axes.prop_cycle'] (mpl 1.5+) first | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 5b8c282d3e4ed..cd404476e1d7e 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -227,8 +227,7 @@ Bug Fixes
- Bug in parsing timezone offset strings with non-zero minutes (:issue:`11708`)
-
-
+- Bug in ``df.plot`` using incorrect colors for bar plots under matplotlib 1.5+ (:issue:`11614`)
- Bug in ``.loc`` result with duplicated key may have ``Index`` with incorrect dtype (:issue:`11497`)
- Bug in ``pd.rolling_median`` where memory allocation failed even with sufficient memory (:issue:`11696`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index acf6f4b0bd48c..27ca4c157f594 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -3716,6 +3716,24 @@ def test_plain_axes(self):
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
+ def test_passed_bar_colors(self):
+ import matplotlib as mpl
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
+ colormap = mpl.colors.ListedColormap(color_tuples)
+ barplot = pd.DataFrame([[1,2,3]]).plot(kind="bar", cmap=colormap)
+ self.assertEqual(color_tuples, [c.get_facecolor() for c in barplot.patches])
+
+ def test_rcParams_bar_colors(self):
+ import matplotlib as mpl
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
+ try: # mpl 1.5
+ with mpl.rc_context(rc={'axes.prop_cycle': mpl.cycler("color", color_tuples)}):
+ barplot = pd.DataFrame([[1,2,3]]).plot(kind="bar")
+ except (AttributeError, KeyError): # mpl 1.4
+ with mpl.rc_context(rc={'axes.color_cycle': color_tuples}):
+ barplot = pd.DataFrame([[1,2,3]]).plot(kind="bar")
+ self.assertEqual(color_tuples, [c.get_facecolor() for c in barplot.patches])
+
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
@@ -3763,7 +3781,6 @@ def test_plot_submethod_works(self):
df.groupby('z')['x'].plot.line()
tm.close()
-
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, np.ndarray):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 24b37ce39631b..3e9c788914a5a 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -163,8 +163,11 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
- colors = list(plt.rcParams.get('axes.color_cycle',
- list('bgrcmyk')))
+ try:
+ colors = [c['color'] for c in list(plt.rcParams['axes.prop_cycle'])]
+ except KeyError:
+ colors = list(plt.rcParams.get('axes.color_cycle',
+ list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
| Fixes #11614
| https://api.github.com/repos/pandas-dev/pandas/pulls/11865 | 2015-12-18T20:17:02Z | 2015-12-25T21:47:39Z | 2015-12-25T21:47:39Z | 2015-12-25T21:47:39Z |
COMPAT: remove python 3.3. compat, #11273 | diff --git a/.travis.yml b/.travis.yml
index 4e46fb7ad85ca..abca2fe9c2c7e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -57,13 +57,6 @@ matrix:
- FULL_DEPS=true
- CLIPBOARD=xsel
- BUILD_TYPE=conda
- - python: 3.3
- env:
- - JOB_NAME: "33_nslow"
- - NOSE_ARGS="not slow and not disabled"
- - FULL_DEPS=true
- - CLIPBOARD=xsel
- - BUILD_TYPE=conda
- python: 2.7
env:
- JOB_NAME: "27_slow"
@@ -104,13 +97,6 @@ matrix:
- BUILD_TYPE=pydata
- PANDAS_TESTING_MODE="deprecate"
allow_failures:
- - python: 3.3
- env:
- - JOB_NAME: "33_nslow"
- - NOSE_ARGS="not slow and not disabled"
- - FULL_DEPS=true
- - CLIPBOARD=xsel
- - BUILD_TYPE=conda
- python: 2.7
env:
- JOB_NAME: "27_slow"
diff --git a/ci/requirements-3.3.build b/ci/requirements-3.3.build
deleted file mode 100644
index ada6686f599ca..0000000000000
--- a/ci/requirements-3.3.build
+++ /dev/null
@@ -1,4 +0,0 @@
-python-dateutil
-pytz=2013b
-numpy=1.7.1
-cython=0.19.1
diff --git a/ci/requirements-3.3.pip b/ci/requirements-3.3.pip
deleted file mode 100644
index 7e172dc039087..0000000000000
--- a/ci/requirements-3.3.pip
+++ /dev/null
@@ -1,2 +0,0 @@
-blosc
-openpyxl
diff --git a/ci/requirements-3.3.run b/ci/requirements-3.3.run
deleted file mode 100644
index 2379ab42391db..0000000000000
--- a/ci/requirements-3.3.run
+++ /dev/null
@@ -1,17 +0,0 @@
-python-dateutil
-pytz=2013b
-numpy=1.7.1
-xlsxwriter=0.4.6
-xlrd=0.9.2
-xlwt
-html5lib=1.0b2
-numexpr
-pytables
-bottleneck=0.8.0
-matplotlib
-patsy
-lxml=3.2.1
-scipy
-beautiful-soup=4.2.1
-statsmodels
-jinja2=2.8
diff --git a/doc/source/install.rst b/doc/source/install.rst
index fcf497c0146c2..b099186f208ae 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 2.6, 2.7, 3.3, 3.4, and 3.5
+Officially Python 2.6, 2.7, 3.4, and 3.5
Installing pandas
-----------------
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 00217eb44e605..65843d4c62a2c 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -7,6 +7,10 @@ This is a major release from 0.17.1 and includes a small number of API changes,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
+.. warning::
+
+ pandas >= 0.18.0 will no longer support compatibility with Python version 3.3 (:issue:`11273`)
+
Highlights include:
Check the :ref:`API Changes <whatsnew_0180.api>` and :ref:`deprecations <whatsnew_0180.deprecations>` before updating.
diff --git a/setup.py b/setup.py
index 4bb40feb55655..e52c19167c65f 100755
--- a/setup.py
+++ b/setup.py
@@ -180,7 +180,6 @@ def build_extensions(self):
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Cython',
| closes #11273
| https://api.github.com/repos/pandas-dev/pandas/pulls/11863 | 2015-12-18T18:43:53Z | 2015-12-18T19:17:37Z | 2015-12-18T19:17:37Z | 2015-12-18T19:17:37Z |
BUG: force list type for tuples from chunked sql table reads #11522 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index de5439885a6a7..6d891ce3221ce 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -238,3 +238,4 @@ Bug Fixes
- Bug in ``df.replace`` while replacing value in mixed dtype ``Dataframe`` (:issue:`11698`)
- Bug in ``read_excel`` failing to read any non-empty sheets when empty sheets exist and ``sheetname=None`` (:issue:`11711`)
+- Bug in ``read_sql`` with pymysql connections failing to return chunked data (:issue:`11522`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index bedc71379354d..95a6d02b1ccb6 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1556,6 +1556,8 @@ def _query_iterator(cursor, chunksize, columns, index_col=None,
while True:
data = cursor.fetchmany(chunksize)
+ if type(data) == tuple:
+ data = list(data)
if not data:
cursor.close()
break
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index fe65dd4bdb02f..bfd1ac3f08ee8 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -2477,6 +2477,21 @@ def test_write_row_by_row(self):
result.index = frame.index
tm.assert_frame_equal(result, frame)
+ def test_chunksize_read_type(self):
+ _skip_if_no_pymysql()
+ frame = tm.makeTimeDataFrame()
+ frame.index.name = "index"
+ drop_sql = "DROP TABLE IF EXISTS test"
+ cur = self.conn.cursor()
+ cur.execute(drop_sql)
+ sql.to_sql(frame, name='test', con=self.conn, flavor='mysql')
+ query = "select * from test"
+ chunksize = 5
+ chunk_gen = pd.read_sql_query(sql=query, con=self.conn,
+ chunksize=chunksize, index_col="index")
+ chunk_df = next(chunk_gen)
+ tm.assert_frame_equal(frame[:chunksize], chunk_df)
+
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
| Closes #11522.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11861 | 2015-12-17T22:55:13Z | 2015-12-19T12:58:10Z | 2015-12-19T12:58:09Z | 2015-12-19T16:16:32Z |
DOC: typos in DataFrame.iterrows and itertuples docstrings | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 8658c6d45f1b6..9ecee2ea86bd1 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1212,7 +1212,7 @@ To iterate over the rows of a DataFrame, you can use the following methods:
This converts the rows to Series objects, which can change the dtypes and has some
performance implications.
* :meth:`~DataFrame.itertuples`: Iterate over the rows of a DataFrame
- as namedtuples of the values. This is a lot faster as
+ as namedtuples of the values. This is a lot faster than
:meth:`~DataFrame.iterrows`, and is in most cases preferable to use
to iterate over the values of a DataFrame.
@@ -1344,7 +1344,7 @@ and is generally faster as :meth:`~DataFrame.iterrows`.
.. note::
- The columns names will be renamed to positional names if they are
+ The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9480136e418e9..ff110880d34ba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -630,7 +630,7 @@ def iterrows(self):
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
- and which is generally faster as ``iterrows``.
+ and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
@@ -667,7 +667,7 @@ def itertuples(self, index=True, name="Pandas"):
Notes
-----
- The columns names will be renamed to positional names if they are
+ The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11860 | 2015-12-17T21:36:25Z | 2015-12-17T21:53:45Z | 2015-12-17T21:53:45Z | 2015-12-17T21:53:50Z | |
REGR: Regression in .clip with tz-aware datetimes #11838 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index a534c3ec0017a..de5439885a6a7 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -92,7 +92,7 @@ In addition, ``.round()`` will be available thru the ``.dt`` accessor of ``Serie
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- The parameter ``out`` has been removed from the ``Series.round()`` method. (:issue:`11763`)
+- The parameter ``out`` has been removed from the ``Series.round()`` method. (:issue:`11763`)
Bug in QuarterBegin with n=0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -189,7 +189,7 @@ Bug Fixes
- Bug in ``GroupBy.size`` when data-frame is empty. (:issue:`11699`)
- Bug in ``Period.end_time`` when a multiple of time period is requested (:issue:`11738`)
-
+- Regression in ``.clip`` with tz-aware datetimes (:issue:`11838`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index bf63e9a3cab45..123dca9f3ee5c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1215,10 +1215,6 @@ def func(cond, values, other):
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
- if not isinstance(result, np.ndarray):
- raise TypeError('Could not compare [%s] with block values'
- % repr(other))
-
if transpose:
result = result.T
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 34d2d0de35977..099e86a44d188 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3008,16 +3008,16 @@ def test_round(self):
name='ts')
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
-
+
def test_built_in_round(self):
if not compat.PY3:
raise nose.SkipTest('build in round cannot be overriden prior to Python 3')
-
+
s = Series([1.123, 2.123, 3.123], index=lrange(3))
result = round(s)
expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
self.assert_series_equal(result, expected_rounded0)
-
+
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
result = round(s, decimals)
@@ -5844,6 +5844,24 @@ def test_clip_against_series(self):
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
+
+ def test_clip_with_datetimes(self):
+
+ # GH 11838
+ # naive and tz-aware datetimes
+
+ t = Timestamp('2015-12-01 09:30:30')
+ s = Series([ Timestamp('2015-12-01 09:30:00'), Timestamp('2015-12-01 09:31:00') ])
+ result = s.clip(upper=t)
+ expected = Series([ Timestamp('2015-12-01 09:30:00'), Timestamp('2015-12-01 09:30:30') ])
+ assert_series_equal(result, expected)
+
+ t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
+ s = Series([ Timestamp('2015-12-01 09:30:00', tz='US/Eastern'), Timestamp('2015-12-01 09:31:00', tz='US/Eastern') ])
+ result = s.clip(upper=t)
+ expected = Series([ Timestamp('2015-12-01 09:30:00', tz='US/Eastern'), Timestamp('2015-12-01 09:30:30', tz='US/Eastern') ])
+ assert_series_equal(result, expected)
+
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
| closes #11838
| https://api.github.com/repos/pandas-dev/pandas/pulls/11850 | 2015-12-16T01:51:21Z | 2015-12-16T13:13:06Z | 2015-12-16T13:13:06Z | 2015-12-16T13:13:06Z |
COMPAT: GH11769 Fix msgpack ExtType unpack errors on Windows in Python 3.5 | diff --git a/pandas/src/msgpack/unpack.h b/pandas/src/msgpack/unpack.h
index 5deb7cde0b929..3f9d0f1b64895 100644
--- a/pandas/src/msgpack/unpack.h
+++ b/pandas/src/msgpack/unpack.h
@@ -265,9 +265,9 @@ static inline int unpack_callback_ext(unpack_user* u, const char* base, const ch
}
// length also includes the typecode, so the actual data is length-1
#if PY_MAJOR_VERSION == 2
- py = PyObject_CallFunction(u->ext_hook, "(is#)", typecode, pos, length-1);
+ py = PyObject_CallFunction(u->ext_hook, "(is#)", typecode, pos, (Py_ssize_t)length-1);
#else
- py = PyObject_CallFunction(u->ext_hook, "(iy#)", typecode, pos, length-1);
+ py = PyObject_CallFunction(u->ext_hook, "(iy#)", typecode, pos, (Py_ssize_t)length-1);
#endif
if (!py)
return -1;
| closes #11769
| https://api.github.com/repos/pandas-dev/pandas/pulls/11842 | 2015-12-15T05:44:49Z | 2015-12-15T12:01:56Z | 2015-12-15T12:01:56Z | 2015-12-15T12:02:51Z |
Spelling fixes | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index b550f59b8c5e6..d976b0c8c21a5 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -525,7 +525,7 @@ they have a MultiIndex:
df.T.sort_index(level=1, axis=1)
-The ``MultiIndex`` object has code to **explicity check the sort depth**. Thus,
+The ``MultiIndex`` object has code to **explicitly check the sort depth**. Thus,
if you try to index at a depth at which the index is not sorted, it will raise
an exception. Here is a concrete example to illustrate this:
diff --git a/doc/source/api.rst b/doc/source/api.rst
index c7f815914358b..12dc0b0cb50b9 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -552,7 +552,7 @@ These can be accessed like ``Series.dt.<property>``.
String handling
~~~~~~~~~~~~~~~
``Series.str`` can be used to access the values of the series as
-strings and apply several methods to it. These can be acccessed like
+strings and apply several methods to it. These can be accessed like
``Series.str.<function/property>``.
.. autosummary::
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index b80ac18a2ac51..8658c6d45f1b6 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1097,7 +1097,7 @@ Note that the same result could have been achieved using
ts2.reindex(ts.index).fillna(method='ffill')
:meth:`~Series.reindex` will raise a ValueError if the index is not monotonic
-increasing or descreasing. :meth:`~Series.fillna` and :meth:`~Series.interpolate`
+increasing or decreasing. :meth:`~Series.fillna` and :meth:`~Series.interpolate`
will not make any checks on the order of the index.
.. _basics.limits_on_reindex_fill:
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 2dc506db7eeba..317641f1b3eea 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -178,7 +178,7 @@ It's also possible to pass in the categories in a specific order:
.. note::
- New categorical data are NOT automatically ordered. You must explicity pass ``ordered=True`` to
+ New categorical data are NOT automatically ordered. You must explicitly pass ``ordered=True`` to
indicate an ordered ``Categorical``.
@@ -342,7 +342,7 @@ necessarily make the sort order the same as the categories order.
Multi Column Sorting
~~~~~~~~~~~~~~~~~~~~
-A categorical dtyped column will partcipate in a multi-column sort in a similar manner to other columns.
+A categorical dtyped column will participate in a multi-column sort in a similar manner to other columns.
The ordering of the categorical is determined by the ``categories`` of that column.
.. ipython:: python
diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst
index f51603750d61b..85d432b546f21 100644
--- a/doc/source/comparison_with_sas.rst
+++ b/doc/source/comparison_with_sas.rst
@@ -271,7 +271,7 @@ date/datetime columns.
date2 = mdy(2, 15, 2015);
date1_year = year(date1);
date2_month = month(date2);
- * shift date to begninning of next interval;
+ * shift date to beginning of next interval;
date1_next = intnx('MONTH', date1, 1);
* count intervals between dates;
months_between = intck('MONTH', date1, date2);
@@ -279,7 +279,7 @@ date/datetime columns.
The equivalent pandas operations are shown below. In addition to these
functions pandas supports other Time Series features
-not available in Base SAS (such as resampling and and custom offets) -
+not available in Base SAS (such as resampling and and custom offsets) -
see the :ref:`timeseries documentation<timeseries>` for more details.
.. ipython:: python
@@ -350,7 +350,7 @@ Sorting in SAS is accomplished via ``PROC SORT``
run;
pandas objects have a :meth:`~DataFrame.sort_values` method, which
-takes a list of columnns to sort by.
+takes a list of columns to sort by.
.. ipython:: python
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 779ebf87fdf64..92ed85071ecb8 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -333,7 +333,7 @@ The :ref:`multindexing <advanced.hierarchical>` docs.
# As Labelled Index
df = df.set_index('row');df
- # With Heirarchical Columns
+ # With Hierarchical Columns
df.columns = pd.MultiIndex.from_tuples([tuple(c.split('_')) for c in df.columns]);df
# Now stack & Reset
df = df.stack(0).reset_index(1);df
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 5a62e7dccea34..11c743d6ef047 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -46,7 +46,7 @@ Series
.. warning::
- In 0.13.0 ``Series`` has internaly been refactored to no longer sub-class ``ndarray``
+ In 0.13.0 ``Series`` has internally been refactored to no longer sub-class ``ndarray``
but instead subclass ``NDFrame``, similarly to the rest of the pandas containers. This should be
a transparent change with only very limited API implications (See the :ref:`Internal Refactoring<whatsnew_0130.refactoring>`)
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 762656ba05bd6..683cb671bca9d 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -155,7 +155,7 @@ or multi-indexed DataFrames.
fredapi is a Python interface to the `Federal Reserve Economic Data (FRED) <http://research.stlouisfed.org/fred2/>`__
provided by the Federal Reserve Bank of St. Louis. It works with both the FRED database and ALFRED database that
contains point-in-time data (i.e. historic data revisions). fredapi provides a wrapper in python to the FRED
-HTTP API, and also provides several conveninent methods for parsing and analyzing point-in-time data from ALFRED.
+HTTP API, and also provides several convenient methods for parsing and analyzing point-in-time data from ALFRED.
fredapi makes use of pandas and returns data in a Series or DataFrame. This module requires a FRED API key that
you can obtain for free on the FRED website.
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index ead4c10341fe9..946256d585c49 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -745,7 +745,7 @@ Technical Minutia Regarding Expression Evaluation
Expressions that would result in an object dtype or involve datetime operations
(because of ``NaT``) must be evaluated in Python space. The main reason for
-this behavior is to maintain backwards compatbility with versions of numpy <
+this behavior is to maintain backwards compatibility with versions of numpy <
1.7. In those versions of ``numpy`` a call to ``ndarray.astype(str)`` will
truncate any strings that are more than 60 characters in length. Second, we
can't pass ``object`` arrays to ``numexpr`` thus string comparisons must be
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index e517e9ab9935d..4ae2ee1927d1a 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -649,7 +649,7 @@ For dataframes with multiple columns, filters should explicitly specify a column
.. note::
Some functions when applied to a groupby object will act as a **filter** on the input, returning
- a reduced shape of the original (and potentitally eliminating groups), but with the index unchanged.
+ a reduced shape of the original (and potentially eliminating groups), but with the index unchanged.
Passing ``as_index=False`` will not affect these transformation methods.
For example: ``head, tail``.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index af5087689ca4d..72f1e5749a886 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -138,7 +138,7 @@ lower-dimensional slices. Thus,
Series; ``series[label]``; scalar value
DataFrame; ``frame[colname]``; ``Series`` corresponding to colname
- Panel; ``panel[itemname]``; ``DataFrame`` corresponing to the itemname
+ Panel; ``panel[itemname]``; ``DataFrame`` corresponding to the itemname
Here we construct a simple time series data set to use for illustrating the
indexing functionality:
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 8001cd3723601..807838edb6bc4 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3065,7 +3065,7 @@ indexed dimension as the ``where``.
i = store.root.df.table.cols.index.index
i.optlevel, i.kind
-Ofentimes when appending large amounts of data to a store, it is useful to turn off index creation for each append, then recreate at the end.
+Oftentimes when appending large amounts of data to a store, it is useful to turn off index creation for each append, then recreate at the end.
.. ipython:: python
@@ -4415,7 +4415,7 @@ whether imported ``Categorical`` variables are ordered.
*Stata* supports partially labeled series. These series have value labels for
some but not all data values. Importing a partially labeled series will produce
- a ``Categorial`` with string categories for the values that are labeled and
+ a ``Categorical`` with string categories for the values that are labeled and
numeric categories for values with no label.
.. _io.other:
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index eb75c2982131a..074b15bbbcb66 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -135,7 +135,7 @@ functionality below.
.. note::
It is worth noting however, that ``concat`` (and therefore ``append``) makes
a full copy of the data, and that constantly reusing this function can
- create a signifcant performance hit. If you need to use the operation over
+ create a significant performance hit. If you need to use the operation over
several datasets, use a list comprehension.
::
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index 39b73b307be4e..c9aa10478714a 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -334,7 +334,7 @@ Similarly to other of the datetime-like indices, ``DatetimeIndex`` and ``PeriodI
index=timedelta_range('1 days', periods=100, freq='h'))
s
-Selections work similary, with coercion on string-likes and slices:
+Selections work similarly, with coercion on string-likes and slices:
.. ipython:: python
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 50f104f4529fc..b5be9cf395feb 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -672,7 +672,7 @@ used exactly like a ``Timedelta`` - see the
Note that some offsets (such as ``BQuarterEnd``) do not have a
vectorized implementation. They can still be used but may
-calculate signficantly slower and will raise a ``PerformanceWarning``
+calculate significantly slower and will raise a ``PerformanceWarning``
.. ipython:: python
:okwarning:
@@ -885,7 +885,7 @@ frequencies. We will refer to these aliases as *offset aliases*
"H", "hourly frequency"
"T, min", "minutely frequency"
"S", "secondly frequency"
- "L, ms", "milliseonds"
+ "L, ms", "milliseconds"
"U, us", "microseconds"
"N", "nanoseconds"
@@ -1374,7 +1374,7 @@ frequency. Arithmetic is not allowed between ``Period`` with different ``freq``
p == Period('2012-01', freq='3M')
-If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherise, ``ValueError`` will be raised.
+If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherwise, ``ValueError`` will be raised.
.. ipython:: python
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 4b42ceff4b46e..12be3037def75 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -466,7 +466,7 @@ When ``subplots=False`` / ``by`` is ``None``:
This is the default of ``boxplot`` in historical reason.
Note that ``plot(kind='box')`` returns ``Axes`` as default as the same as other plots.
* if ``return_type`` is ``'axes'``, a :class:`matplotlib Axes <matplotlib.axes.Axes>` containing the boxplot is returned.
-* if ``return_type`` is ``'both'`` a namedtuple containging the :class:`matplotlib Axes <matplotlib.axes.Axes>`
+* if ``return_type`` is ``'both'`` a namedtuple containing the :class:`matplotlib Axes <matplotlib.axes.Axes>`
and :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned
When ``subplots=True`` / ``by`` is some column of the DataFrame:
| https://api.github.com/repos/pandas-dev/pandas/pulls/11837 | 2015-12-14T04:04:27Z | 2015-12-14T08:38:03Z | 2015-12-14T08:38:03Z | 2015-12-14T08:38:11Z | |
CLN: frequency.get_offset always return copy | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 733e86e38e47a..56a129fddee16 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -252,6 +252,8 @@ Deprecations
For example, instead of ``s.rolling(window=5,freq='D').max()`` to get the max value on a rolling 5 Day window, one could use ``s.resample('D',how='max').rolling(window=5).max()``, which first resamples the data to daily data, then provides a rolling 5 day window.
+- ``pd.tseries.frequencies.get_offset_name`` function is deprecated. Use offset's ``.freqstr`` property as alternative (:issue:`11192`)
+
.. _whatsnew_0180.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 07546a76be431..fced24c706246 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -286,8 +286,7 @@ def _get_freq_str(base, mult=1):
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
- BYearBegin, BYearEnd, _make_offset
- )
+ BYearBegin, BYearEnd, prefix_mapping)
try:
cday = CDay()
except NotImplementedError:
@@ -547,13 +546,17 @@ def get_offset(name):
if name not in _offset_map:
try:
- # generate and cache offset
- offset = _make_offset(name)
+ split = name.split('-')
+ klass = prefix_mapping[split[0]]
+ # handles case where there's no suffix (and will TypeError if too many '-')
+ offset = klass._from_name(*split[1:])
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError('Bad rule name requested: %s.' % name)
+ # cache
_offset_map[name] = offset
- return _offset_map[name]
+ # do not return cache because it's mutable
+ return _offset_map[name].copy()
getOffset = get_offset
@@ -567,19 +570,10 @@ def get_offset_name(offset):
--------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
- if offset is None:
- raise ValueError("Offset can't be none!")
- # Hack because this is what it did before...
- if isinstance(offset, BDay):
- if offset.n != 1:
- raise ValueError('Bad rule given: %s.' % 'BusinessDays')
- else:
- return offset.rule_code
- try:
- return offset.freqstr
- except AttributeError:
- # Bad offset, give useful error.
- raise ValueError('Bad rule given: %s.' % offset)
+
+ msg = "get_offset_name(offset) is deprecated. Use offset.freqstr instead"
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ return offset.freqstr
def get_legacy_offset_name(offset):
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 82ea9eebaefa8..5beb65f0ba640 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -309,8 +309,6 @@ def _params(self):
return params
def __repr__(self):
- if hasattr(self, '_named'):
- return self._named
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc', 'normalize'])
attrs = []
@@ -346,10 +344,7 @@ def __repr__(self):
@property
def name(self):
- if hasattr(self, '_named'):
- return self._named
- else:
- return self.rule_code
+ return self.rule_code
def __eq__(self, other):
if other is None:
@@ -516,8 +511,6 @@ class BusinessMixin(object):
# attributes on each object rather than the existing behavior of iterating
# over internal ``__dict__``
def __repr__(self):
- if hasattr(self, '_named'):
- return self._named
className = getattr(self, '_outputName', self.__class__.__name__)
if abs(self.n) != 1:
@@ -2668,16 +2661,3 @@ def generate_range(start=None, end=None, periods=None,
])
prefix_mapping['N'] = Nano
-
-def _make_offset(key):
- """Gets offset based on key. KeyError if prefix is bad, ValueError if
- suffix is bad. All handled by `get_offset` in tseries/frequencies. Not
- public."""
- if key is None:
- return None
- split = key.split('-')
- klass = prefix_mapping[split[0]]
- # handles case where there's no suffix (and will TypeError if too many '-')
- obj = klass._from_name(*split[1:])
- obj._named = key
- return obj
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 30fadfac2a3ae..70e0cc288458e 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -16,7 +16,7 @@
QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week,
YearBegin, YearEnd, Hour, Minute, Second, Day, Micro, Milli, Nano, Easter,
WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date,
- get_offset, get_offset_name, get_standard_freq)
+ get_offset, get_standard_freq)
from pandas import Series
from pandas.tseries.frequencies import _offset_map, get_freq_code, _get_freq_str
@@ -3593,19 +3593,20 @@ def test_compare_ticks(self):
class TestOffsetNames(tm.TestCase):
def test_get_offset_name(self):
- assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2))
-
- assert get_offset_name(BDay()) == 'B'
- assert get_offset_name(BMonthEnd()) == 'BM'
- assert get_offset_name(Week(weekday=0)) == 'W-MON'
- assert get_offset_name(Week(weekday=1)) == 'W-TUE'
- assert get_offset_name(Week(weekday=2)) == 'W-WED'
- assert get_offset_name(Week(weekday=3)) == 'W-THU'
- assert get_offset_name(Week(weekday=4)) == 'W-FRI'
-
- self.assertEqual(get_offset_name(LastWeekOfMonth(weekday=WeekDay.SUN)), "LWOM-SUN")
- self.assertEqual(get_offset_name(makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)),"REQ-L-MAR-TUE-4")
- self.assertEqual(get_offset_name(makeFY5253NearestEndMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=3)), "REQ-N-MAR-TUE-3")
+ self.assertEqual(BDay().freqstr, 'B')
+ self.assertEqual(BDay(2).freqstr, '2B')
+ self.assertEqual(BMonthEnd().freqstr, 'BM')
+ self.assertEqual(Week(weekday=0).freqstr, 'W-MON')
+ self.assertEqual(Week(weekday=1).freqstr, 'W-TUE')
+ self.assertEqual(Week(weekday=2).freqstr, 'W-WED')
+ self.assertEqual(Week(weekday=3).freqstr, 'W-THU')
+ self.assertEqual(Week(weekday=4).freqstr, 'W-FRI')
+
+ self.assertEqual(LastWeekOfMonth(weekday=WeekDay.SUN).freqstr, "LWOM-SUN")
+ self.assertEqual(makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4).freqstr,
+ "REQ-L-MAR-TUE-4")
+ self.assertEqual(makeFY5253NearestEndMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=3).freqstr,
+ "REQ-N-MAR-TUE-3")
def test_get_offset():
assertRaisesRegexp(ValueError, "rule.*GIBBERISH", get_offset, 'gibberish')
@@ -3834,13 +3835,10 @@ def test_str_for_named_is_name(self):
names += ['W-' + day for day in days]
names += ['WOM-' + week + day for week in ('1', '2', '3', '4')
for day in days]
- #singletons
- names += ['S', 'T', 'U', 'BM', 'BMS', 'BQ', 'QS'] # No 'Q'
_offset_map.clear()
for name in names:
offset = get_offset(name)
- self.assertEqual(repr(offset), name)
- self.assertEqual(str(offset), name)
+ self.assertEqual(offset.freqstr, name)
def get_utc_offset_hours(ts):
| Closes #11192.
Fix 2 points which should not affect to normal users (thus no release note descriptions).
- get_offset (#11192): It returns cached offset, overwriting it may leads to unexpected results. `pandas` public methods doesn't use them as it is. Thus it should not affect to almost all users, except who explicitly uses `frequencies.get_offset`
- get_offset_name: This looks leagacy impl which raises `ValueError` for `BDay` with `n != 1` which is valid input. I've marked this as deprecated, but it can be removed immediately as non-public.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11834 | 2015-12-13T04:01:11Z | 2015-12-26T00:28:22Z | 2015-12-26T00:28:22Z | 2015-12-27T06:41:58Z |
CLN: Use `is` operator for comparing with `None` (Pep8) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6aeb4d83649ef..072926405d849 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4500,7 +4500,7 @@ def describe_1d(data, percentiles):
else:
data = self
elif include == 'all':
- if exclude != None:
+ if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 584b946d47618..28d95c40c7294 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -586,7 +586,7 @@ def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
- if 'axis' not in kwargs_with_axis or kwargs_with_axis['axis']==None:
+ if 'axis' not in kwargs_with_axis or kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 8eb5bd3a202a4..5d959892791fe 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -293,10 +293,12 @@ def test_none_comparison(self):
o[0] = np.nan
+ # noinspection PyComparisonWithNone
result = o == None
self.assertFalse(result.iat[0])
self.assertFalse(result.iat[1])
+ # noinspection PyComparisonWithNone
result = o != None
self.assertTrue(result.iat[0])
self.assertTrue(result.iat[1])
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 49806491ed1c6..a6aa5ff66576c 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -274,7 +274,7 @@ def make_signature(func) :
"""
from inspect import getargspec
spec = getargspec(func)
- if spec.defaults == None :
+ if spec.defaults is None :
n_wo_defaults = len(spec.args)
defaults = ('',) * n_wo_defaults
else :
| https://api.github.com/repos/pandas-dev/pandas/pulls/11828 | 2015-12-12T06:01:47Z | 2015-12-15T16:37:26Z | 2015-12-15T16:37:26Z | 2015-12-15T17:58:42Z | |
CLN: Use sentinel values instead of mutable default arguments | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 9df53bd3239bf..639da4176cd61 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -475,8 +475,10 @@ def setdefault(self, key, default=None):
self[key] = default
return default
- def __repr__(self, _repr_running={}):
+ def __repr__(self, _repr_running=None):
"""od.__repr__() <==> repr(od)"""
+ if _repr_running is None:
+ _repr_running = {}
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 1433d755d294d..9b75153d9b84b 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1149,7 +1149,9 @@ def __setstate__(self, state):
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
- def __deepcopy__(self, memo={}):
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
return self.copy(deep=True)
def __nonzero__(self):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index efcb6eb818087..bf63e9a3cab45 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4740,8 +4740,10 @@ def trim_join_unit(join_unit, length):
class JoinUnit(object):
- def __init__(self, block, shape, indexers={}):
+ def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
+ if indexers is None:
+ indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index d2fcd6ed19378..b57e90509080d 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -55,7 +55,7 @@ def _ensure_like_indices(time, panels):
return time, panels
-def panel_index(time, panels, names=['time', 'panel']):
+def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
@@ -94,6 +94,8 @@ def panel_index(time, panels, names=['time', 'panel']):
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
+ if names is None:
+ names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
time_factor = Categorical.from_array(time, ordered=True)
panel_factor = Categorical.from_array(panels, ordered=True)
diff --git a/pandas/io/auth.py b/pandas/io/auth.py
index 74b6b13000108..b20b7c8ff1b04 100644
--- a/pandas/io/auth.py
+++ b/pandas/io/auth.py
@@ -45,12 +45,14 @@ class AuthenticationConfigError(ValueError):
# a secure place.
-def process_flags(flags=[]):
+def process_flags(flags=None):
"""Uses the command-line flags to set the logging level.
Args:
argv: List of command line arguments passed to the python script.
"""
+ if flags is None:
+ flags = []
# Let the gflags module process the command-line arguments.
try:
diff --git a/pandas/io/wb.py b/pandas/io/wb.py
index e617a01b73bfd..3b47fb0c1b2bb 100644
--- a/pandas/io/wb.py
+++ b/pandas/io/wb.py
@@ -83,7 +83,7 @@
'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', \
'ZWE', 'all', 'ALL', 'All']
-def download(country=['MX', 'CA', 'US'], indicator=['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS'],
+def download(country=None, indicator=None,
start=2003, end=2005,errors='warn'):
"""
Download data series from the World Bank's World Development Indicators
@@ -123,6 +123,10 @@ def download(country=['MX', 'CA', 'US'], indicator=['NY.GDP.MKTP.CD', 'NY.GNS.IC
indicator value.
"""
+ if country is None:
+ country = ['MX', 'CA', 'US']
+ if indicator is None:
+ indicator = ['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS']
if type(country) == str:
country = [country]
diff --git a/pandas/stats/fama_macbeth.py b/pandas/stats/fama_macbeth.py
index 38fb5894c94bb..30726d82e1aa9 100644
--- a/pandas/stats/fama_macbeth.py
+++ b/pandas/stats/fama_macbeth.py
@@ -31,7 +31,9 @@ class FamaMacBeth(StringMixin):
def __init__(self, y, x, intercept=True, nw_lags=None,
nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
- cluster=None, dropped_dummies={}, verbose=False):
+ cluster=None, dropped_dummies=None, verbose=False):
+ if dropped_dummies is None:
+ dropped_dummies = {}
self._nw_lags_beta = nw_lags_beta
from pandas.stats.plm import MovingPanelOLS
@@ -143,7 +145,9 @@ class MovingFamaMacBeth(FamaMacBeth):
def __init__(self, y, x, window_type='rolling', window=10,
intercept=True, nw_lags=None, nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
- cluster=None, dropped_dummies={}, verbose=False):
+ cluster=None, dropped_dummies=None, verbose=False):
+ if dropped_dummies is None:
+ dropped_dummies = {}
self._window_type = common._get_window_type(window_type)
self._window = window
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 1e209db691fb9..24b37ce39631b 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -653,7 +653,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
- axvlines=True, axvlines_kwds={'linewidth':1,'color':'black'}, **kwds):
+ axvlines=True, axvlines_kwds=None, **kwds):
"""Parallel coordinates plotting.
Parameters
@@ -693,6 +693,8 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
+ if axvlines_kwds is None:
+ axvlines_kwds = {'linewidth':1,'color':'black'}
import matplotlib.pyplot as plt
n = len(frame)
| From [Quantified Code](https://www.quantifiedcode.com/):
Mutable list or dictionary used as default argument to a method or function. Python creates a single persistent object and uses it for every subsequent call in which the argument is left empty. This can cause problems if the program was expecting the function to return a new list or dictionary after every call. Use a sentinel value to denote an empty list or dictionary.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11827 | 2015-12-12T05:44:13Z | 2015-12-15T19:55:48Z | 2015-12-15T19:55:48Z | 2015-12-15T20:46:07Z |
DOC: Add example to pandas.DataFrame.applymap | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index bc60e1dff9ab5..4ec0dd68d5261 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4124,6 +4124,22 @@ def applymap(self, func):
func : function
Python function, returns a single value from a single value
+ Examples
+ --------
+
+ >>> df = pd.DataFrame(np.random.randn(3, 3))
+ >>> df
+ 0 1 2
+ 0 -0.029638 1.081563 1.280300
+ 1 0.647747 0.831136 -1.549481
+ 2 0.513416 -0.884417 0.195343
+ >>> df = df.applymap(lambda x: '%.2f' % x)
+ >>> df
+ 0 1 2
+ 0 -0.03 1.08 1.28
+ 1 0.65 0.83 -1.55
+ 2 0.51 -0.88 0.20
+
Returns
-------
applied : DataFrame
| https://api.github.com/repos/pandas-dev/pandas/pulls/11826 | 2015-12-12T04:40:36Z | 2015-12-23T14:24:16Z | 2015-12-23T14:24:16Z | 2015-12-23T15:29:42Z | |
TST: avoid int conversions on windows, xref #11816 | diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 679ee340f72bd..7e772aeb14f6e 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -390,13 +390,13 @@ def test_today(self):
def test_asm8(self):
np.random.seed(7960929)
- ns = np.random.randint(
+ ns = [
Timestamp.min.value,
Timestamp.max.value,
1000,
- )
+ ]
for n in ns:
- self.assertEqual(Timestamp(n).asm8, np.datetime64(int(n), 'ns'), n)
+ self.assertEqual(Timestamp(n).asm8, np.datetime64(n, 'ns'), n)
self.assertEqual(Timestamp('nat').asm8, np.datetime64('nat', 'ns'))
def test_fields(self):
| xref #11816
cc @llllllllll
had to change the test as this numpy bug on window: https://github.com/numpy/numpy/issues/6812
| https://api.github.com/repos/pandas-dev/pandas/pulls/11824 | 2015-12-11T15:35:06Z | 2015-12-11T15:49:34Z | 2015-12-11T15:49:34Z | 2015-12-11T15:49:34Z |
BUG: GH11808 subclasses of DataFrame did not propagate AttributeError | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 0710f65dc9a74..b2894f669593b 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -206,7 +206,8 @@ Bug Fixes
-- Bug groupby on tz-aware data where selection not returning ``Timestamp`` (:issue:`11616`
+- Bug in subclasses of `DataFrame` where `AttributeError` did not propagate (:issue:`11808`)
+- Bug groupby on tz-aware data where selection not returning ``Timestamp`` (:issue:`11616`)
- Bug in ``pd.read_clipboard`` and ``pd.to_clipboard`` functions not supporting Unicode; upgrade included ``pyperclip`` to v1.5.15 (:issue:`9263`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6aeb4d83649ef..202b9a093759d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2356,8 +2356,7 @@ def __getattr__(self, name):
else:
if name in self._info_axis:
return self[name]
- raise AttributeError("'%s' object has no attribute '%s'" %
- (type(self).__name__, name))
+ return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 6ab4c8fb7251d..09de3bf4a8046 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -15878,6 +15878,17 @@ class SubclassedPanel(Panel):
dtype='int64')
tm.assert_panel_equal(result, expected)
+ def test_subclass_attr_err_propagation(self):
+ # GH 11808
+ class A(DataFrame):
+
+ @property
+ def bar(self):
+ return self.i_dont_exist
+ with tm.assertRaisesRegexp(AttributeError, '.*i_dont_exist.*'):
+ A().bar
+
+
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
try:
| closes #11808
| https://api.github.com/repos/pandas-dev/pandas/pulls/11822 | 2015-12-11T06:58:52Z | 2015-12-11T13:07:58Z | 2015-12-11T13:07:58Z | 2015-12-11T13:08:02Z |
DOC: Fix typo in example for df.merge() | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index bc60e1dff9ab5..dd41075ddf92e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -140,7 +140,7 @@
2 baz 3 2 qux 7
3 foo 4 3 bar 8
->>> merge(A, B, left_on='lkey', right_on='rkey', how='outer')
+>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
| https://api.github.com/repos/pandas-dev/pandas/pulls/11821 | 2015-12-11T05:41:31Z | 2015-12-11T15:37:53Z | 2015-12-11T15:37:53Z | 2015-12-15T19:45:57Z | |
BUG: read_excel fails when empty sheets exist and sheetname=None #11711 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 0710f65dc9a74..a590d8a3afd15 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -230,3 +230,5 @@ Bug Fixes
- Bug in ``df.replace`` while replacing value in mixed dtype ``Dataframe`` (:issue:`11698`)
+
+- Bug in ``read_excel`` failing to read any non-empty sheets when empty sheets exist and ``sheetname=None`` (:issue:`11711`)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 304cc3d346d1f..dec7da513fb42 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -391,7 +391,8 @@ def _parse_cell(cell_contents,cell_typ):
data.append(row)
if sheet.nrows == 0:
- return DataFrame()
+ output[asheetname] = DataFrame()
+ continue
if com.is_list_like(header) and len(header) == 1:
header = header[0]
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 35aa847492d69..cc9f8c564ebf5 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -382,6 +382,15 @@ def test_reading_multiple_specific_sheets(self):
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
+ def test_reading_all_sheets_with_blank(self):
+ # Test reading all sheetnames by setting sheetname to None,
+ # In the case where some sheets are blank.
+ # Issue #11711
+ basename = 'blank_with_header'
+ dfs = self.get_exceldf(basename, sheetname=None)
+ expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
+ tm.assert_contains_all(expected_keys, dfs.keys())
+
# GH6403
def test_read_excel_blank(self):
actual = self.get_exceldf('blank', 'Sheet1')
| Fixes issue #11711.
Existing code prematurely returns an empty dataframe when an empty sheet in the source excel file is encountered.
Fix is to store an empty dataframe in the output dict and continue to the next sheet.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11819 | 2015-12-11T05:13:19Z | 2015-12-12T13:49:39Z | 2015-12-12T13:49:39Z | 2015-12-15T04:34:56Z |
ENH: add 'asm8' to NaT | diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 4fe136fceb671..679ee340f72bd 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -388,6 +388,17 @@ def test_today(self):
self.assertTrue(abs(ts_from_string_tz.tz_localize(None)
- ts_from_method_tz.tz_localize(None)) < delta)
+ def test_asm8(self):
+ np.random.seed(7960929)
+ ns = np.random.randint(
+ Timestamp.min.value,
+ Timestamp.max.value,
+ 1000,
+ )
+ for n in ns:
+ self.assertEqual(Timestamp(n).asm8, np.datetime64(int(n), 'ns'), n)
+ self.assertEqual(Timestamp('nat').asm8, np.datetime64('nat', 'ns'))
+
def test_fields(self):
def check(value, equal):
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 84d0cc61be8e6..d1bc8025ba109 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -446,10 +446,6 @@ class Timestamp(_Timestamp):
def freqstr(self):
return getattr(self.offset, 'freqstr', self.offset)
- @property
- def asm8(self):
- return np.int64(self.value).view('M8[ns]')
-
@property
def is_month_start(self):
return self._get_start_end_field('is_month_start')
@@ -1063,6 +1059,10 @@ cdef class _Timestamp(datetime):
out = get_start_end_field(np.array([self.value], dtype=np.int64), field, freqstr, month_kw)
return out[0]
+ property asm8:
+ def __get__(self):
+ return np.datetime64(self.value, 'ns')
+
cdef PyTypeObject* ts_type = <PyTypeObject*> Timestamp
| I was suprised to find that `NaT.asm8` does not exist though there is a valid nat form for `numpy.datetime64`.
This also adds tests for the asm8 behavior for a random sample of times and `NaT`
| https://api.github.com/repos/pandas-dev/pandas/pulls/11816 | 2015-12-10T23:33:14Z | 2015-12-11T13:05:31Z | 2015-12-11T13:05:31Z | 2015-12-11T13:05:35Z |
ENH: Using built-in round on a series #11763 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index e71830d7dd8d8..a534c3ec0017a 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -34,6 +34,8 @@ Other enhancements
- Handle truncated floats in SAS xport files (:issue:`11713`)
- Added option to hide index in ``Series.to_string`` (:issue:`11729`)
- ``read_excel`` now supports s3 urls of the format ``s3://bucketname/filename`` (:issue:`11447`)
+- A simple version of ``Panel.round()`` is now implemented (:issue:`11763`)
+- For Python 3.x, ``round(DataFrame)``, ``round(Series)``, ``round(Panel)`` will work (:issue:`11763`)
.. _whatsnew_0180.enhancements.rounding:
@@ -90,6 +92,8 @@ In addition, ``.round()`` will be available thru the ``.dt`` accessor of ``Serie
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- The parameter ``out`` has been removed from the ``Series.round()`` method. (:issue:`11763`)
+
Bug in QuarterBegin with n=0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index dd41075ddf92e..9480136e418e9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4376,13 +4376,17 @@ def round(self, decimals=0, out=None):
Returns
-------
DataFrame object
+
+ See Also
+ --------
+ numpy.around
"""
from pandas.tools.merge import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
- yield np.round(vals, decimals[col])
+ yield vals.round(decimals[col])
except KeyError:
yield vals
@@ -4392,8 +4396,8 @@ def _dict_round(df, decimals):
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif com.is_integer(decimals):
- # Dispatch to numpy.round
- new_cols = [np.round(v, decimals) for _, v in self.iteritems()]
+ # Dispatch to Series.round
+ new_cols = [v.round(decimals) for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 725e33083da5d..b75573edc7157 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -749,7 +749,10 @@ def bool(self):
def __abs__(self):
return self.abs()
-
+
+ def __round__(self,decimals=0):
+ return self.round(decimals)
+
#----------------------------------------------------------------------
# Array Interface
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index b57e90509080d..e0d9405a66b75 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -624,6 +624,33 @@ def head(self, n=5):
def tail(self, n=5):
raise NotImplementedError
+
+ def round(self, decimals=0):
+ """
+ Round each value in Panel to a specified number of decimal places.
+
+ .. versionadded:: 0.18.0
+
+ Parameters
+ ----------
+ decimals : int
+ Number of decimal places to round to (default: 0).
+ If decimals is negative, it specifies the number of
+ positions to the left of the decimal point.
+
+ Returns
+ -------
+ Panel object
+
+ See Also
+ --------
+ numpy.around
+ """
+ if com.is_integer(decimals):
+ result = np.apply_along_axis(np.round, 0, self.values)
+ return self._wrap_result(result, axis=0)
+ raise TypeError("decimals must be an integer")
+
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 50616ce61f610..ca55a834a33d2 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1235,15 +1235,29 @@ def idxmax(self, axis=None, out=None, skipna=True):
argmin = idxmin
argmax = idxmax
- @Appender(np.ndarray.round.__doc__)
- def round(self, decimals=0, out=None):
+ def round(self, decimals=0):
"""
+ Round each value in a Series to the given number of decimals.
+
+ Parameters
+ ----------
+ decimals : int
+ Number of decimal places to round to (default: 0).
+ If decimals is negative, it specifies the number of
+ positions to the left of the decimal point.
+
+ Returns
+ -------
+ Series object
+
+ See Also
+ --------
+ numpy.around
"""
- result = _values_from_object(self).round(decimals, out=out)
- if out is None:
- result = self._constructor(result,
- index=self.index).__finalize__(self)
+ result = _values_from_object(self).round(decimals)
+ result = self._constructor(result,
+ index=self.index).__finalize__(self)
return result
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 935c85ca3e29d..f2290877676fa 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
import re
@@ -2969,128 +2969,6 @@ def test_to_csv_engine_kw_deprecation(self):
df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
df.to_csv(engine='python')
- def test_round_dataframe(self):
-
- # GH 2665
-
- # Test that rounding an empty DataFrame does nothing
- df = DataFrame()
- tm.assert_frame_equal(df, df.round())
-
- # Here's the test frame we'll be working with
- df = DataFrame(
- {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
-
- # Default round to integer (i.e. decimals=0)
- expected_rounded = DataFrame(
- {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
- tm.assert_frame_equal(df.round(), expected_rounded)
-
- # Round with an integer
- decimals = 2
- expected_rounded = DataFrame(
- {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
- tm.assert_frame_equal(df.round(decimals), expected_rounded)
-
- # This should also work with np.round (since np.round dispatches to
- # df.round)
- tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
-
- # Round with a list
- round_list = [1, 2]
- with self.assertRaises(TypeError):
- df.round(round_list)
-
- # Round with a dictionary
- expected_rounded = DataFrame(
- {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
- round_dict = {'col1': 1, 'col2': 2}
- tm.assert_frame_equal(df.round(round_dict), expected_rounded)
-
- # Incomplete dict
- expected_partially_rounded = DataFrame(
- {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
- partial_round_dict = {'col2': 1}
- tm.assert_frame_equal(
- df.round(partial_round_dict), expected_partially_rounded)
-
- # Dict with unknown elements
- wrong_round_dict = {'col3': 2, 'col2': 1}
- tm.assert_frame_equal(
- df.round(wrong_round_dict), expected_partially_rounded)
-
- # float input to `decimals`
- non_int_round_dict = {'col1': 1, 'col2': 0.5}
- if sys.version < LooseVersion('2.7'):
- # np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
- with self.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- df.round(non_int_round_dict)
- else:
- with self.assertRaises(TypeError):
- df.round(non_int_round_dict)
-
- # String input
- non_int_round_dict = {'col1': 1, 'col2': 'foo'}
- with self.assertRaises(TypeError):
- df.round(non_int_round_dict)
-
- non_int_round_Series = Series(non_int_round_dict)
- with self.assertRaises(TypeError):
- df.round(non_int_round_Series)
-
- # List input
- non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
- with self.assertRaises(TypeError):
- df.round(non_int_round_dict)
-
- non_int_round_Series = Series(non_int_round_dict)
- with self.assertRaises(TypeError):
- df.round(non_int_round_Series)
-
- # Non integer Series inputs
- non_int_round_Series = Series(non_int_round_dict)
- with self.assertRaises(TypeError):
- df.round(non_int_round_Series)
-
- non_int_round_Series = Series(non_int_round_dict)
- with self.assertRaises(TypeError):
- df.round(non_int_round_Series)
-
- # Negative numbers
- negative_round_dict = {'col1': -1, 'col2': -2}
- big_df = df * 100
- expected_neg_rounded = DataFrame(
- {'col1':[110., 210, 310], 'col2':[100., 200, 300]})
- tm.assert_frame_equal(
- big_df.round(negative_round_dict), expected_neg_rounded)
-
- # nan in Series round
- nan_round_Series = Series({'col1': nan, 'col2':1})
- expected_nan_round = DataFrame(
- {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
- if sys.version < LooseVersion('2.7'):
- # Rounding with decimal is a ValueError in Python < 2.7
- with self.assertRaises(ValueError):
- df.round(nan_round_Series)
- else:
- with self.assertRaises(TypeError):
- df.round(nan_round_Series)
-
- # Make sure this doesn't break existing Series.round
- tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
-
- def test_round_issue(self):
- # GH11611
-
- df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
- index=['first', 'second', 'third'])
-
- dfs = pd.concat((df, df), axis=1)
- rounded = dfs.round()
- self.assertTrue(rounded.index.equals(dfs.index))
-
- decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
- self.assertRaises(ValueError, df.round, decimals)
class TestSeriesFormatting(tm.TestCase):
_multiprocess_can_split_ = True
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 09de3bf4a8046..70548b7c9f42f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -13391,6 +13391,143 @@ def wrapper(x):
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
+ def test_round(self):
+
+ # GH 2665
+
+ # Test that rounding an empty DataFrame does nothing
+ df = DataFrame()
+ tm.assert_frame_equal(df, df.round())
+
+ # Here's the test frame we'll be working with
+ df = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
+
+ # Default round to integer (i.e. decimals=0)
+ expected_rounded = DataFrame(
+ {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
+ tm.assert_frame_equal(df.round(), expected_rounded)
+
+ # Round with an integer
+ decimals = 2
+ expected_rounded = DataFrame(
+ {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
+ tm.assert_frame_equal(df.round(decimals), expected_rounded)
+
+ # This should also work with np.round (since np.round dispatches to
+ # df.round)
+ tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
+
+ # Round with a list
+ round_list = [1, 2]
+ with self.assertRaises(TypeError):
+ df.round(round_list)
+
+ # Round with a dictionary
+ expected_rounded = DataFrame(
+ {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
+ round_dict = {'col1': 1, 'col2': 2}
+ tm.assert_frame_equal(df.round(round_dict), expected_rounded)
+
+ # Incomplete dict
+ expected_partially_rounded = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
+ partial_round_dict = {'col2': 1}
+ tm.assert_frame_equal(
+ df.round(partial_round_dict), expected_partially_rounded)
+
+ # Dict with unknown elements
+ wrong_round_dict = {'col3': 2, 'col2': 1}
+ tm.assert_frame_equal(
+ df.round(wrong_round_dict), expected_partially_rounded)
+
+ # float input to `decimals`
+ non_int_round_dict = {'col1': 1, 'col2': 0.5}
+ if sys.version < LooseVersion('2.7'):
+ # np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
+ with self.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
+ df.round(non_int_round_dict)
+ else:
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
+
+ # String input
+ non_int_round_dict = {'col1': 1, 'col2': 'foo'}
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ # List input
+ non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ # Non integer Series inputs
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ # Negative numbers
+ negative_round_dict = {'col1': -1, 'col2': -2}
+ big_df = df * 100
+ expected_neg_rounded = DataFrame(
+ {'col1':[110., 210, 310], 'col2':[100., 200, 300]})
+ tm.assert_frame_equal(
+ big_df.round(negative_round_dict), expected_neg_rounded)
+
+ # nan in Series round
+ nan_round_Series = Series({'col1': nan, 'col2':1})
+ expected_nan_round = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
+ if sys.version < LooseVersion('2.7'):
+ # Rounding with decimal is a ValueError in Python < 2.7
+ with self.assertRaises(ValueError):
+ df.round(nan_round_Series)
+ else:
+ with self.assertRaises(TypeError):
+ df.round(nan_round_Series)
+
+ # Make sure this doesn't break existing Series.round
+ tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
+
+ def test_round_issue(self):
+ # GH11611
+
+ df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
+ index=['first', 'second', 'third'])
+
+ dfs = pd.concat((df, df), axis=1)
+ rounded = dfs.round()
+ self.assertTrue(rounded.index.equals(dfs.index))
+
+ decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
+ self.assertRaises(ValueError, df.round, decimals)
+
+ def test_built_in_round(self):
+ if not compat.PY3:
+ raise nose.SkipTest('build in round cannot be overriden prior to Python 3')
+
+ # GH11763
+ # Here's the test frame we'll be working with
+ df = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
+
+ # Default round to integer (i.e. decimals=0)
+ expected_rounded = DataFrame(
+ {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
+ tm.assert_frame_equal(round(df), expected_rounded)
+
def test_quantile(self):
from numpy import percentile
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 1f8bcf8c9879f..f12d851a6772d 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1901,6 +1901,22 @@ def test_pct_change(self):
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2./3, .5, 1./3]})})
assert_panel_equal(result, expected)
+
+ def test_round(self):
+ values = [[[-3.2,2.2],[0,-4.8213],[3.123,123.12],
+ [-1566.213,88.88],[-12,94.5]],
+ [[-5.82,3.5],[6.21,-73.272], [-9.087,23.12],
+ [272.212,-99.99],[23,-76.5]]]
+ evalues = [[[float(np.around(i)) for i in j] for j in k] for k in values]
+ p = Panel(values, items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A','B'])
+ expected = Panel(evalues, items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A','B'])
+ result = p.round()
+ self.assert_panel_equal(expected, result)
+
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index bbdd7c3637981..34d2d0de35977 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1,4 +1,4 @@
-# coding=utf-8
+# coding=utf-8
# pylint: disable-msg=E1101,W0612
import re
@@ -3003,11 +3003,26 @@ def _check_accum_op(self, name):
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
- result = np.round(self.ts, 2)
+ result = self.ts.round(2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index,
name='ts')
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
+
+ def test_built_in_round(self):
+ if not compat.PY3:
+ raise nose.SkipTest('build in round cannot be overriden prior to Python 3')
+
+ s = Series([1.123, 2.123, 3.123], index=lrange(3))
+ result = round(s)
+ expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
+ self.assert_series_equal(result, expected_rounded0)
+
+ decimals = 2
+ expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
+ result = round(s, decimals)
+ self.assert_series_equal(result, expected_rounded)
+
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
| closes #11763
This contains changes to support `round()` (works in Python 3 only) for DataFrame, Series and Panel. Also includes `Panel.round()`. Note on code changes - moved code from test_format.py to test_frame.py because it made more logical sense to put the testing code for `round()` into test_frame.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/11809 | 2015-12-09T23:17:21Z | 2015-12-15T22:51:56Z | 2015-12-15T22:51:56Z | 2016-12-09T16:42:35Z |
be more careful with half-opened date_range | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index de5439885a6a7..5b8c282d3e4ed 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -190,6 +190,7 @@ Bug Fixes
- Bug in ``GroupBy.size`` when data-frame is empty. (:issue:`11699`)
- Bug in ``Period.end_time`` when a multiple of time period is requested (:issue:`11738`)
- Regression in ``.clip`` with tz-aware datetimes (:issue:`11838`)
+- Bug in ``date_range`` when the boundaries fell on the frequency (:issue:`11804`)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 14acfb57afe56..091c6245e346d 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -486,10 +486,9 @@ def _generate(cls, start, end, periods, name, offset,
index = index.view(_NS_DTYPE)
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
-
- if not left_closed:
+ if not left_closed and len(index) and index[0] == start:
index = index[1:]
- if not right_closed:
+ if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
return index
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 42136c3433977..00336615aeab4 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -477,13 +477,37 @@ def test_range_closed(self):
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
+ expected_left = left
+ expected_right = right
- expected_left = closed[:-1]
- expected_right = closed[1:]
+ if end == closed[-1]:
+ expected_left = closed[:-1]
+ if begin == closed[0]:
+ expected_right = closed[1:]
self.assertTrue(expected_left.equals(left))
self.assertTrue(expected_right.equals(right))
+ def test_range_closed_boundary(self):
+ # GH 11804
+ for closed in ['right', 'left', None]:
+ right_boundary = date_range('2015-09-12', '2015-12-01', freq='QS-MAR', closed=closed)
+ left_boundary = date_range('2015-09-01', '2015-09-12', freq='QS-MAR', closed=closed)
+ both_boundary = date_range('2015-09-01', '2015-12-01', freq='QS-MAR', closed=closed)
+ expected_right = expected_left = expected_both = both_boundary
+
+ if closed == 'right':
+ expected_left = both_boundary[1:]
+ if closed == 'left':
+ expected_right = both_boundary[:-1]
+ if closed is None:
+ expected_right = both_boundary[1:]
+ expected_left = both_boundary[:-1]
+
+ self.assertTrue(right_boundary.equals(expected_right))
+ self.assertTrue(left_boundary.equals(expected_left))
+ self.assertTrue(both_boundary.equals(expected_both))
+
def test_years_only(self):
# GH 6961
dr = date_range('2014', '2015', freq='M')
| closes #11804
This changes the definition of 'left' and 'right'. If the generated_dates are strictly within (start, end), then changing the value of closed has no impact. Before the code would always remove the leftmost date or rightmost date independently of the value of start and end.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11806 | 2015-12-09T18:54:09Z | 2015-12-17T22:43:13Z | 2015-12-17T22:43:13Z | 2015-12-18T19:33:16Z |
Fix import-time DeprecationWarning on Python 3.5 | diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index ad34fc4b41d95..84d0cc61be8e6 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4773,8 +4773,8 @@ class TimeRE(dict):
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
- whitespace_replacement = re_compile('\s+')
- format = whitespace_replacement.sub('\s+', format)
+ whitespace_replacement = re_compile(r'\s+')
+ format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
| Fixes the following:
```
$ python3.5 -Wall
Python 3.5.0 (default, Sep 18 2015, 09:31:01)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-11)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas.tslib
pandas/__init__.py:7: DeprecationWarning: bad escape \s
from pandas import hashtable, tslib, lib
```
In Python 3.5 re.sub replacement patterns containing unrecognized character escapes are deprecated. This raw string format is needed to substitute `\s` literally.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11798 | 2015-12-08T22:57:18Z | 2015-12-10T01:30:31Z | 2015-12-10T01:30:31Z | 2015-12-10T17:04:35Z |
BUG: bug in deep copy of datetime tz-aware objects, #11794 | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index c1b7ff82f4c76..94ca376df8436 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -170,7 +170,7 @@ Bug Fixes
- Bug in ``Timedelta.round`` with negative values (:issue:`11690`)
- Bug in ``.loc`` against ``CategoricalIndex`` may result in normal ``Index`` (:issue:`11586`)
- Bug in ``DataFrame.info`` when duplicated column names exist (:issue:`11761`)
-
+- Bug in ``.copy`` of datetime tz-aware objects (:issue:`11794`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index b3a42335e14da..e81b58a3f7eef 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -232,7 +232,7 @@ def _isnull_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
- if dtype.kind in ('O', 'S', 'U'):
+ if is_string_dtype(dtype):
if is_categorical_dtype(values):
from pandas import Categorical
if not isinstance(values, Categorical):
@@ -243,7 +243,7 @@ def _isnull_ndarraylike(obj):
# Working around NumPy ticket 1542
shape = values.shape
- if dtype.kind in ('S', 'U'):
+ if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
@@ -267,11 +267,11 @@ def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
- if dtype.kind in ('O', 'S', 'U'):
+ if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
- if values.dtype.kind in ('S', 'U'):
+ if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
@@ -2208,13 +2208,17 @@ def is_numeric_v_string_like(a, b):
is_a_numeric_array = is_a_array and is_numeric_dtype(a)
is_b_numeric_array = is_b_array and is_numeric_dtype(b)
+ is_a_string_array = is_a_array and is_string_like_dtype(a)
+ is_b_string_array = is_b_array and is_string_like_dtype(b)
is_a_scalar_string_like = not is_a_array and is_string_like(a)
is_b_scalar_string_like = not is_b_array and is_string_like(b)
return (
is_a_numeric_array and is_b_scalar_string_like) or (
- is_b_numeric_array and is_a_scalar_string_like
+ is_b_numeric_array and is_a_scalar_string_like) or (
+ is_a_numeric_array and is_b_string_array) or (
+ is_b_numeric_array and is_a_string_array
)
def is_datetimelike_v_numeric(a, b):
@@ -2257,6 +2261,15 @@ def is_numeric_dtype(arr_or_dtype):
and not issubclass(tipo, (np.datetime64, np.timedelta64)))
+def is_string_dtype(arr_or_dtype):
+ dtype = _get_dtype(arr_or_dtype)
+ return dtype.kind in ('O', 'S', 'U')
+
+def is_string_like_dtype(arr_or_dtype):
+ # exclude object as its a mixed dtype
+ dtype = _get_dtype(arr_or_dtype)
+ return dtype.kind in ('S', 'U')
+
def is_float_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
diff --git a/pandas/core/dtypes.py b/pandas/core/dtypes.py
index 0b13471aadcfb..69957299aa9bb 100644
--- a/pandas/core/dtypes.py
+++ b/pandas/core/dtypes.py
@@ -65,6 +65,9 @@ def __hash__(self):
def __eq__(self, other):
raise NotImplementedError("sub-classes should implement an __eq__ method")
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
@classmethod
def is_dtype(cls, dtype):
""" Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 28c845f63f9d4..efcb6eb818087 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -168,17 +168,11 @@ def make_block(self, values, placement=None, ndim=None, **kwargs):
return make_block(values, placement=placement, ndim=ndim, **kwargs)
- def make_block_same_class(self, values, placement, copy=False, fastpath=True,
- **kwargs):
- """
- Wrap given values in a block of same type as self.
-
- `kwargs` are used in SparseBlock override.
-
- """
- if copy:
- values = values.copy()
- return make_block(values, placement, klass=self.__class__,
+ def make_block_same_class(self, values, placement=None, fastpath=True, **kwargs):
+ """ Wrap given values in a block of same type as self. """
+ if placement is None:
+ placement = self.mgr_locs
+ return make_block(values, placement=placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
@@ -573,12 +567,11 @@ def to_native_types(self, slicer=None, na_rep='nan', quoting=None, **kwargs):
# block actions ####
def copy(self, deep=True, mgr=None):
+ """ copy constructor """
values = self.values
if deep:
values = values.copy()
- return self.make_block(values,
- klass=self.__class__,
- fastpath=True)
+ return self.make_block_same_class(values)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
@@ -2140,6 +2133,13 @@ def __init__(self, values, placement, ndim=2,
placement=placement,
ndim=ndim,
**kwargs)
+ def copy(self, deep=True, mgr=None):
+ """ copy constructor """
+ values = self.values
+ if deep:
+ values = values.copy(deep=True)
+ return self.make_block_same_class(values)
+
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for external
compat with ndarray, export as a ndarray of Timestamps """
@@ -3257,10 +3257,14 @@ def get_scalar(self, tup):
full_loc = list(ax.get_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
- full_loc[0] = self._blklocs[full_loc[0]]
+ values = blk.values
# FIXME: this may return non-upcasted types?
- return blk.values[tuple(full_loc)]
+ if values.ndim == 1:
+ return values[full_loc[1]]
+
+ full_loc[0] = self._blklocs[full_loc[0]]
+ return values[tuple(full_loc)]
def delete(self, item):
"""
@@ -4415,11 +4419,14 @@ def _putmask_smart(v, m, n):
try:
nn = n[m]
nn_at = nn.astype(v.dtype)
- comp = (nn == nn_at)
- if is_list_like(comp) and comp.all():
- nv = v.copy()
- nv[m] = nn_at
- return nv
+
+ # avoid invalid dtype comparisons
+ if not is_numeric_v_string_like(nn, nn_at):
+ comp = (nn == nn_at)
+ if is_list_like(comp) and comp.all():
+ nv = v.copy()
+ nv[m] = nn_at
+ return nv
except (ValueError, IndexError, TypeError):
pass
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index fbab0d2a92203..0f46c1106ed08 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -147,6 +147,8 @@ def create_mgr(descr, item_shape=None):
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
+ if not len(d):
+ continue
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
@@ -324,7 +326,8 @@ class TestBlockManager(tm.TestCase):
def setUp(self):
self.mgr = create_mgr('a: f8; b: object; c: f8; d: object; e: f8;'
- 'f: bool; g: i8; h: complex')
+ 'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
+ 'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def test_constructor_corner(self):
pass
@@ -476,16 +479,24 @@ def test_set_change_dtype_slice(self): # GH8850
DataFrame([[3], [6]], columns=cols[2:]))
def test_copy(self):
- shallow = self.mgr.copy(deep=False)
-
- # we don't guaranteee block ordering
- for blk in self.mgr.blocks:
- found = False
- for cp_blk in shallow.blocks:
- if cp_blk.values is blk.values:
- found = True
- break
- self.assertTrue(found)
+ cp = self.mgr.copy(deep=False)
+ for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
+
+ # view assertion
+ self.assertTrue(cp_blk.equals(blk))
+ self.assertTrue(cp_blk.values.base is blk.values.base)
+
+ cp = self.mgr.copy(deep=True)
+ for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
+
+ # copy assertion
+ # we either have a None for a base or in case of some blocks it is an array (e.g. datetimetz),
+ # but was copied
+ self.assertTrue(cp_blk.equals(blk))
+ if cp_blk.values.base is not None and blk.values.base is not None:
+ self.assertFalse(cp_blk.values.base is blk.values.base)
+ else:
+ self.assertTrue(cp_blk.values.base is None and blk.values.base is None)
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
@@ -688,7 +699,10 @@ def test_consolidate_ordering_issues(self):
self.mgr.set('g', randn(N))
self.mgr.set('h', randn(N))
+ # we have datetime/tz blocks in self.mgr
cons = self.mgr.consolidate()
+ self.assertEqual(cons.nblocks, 4)
+ cons = self.mgr.consolidate().get_numeric_data()
self.assertEqual(cons.nblocks, 1)
assert_almost_equal(cons.blocks[0].mgr_locs,
np.arange(len(cons.items)))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 0fb66ee2dfa7c..bbdd7c3637981 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5111,12 +5111,50 @@ def test_cov(self):
self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
def test_copy(self):
- ts = self.ts.copy()
- ts[::2] = np.NaN
+ for deep in [None, False, True]:
+ s = Series(np.arange(10),dtype='float64')
+
+ # default deep is True
+ if deep is None:
+ s2 = s.copy()
+ else:
+ s2 = s.copy(deep=deep)
+
+ s2[::2] = np.NaN
+
+ if deep is None or deep is True:
+ # Did not modify original Series
+ self.assertTrue(np.isnan(s2[0]))
+ self.assertFalse(np.isnan(s[0]))
+ else:
- # Did not modify original Series
- self.assertFalse(np.isnan(self.ts[0]))
+ # we DID modify the original Series
+ self.assertTrue(np.isnan(s2[0]))
+ self.assertTrue(np.isnan(s[0]))
+
+ # GH 11794
+ # copy of tz-aware
+ expected = Series([Timestamp('2012/01/01', tz='UTC')])
+ expected2 = Series([Timestamp('1999/01/01', tz='UTC')])
+
+ for deep in [None, False, True]:
+ s = Series([Timestamp('2012/01/01', tz='UTC')])
+
+ if deep is None:
+ s2 = s.copy()
+ else:
+ s2 = s.copy(deep=deep)
+
+ s2[0] = pd.Timestamp('1999/01/01', tz='UTC')
+
+ # default deep is True
+ if deep is None or deep is True:
+ assert_series_equal(s, expected)
+ assert_series_equal(s2, expected2)
+ else:
+ assert_series_equal(s, expected2)
+ assert_series_equal(s2, expected2)
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
| closes #11794
cleanups in copy / remove warnings
| https://api.github.com/repos/pandas-dev/pandas/pulls/11796 | 2015-12-08T14:42:16Z | 2015-12-08T20:14:58Z | 2015-12-08T20:14:58Z | 2015-12-08T20:14:58Z |
DOC: Add examples for MultiIndex.get_locs + cleanups | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index d79937829cf3f..61e28dde2e34c 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -229,7 +229,7 @@ class Categorical(PandasObject):
See also
--------
- pandas.api.types.CategoricalDtype
+ pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 35f738b347a3e..9ffac0832062d 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -72,8 +72,8 @@ class MultiIndex(Index):
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
- methods :meth:`MultiIndex.from_arrays``, :meth:`MultiIndex.from_product``
- and :meth:`MultiIndex.from_tuples``. For example (using ``.from_arrays``):
+ methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
+ and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
@@ -1982,33 +1982,41 @@ def _partial_tup_index(self, tup, side='left'):
def get_loc(self, key, method=None):
"""
- Get integer location, slice or boolean mask for requested label or
- tuple. If the key is past the lexsort depth, the return may be a
- boolean mask array, otherwise it is always a slice or int.
+ Get location for a label or a tuple of labels as an integer, slice or
+ boolean mask.
Parameters
----------
- key : label or tuple
+ key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
+ If the key is past the lexsort depth, the return may be a
+ boolean mask array, otherwise it is always a slice or int.
Examples
---------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
+
>>> mi.get_loc('b')
slice(1, 3, None)
+
>>> mi.get_loc(('b', 'e'))
1
+ Notes
+ ------
+ The key cannot be a slice, list of same-level labels, a boolean mask,
+ or a sequence of such. If you want to use those, use
+ :meth:`MultiIndex.get_locs` instead.
+
See also
--------
Index.get_loc : get_loc method for (single-level) index.
- get_locs : Given a tuple of slices/lists/labels/boolean indexer to a
- level-wise spec, produce an indexer to extract those
- locations.
+ MultiIndex.get_locs : Get location for a label/slice/list/mask or a
+ sequence of such.
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
@@ -2117,8 +2125,9 @@ def get_loc_level(self, key, level=0, drop_level=True):
See Also
---------
- MultiIndex.get_loc : Get integer location, slice or boolean mask for
- requested label or tuple.
+ MultiIndex.get_loc : Get location for a label or a tuple of labels.
+ MultiIndex.get_locs : Get location for a label/slice/list/mask or a
+ sequence of such
"""
def maybe_droplevels(indexer, levels, drop_level):
@@ -2328,23 +2337,41 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
j = labels.searchsorted(loc, side='right')
return slice(i, j)
- def get_locs(self, tup):
+ def get_locs(self, seq):
"""
- Given a tuple of slices/lists/labels/boolean indexer to a level-wise
- spec produce an indexer to extract those locations
+ Get location for a given label/slice/list/mask or a sequence of such as
+ an array of integers.
Parameters
----------
- key : tuple of (slices/list/labels)
+ seq : label/slice/list/mask or a sequence of such
+ You should use one of the above for each level.
+ If a level should not be used, set it to ``slice(None)``.
Returns
-------
- locs : integer list of locations or boolean indexer suitable
- for passing to iloc
+ locs : array of integers suitable for passing to iloc
+
+ Examples
+ ---------
+ >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
+
+ >>> mi.get_locs('b')
+ array([1, 2], dtype=int64)
+
+ >>> mi.get_locs([slice(None), ['e', 'f']])
+ array([1, 2], dtype=int64)
+
+ >>> mi.get_locs([[True, False, True], slice('e', 'f')])
+ array([2], dtype=int64)
+
+ See also
+ --------
+ MultiIndex.get_loc : Get location for a label or a tuple of labels.
"""
# must be lexsorted to at least as many levels
- true_slices = [i for (i, s) in enumerate(is_true_slices(tup)) if s]
+ true_slices = [i for (i, s) in enumerate(is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
@@ -2377,7 +2404,7 @@ def _update_indexer(idxr, indexer=indexer):
return indexer
return indexer & idxr
- for i, k in enumerate(tup):
+ for i, k in enumerate(seq):
if is_bool_indexer(k):
# a boolean indexer, must be the same length!
| - [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Adds examples to ``MultiIndex.get_locs`` and also some changes to ``.get_loc`` and ``.get_loc_values``. | https://api.github.com/repos/pandas-dev/pandas/pulls/17675 | 2017-09-25T22:33:48Z | 2017-09-30T15:25:58Z | 2017-09-30T15:25:58Z | 2017-10-09T21:00:07Z |
Fixed Value Error when doing HDFStore.Select of contiguous mixed-data | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 06f19782682b0..da615c1176cd1 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -580,6 +580,7 @@ I/O
- Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`)
- Bug in :func:`read_csv` where automatic delimiter detection caused a ``TypeError`` to be thrown when a bad line was encountered rather than the correct error message (:issue:`13374`)
- Bug in ``DataFrame.to_html()`` with ``notebook=True`` where DataFrames with named indices or non-MultiIndex indices had undesired horizontal or vertical alignment for column or row labels, respectively (:issue:`16792`)
+- Bug in :func:`HDFStore.select` when reading a contiguous mixed-data table featuring VLArray (:issue:`17021`)
Plotting
^^^^^^^^
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4d300b200971a..ea69116ec363d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2441,13 +2441,12 @@ def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
- data = node[start:stop]
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
- ret = data[0]
+ ret = node[0][start:stop]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
@@ -2456,7 +2455,7 @@ def read_array(self, key, start=None, stop=None):
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
- ret = data
+ ret = node[start:stop]
if dtype == u('datetime64'):
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index ff21afc11d220..ae8f7221d48ac 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -4387,6 +4387,19 @@ def test_path_pathlib(self):
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
+ @pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])
+ def test_contiguous_mixed_data_table(self, start, stop):
+ # GH 17021
+ # ValueError when reading a contiguous mixed-data table ft. VLArray
+ df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
+ 'b': Series(['ab', 'cd', 'ab'])})
+
+ with ensure_clean_store(self.path) as store:
+ store.append('test_dataset', df)
+
+ result = store.select('test_dataset', start=start, stop=stop)
+ assert_frame_equal(df[start:stop], result)
+
def test_path_pathlib_hdfstore(self):
df = tm.makeDataFrame()
| Fixed Value Error when doing HDFStore.Select of contiguous mixed-data table ft. VLArray
Closes 17021
Signed-off-by: Amol Kahat <akahat@redhat.com>
- [x] closes #17021
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/17670 | 2017-09-25T18:50:38Z | 2017-09-28T14:12:51Z | 2017-09-28T14:12:50Z | 2017-09-28T14:12:53Z |
Last of the timezones funcs | diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd
index e5d1343e1c984..95e0474b3a174 100644
--- a/pandas/_libs/tslibs/timezones.pxd
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -1,8 +1,6 @@
# -*- coding: utf-8 -*-
# cython: profile=False
-from numpy cimport ndarray
-
cdef bint is_utc(object tz)
cdef bint is_tzlocal(object tz)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 48d82996a0bd0..7f778dde86e23 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-
# cython: profile=False
+# cython: linetrace=False
+# distutils: define_macros=CYTHON_TRACE=0
+# distutils: define_macros=CYTHON_TRACE_NOGIL=0
cimport cython
from cython cimport Py_ssize_t
@@ -275,3 +278,19 @@ cdef object get_dst_info(object tz):
dst_cache[cache_key] = (trans, deltas, typ)
return dst_cache[cache_key]
+
+
+def infer_tzinfo(start, end):
+ if start is not None and end is not None:
+ tz = start.tzinfo
+ if end.tzinfo:
+ if not (get_timezone(tz) == get_timezone(end.tzinfo)):
+ msg = 'Inputs must both have the same timezone, {tz1} != {tz2}'
+ raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo))
+ elif start is not None:
+ tz = start.tzinfo
+ elif end is not None:
+ tz = end.tzinfo
+ else:
+ tz = None
+ return tz
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 39dc24642235b..9127864eab8a1 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -443,7 +443,7 @@ def _generate(cls, start, end, periods, name, offset,
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
- inferred_tz = tools._infer_tzinfo(start, end)
+ inferred_tz = timezones.infer_tzinfo(start, end)
except:
raise TypeError('Start and end cannot both be tz-aware with '
'different timezones')
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 97ac8445faf4c..4f7f14cabee97 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -4,7 +4,6 @@
from pandas._libs import tslib
from pandas._libs.tslibs.strptime import array_strptime
-from pandas._libs.tslibs.timezones import get_timezone
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import ( # noqa
parse_time_string,
@@ -29,24 +28,6 @@
from pandas.core import algorithms
-def _infer_tzinfo(start, end):
- def _infer(a, b):
- tz = a.tzinfo
- if b and b.tzinfo:
- if not (get_timezone(tz) == get_timezone(b.tzinfo)):
- raise AssertionError('Inputs must both have the same timezone,'
- ' {timezone1} != {timezone2}'
- .format(timezone1=tz, timezone2=b.tzinfo))
- return tz
-
- tz = None
- if start is not None:
- tz = _infer(start, end)
- elif end is not None:
- tz = _infer(end, start)
- return tz
-
-
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index e7b470e01e2af..aa8fe90ea6500 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -12,7 +12,6 @@
from datetime import datetime, timedelta, tzinfo, date
import pandas.util.testing as tm
-import pandas.core.tools.datetimes as tools
import pandas.tseries.offsets as offsets
from pandas.compat import lrange, zip
from pandas.core.indexes.datetimes import bdate_range, date_range
@@ -646,20 +645,20 @@ def test_infer_tz(self):
start = self.localize(eastern, _start)
end = self.localize(eastern, _end)
- assert (tools._infer_tzinfo(start, end) is self.localize(
- eastern, _start).tzinfo)
- assert (tools._infer_tzinfo(start, None) is self.localize(
- eastern, _start).tzinfo)
- assert (tools._infer_tzinfo(None, end) is self.localize(eastern,
- _end).tzinfo)
+ assert (timezones.infer_tzinfo(start, end) is
+ self.localize(eastern, _start).tzinfo)
+ assert (timezones.infer_tzinfo(start, None) is
+ self.localize(eastern, _start).tzinfo)
+ assert (timezones.infer_tzinfo(None, end) is
+ self.localize(eastern, _end).tzinfo)
start = utc.localize(_start)
end = utc.localize(_end)
- assert (tools._infer_tzinfo(start, end) is utc)
+ assert (timezones.infer_tzinfo(start, end) is utc)
end = self.localize(eastern, _end)
- pytest.raises(Exception, tools._infer_tzinfo, start, end)
- pytest.raises(Exception, tools._infer_tzinfo, end, start)
+ pytest.raises(Exception, timezones.infer_tzinfo, start, end)
+ pytest.raises(Exception, timezones.infer_tzinfo, end, start)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10,
| Moves `tools.datetimes._infer_tzinfo` to `tslibs.timezones._infer_tzinfo`.
Refactor a large timezone-specific chunk of `tslib.tz_localize_to_utc` to `tslibs.timezones._infer_dst`
The new func `timezones._infer_dst` is taken out of `tslib.tz_localize_to_utc`. It is very nearly a cut/paste. The only change is removed a couple calls to `Timestamp` that are used for rendering an exception message, used np.datetime64 in its place.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17669 | 2017-09-25T18:12:51Z | 2017-09-29T10:16:36Z | 2017-09-29T10:16:36Z | 2017-10-30T16:23:13Z |
DOC: improve docstring of function where | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3d55e07df6eac..8e2a91ee9fd61 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5822,13 +5822,15 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
_shared_docs['where'] = ("""
Return an object of same shape as self and whose corresponding
- entries are from self where cond is %(cond)s and otherwise are from
- other.
+ entries are from self where `cond` is %(cond)s and otherwise are from
+ `other`.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
- If cond is callable, it is computed on the %(klass)s and
+ Where `cond` is %(cond)s, keep the original value. Where
+ %(cond_rev)s, replace with corresponding value from `other`.
+ If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
@@ -5836,6 +5838,8 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
A callable can be used as cond.
other : scalar, %(klass)s, or callable
+ Entries where `cond` is %(cond_rev)s are replaced with
+ corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
@@ -5881,6 +5885,20 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
3 3.0
4 4.0
+ >>> s.mask(s > 0)
+ 0 0.0
+ 1 NaN
+ 2 NaN
+ 3 NaN
+ 4 NaN
+
+ >>> s.where(s > 1, 10)
+ 0 10.0
+ 1 10.0
+ 2 2.0
+ 3 3.0
+ 4 4.0
+
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> m = df %% 3 == 0
>>> df.where(m, -df)
@@ -5911,7 +5929,8 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
""")
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="True",
- name='where', name_other='mask'))
+ cond_rev="False", name='where',
+ name_other='mask'))
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
try_cast=False, raise_on_error=True):
@@ -5920,7 +5939,8 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
raise_on_error)
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False",
- name='mask', name_other='where'))
+ cond_rev="True", name='mask',
+ name_other='where'))
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
try_cast=False, raise_on_error=True):
| Clarified parameters `cond` and `other`.
Added example with scalar `other`. | https://api.github.com/repos/pandas-dev/pandas/pulls/17665 | 2017-09-25T12:01:08Z | 2017-09-25T23:19:22Z | 2017-09-25T23:19:22Z | 2017-09-25T23:19:26Z |
COMPAT: skip 32-bit test on int repr | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index b3209da6449d6..230a5806ccb2e 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -213,8 +213,8 @@ def test_itertuples(self):
assert (list(dfaa.itertuples()) ==
[(0, 1, 1), (1, 2, 2), (2, 3, 3)])
- # repr with be int/long on windows
- if not compat.is_platform_windows():
+ # repr with be int/long on 32-bit/windows
+ if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (repr(list(df.itertuples(name=None))) ==
'[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')
| closes #17121
| https://api.github.com/repos/pandas-dev/pandas/pulls/17664 | 2017-09-25T11:10:45Z | 2017-09-25T14:12:46Z | 2017-09-25T14:12:46Z | 2017-09-25T14:21:23Z |
DOC: Correct wrong doc string for MultiIndex.get_loc_level + added examples | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 0b7c5f414b178..a3accb58cad19 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2056,16 +2056,42 @@ def _maybe_str_to_time_stamp(key, lev):
def get_loc_level(self, key, level=0, drop_level=True):
"""
- Get integer location slice for requested label or tuple
+ Get both the location for the requested label(s) and the
+ resulting sliced index.
Parameters
----------
- key : label or tuple
- level : int/level name or list thereof
+ key : label or sequence of labels
+ level : int/level name or list thereof, optional
+ drop_level : bool, default True
+ if ``False``, the resulting index will not drop any level.
Returns
-------
- loc : int or slice object
+ loc : A 2-tuple where the elements are:
+ Element 0: int, slice object or boolean array
+ Element 1: The resulting sliced multiindex/index. If the key
+ contains all levels, this will be ``None``.
+
+ Examples
+ --------
+ >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
+ ... names=['A', 'B'])
+
+ >>> mi.get_loc_level('b')
+ (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
+
+ >>> mi.get_loc_level('e', level='B')
+ (array([False, True, False], dtype=bool),
+ Index(['b'], dtype='object', name='A'))
+
+ >>> mi.get_loc_level(['b', 'e'])
+ (1, None)
+
+ See Also
+ ---------
+ MultiIndex.get_loc : Get integer location, slice or boolean mask for
+ requested label or tuple.
"""
def maybe_droplevels(indexer, levels, drop_level):
| - [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The doc string for MultiIndex.get_loc_level is currently wrong. This corrects that + adds some examples. | https://api.github.com/repos/pandas-dev/pandas/pulls/17663 | 2017-09-25T08:56:40Z | 2017-09-25T11:54:03Z | 2017-09-25T11:54:03Z | 2017-09-26T09:39:43Z |
CLN: replace %s syntax with .format in io | diff --git a/pandas/io/clipboard/exceptions.py b/pandas/io/clipboard/exceptions.py
index 413518e53660a..d948ad414327c 100644
--- a/pandas/io/clipboard/exceptions.py
+++ b/pandas/io/clipboard/exceptions.py
@@ -8,5 +8,5 @@ class PyperclipException(RuntimeError):
class PyperclipWindowsException(PyperclipException):
def __init__(self, message):
- message += " (%s)" % ctypes.WinError()
+ message += " ({err})".format(err=ctypes.WinError())
super(PyperclipWindowsException, self).__init__(message)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index faafdba435ff2..afecd76c498ef 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -165,7 +165,7 @@ def register_writer(klass):
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
- config.register_option("io.excel.%s.writer" % ext,
+ config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)
@@ -190,7 +190,8 @@ def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
- raise ValueError("No Excel writer '%s'" % engine_name)
+ raise ValueError("No Excel writer '{engine}'"
+ .format(engine=engine_name))
@Appender(_read_excel_doc)
@@ -259,7 +260,7 @@ def __init__(self, io, **kwds):
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
- raise ValueError("Unknown engine: %s" % engine)
+ raise ValueError("Unknown engine: {engine}".format(engine=engine))
# If io is a url, want to keep the data as bytes so can't pass
# to get_filepath_or_buffer()
@@ -445,7 +446,7 @@ def _parse_cell(cell_contents, cell_typ):
for asheetname in sheets:
if verbose:
- print("Reading sheet %s" % asheetname)
+ print("Reading sheet {sheet}".format(sheet=asheetname))
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
@@ -634,7 +635,7 @@ def _conv_value(val):
elif is_bool(val):
val = bool(val)
elif isinstance(val, Period):
- val = "%s" % val
+ val = "{val}".format(val=val)
elif is_list_like(val):
val = str(val)
@@ -697,9 +698,11 @@ def __new__(cls, path, engine=None, **kwargs):
ext = 'xlsx'
try:
- engine = config.get_option('io.excel.%s.writer' % ext)
+ engine = config.get_option('io.excel.{ext}.writer'
+ .format(ext=ext))
except KeyError:
- error = ValueError("No engine for filetype: '%s'" % ext)
+ error = ValueError("No engine for filetype: '{ext}'"
+ .format(ext=ext))
raise error
cls = get_writer(engine)
@@ -787,8 +790,9 @@ def check_extension(cls, ext):
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
- msg = (u("Invalid extension for engine '%s': '%s'") %
- (pprint_thing(cls.engine), pprint_thing(ext)))
+ msg = (u("Invalid extension for engine '{engine}': '{ext}'")
+ .format(engine=pprint_thing(cls.engine),
+ ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
@@ -813,8 +817,8 @@ class _Openpyxl1Writer(ExcelWriter):
def __init__(self, path, engine=None, **engine_kwargs):
if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver):
raise ValueError('Installed openpyxl is not supported at this '
- 'time. Use {0}.x.y.'
- .format(self.openpyxl_majorver))
+ 'time. Use {majorver}.x.y.'
+ .format(majorver=self.openpyxl_majorver))
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
@@ -854,7 +858,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
- xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
+ xcell = wks.cell("{col}{row}".format(col=colletter,
+ row=startrow + cell.row + 1))
if (isinstance(cell.val, compat.string_types) and
xcell.data_type_for_value(cell.val) != xcell.TYPE_STRING):
xcell.set_value_explicit(cell.val)
@@ -876,10 +881,12 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
- wks.merge_cells('%s%s:%s%s' % (cletterstart,
- startrow + cell.row + 1,
- cletterend,
- startrow + cell.mergestart + 1))
+ wks.merge_cells('{start}{row}:{end}{mergestart}'
+ .format(start=cletterstart,
+ row=startrow + cell.row + 1,
+ end=cletterend,
+ mergestart=startrow +
+ cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
@@ -895,7 +902,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
- xcell = wks.cell("%s%s" % (colletter, row))
+ xcell = wks.cell("{col}{row}"
+ .format(col=colletter, row=row))
for field in style.__fields__:
xcell.style.__setattr__(
field, style.__getattribute__(field))
@@ -955,7 +963,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
- xcell = wks["%s%s" % (colletter, startrow + cell.row + 1)]
+ xcell = wks["{col}{row}"
+ .format(col=colletter, row=startrow + cell.row + 1)]
xcell.value = _conv_value(cell.val)
style_kwargs = {}
@@ -977,10 +986,12 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
- wks.merge_cells('%s%s:%s%s' % (cletterstart,
- startrow + cell.row + 1,
- cletterend,
- startrow + cell.mergestart + 1))
+ wks.merge_cells('{start}{row}:{end}{mergestart}'
+ .format(start=cletterstart,
+ row=startrow + cell.row + 1,
+ end=cletterend,
+ mergestart=startrow +
+ cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
@@ -996,7 +1007,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
- xcell = wks["%s%s" % (colletter, row)]
+ xcell = wks["{col}{row}"
+ .format(col=colletter, row=row)]
xcell.style = xcell.style.copy(**style_kwargs)
@classmethod
@@ -1030,7 +1042,7 @@ def _convert_to_style_kwargs(cls, style_dict):
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
- _conv_to_x = getattr(cls, '_convert_to_{0}'.format(k),
+ _conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
@@ -1505,17 +1517,19 @@ def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
"""
if hasattr(item, 'items'):
if firstlevel:
- it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
+ it = ["{key}: {val}"
+ .format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
- out = "%s " % (line_sep).join(it)
+ out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
- it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
+ it = ["{key} {val}"
+ .format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
- out = "%s " % (field_sep).join(it)
+ out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
- item = "%s" % item
+ item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
diff --git a/pandas/io/html.py b/pandas/io/html.py
index a4acb26af5259..b5aaffcf710c2 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -439,14 +439,15 @@ def _parse_tables(self, doc, match, attrs):
unique_tables.add(table)
if not result:
- raise ValueError("No tables found matching pattern %r" %
- match.pattern)
+ raise ValueError("No tables found matching pattern {patt!r}"
+ .format(patt=match.pattern))
return result
def _setup_build_doc(self):
raw_text = _read(self.io)
if not raw_text:
- raise ValueError('No text parsed from document: %s' % self.io)
+ raise ValueError('No text parsed from document: {doc}'
+ .format(doc=self.io))
return raw_text
def _build_doc(self):
@@ -473,8 +474,8 @@ def _build_xpath_expr(attrs):
if 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
- s = [u("@%s=%r") % (k, v) for k, v in iteritems(attrs)]
- return u('[%s]') % ' and '.join(s)
+ s = [u("@{key}={val!r}").format(key=k, val=v) for k, v in iteritems(attrs)]
+ return u('[{expr}]').format(expr=' and '.join(s))
_re_namespace = {'re': 'http://exslt.org/regular-expressions'}
@@ -517,8 +518,8 @@ def _parse_tables(self, doc, match, kwargs):
# 1. check all descendants for the given pattern and only search tables
# 2. go up the tree until we find a table
- query = '//table//*[re:test(text(), %r)]/ancestor::table'
- xpath_expr = u(query) % pattern
+ query = '//table//*[re:test(text(), {patt!r})]/ancestor::table'
+ xpath_expr = u(query).format(patt=pattern)
# if any table attributes were given build an xpath expression to
# search for them
@@ -528,7 +529,8 @@ def _parse_tables(self, doc, match, kwargs):
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)
if not tables:
- raise ValueError("No tables found matching regex %r" % pattern)
+ raise ValueError("No tables found matching regex {patt!r}"
+ .format(patt=pattern))
return tables
def _build_doc(self):
@@ -574,8 +576,9 @@ def _build_doc(self):
scheme = parse_url(self.io).scheme
if scheme not in _valid_schemes:
# lxml can't parse it
- msg = ('%r is not a valid url scheme, valid schemes are '
- '%s') % (scheme, _valid_schemes)
+ msg = (('{invalid!r} is not a valid url scheme, valid '
+ 'schemes are {valid}')
+ .format(invalid=scheme, valid=_valid_schemes))
raise ValueError(msg)
else:
# something else happened: maybe a faulty connection
@@ -670,8 +673,9 @@ def _parser_dispatch(flavor):
"""
valid_parsers = list(_valid_parsers.keys())
if flavor not in valid_parsers:
- raise ValueError('%r is not a valid flavor, valid flavors are %s' %
- (flavor, valid_parsers))
+ raise ValueError('{invalid!r} is not a valid flavor, valid flavors '
+ 'are {valid}'
+ .format(invalid=flavor, valid=valid_parsers))
if flavor in ('bs4', 'html5lib'):
if not _HAS_HTML5LIB:
@@ -695,7 +699,7 @@ def _parser_dispatch(flavor):
def _print_as_set(s):
- return '{%s}' % ', '.join([pprint_thing(el) for el in s])
+ return '{{arg}}'.format(arg=', '.join([pprint_thing(el) for el in s]))
def _validate_flavor(flavor):
@@ -705,21 +709,23 @@ def _validate_flavor(flavor):
flavor = flavor,
elif isinstance(flavor, collections.Iterable):
if not all(isinstance(flav, string_types) for flav in flavor):
- raise TypeError('Object of type %r is not an iterable of strings' %
- type(flavor).__name__)
+ raise TypeError('Object of type {typ!r} is not an iterable of '
+ 'strings'
+ .format(typ=type(flavor).__name__))
else:
- fmt = '{0!r}' if isinstance(flavor, string_types) else '{0}'
+ fmt = '{flavor!r}' if isinstance(flavor, string_types) else '{flavor}'
fmt += ' is not a valid flavor'
- raise ValueError(fmt.format(flavor))
+ raise ValueError(fmt.format(flavor=flavor))
flavor = tuple(flavor)
valid_flavors = set(_valid_parsers)
flavor_set = set(flavor)
if not flavor_set & valid_flavors:
- raise ValueError('%s is not a valid set of flavors, valid flavors are '
- '%s' % (_print_as_set(flavor_set),
- _print_as_set(valid_flavors)))
+ raise ValueError('{invalid} is not a valid set of flavors, valid '
+ 'flavors are {valid}'
+ .format(invalid=_print_as_set(flavor_set),
+ valid=_print_as_set(valid_flavors)))
return flavor
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index a1d48719ba9c0..5dae6099446d0 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -99,7 +99,7 @@ class SeriesWriter(Writer):
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
- "'%s'" % self.orient)
+ "'{orient}'".format(orient=self.orient))
class FrameWriter(Writer):
@@ -110,11 +110,11 @@ def _format_axes(self):
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
- "'%s'." % self.orient)
+ "'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
- "'%s'." % self.orient)
+ "'{orient}'.".format(orient=self.orient))
class JSONTableWriter(FrameWriter):
@@ -134,8 +134,9 @@ def __init__(self, obj, orient, date_format, double_precision,
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
- "`date_format='%s'`. Table Schema requires dates "
- "to be formatted with `date_format='iso'`" % date_format)
+ "`date_format='{fmt}'`. Table Schema requires dates "
+ "to be formatted with `date_format='iso'`"
+ .format(fmt=date_format))
raise ValueError(msg)
self.schema = build_table_schema(obj)
@@ -166,8 +167,8 @@ def __init__(self, obj, orient, date_format, double_precision,
def write(self):
data = super(JSONTableWriter, self).write()
- serialized = '{{"schema": {}, "data": {}}}'.format(
- dumps(self.schema), data)
+ serialized = '{{"schema": {schema}, "data": {data}}}'.format(
+ schema=dumps(self.schema), data=data)
return serialized
@@ -391,8 +392,8 @@ def __init__(self, json, orient, dtype=True, convert_axes=True,
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
- raise ValueError('date_unit must be one of %s' %
- (self._STAMP_UNITS,))
+ raise ValueError('date_unit must be one of {units}'
+ .format(units=self._STAMP_UNITS))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
@@ -410,8 +411,8 @@ def check_keys_split(self, decoded):
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
- raise ValueError(u("JSON data had unexpected key(s): %s") %
- pprint_thing(bad_keys))
+ raise ValueError(u("JSON data had unexpected key(s): {bad_keys}")
+ .format(bad_keys=pprint_thing(bad_keys)))
def parse(self):
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 72776ed01de15..e811dd1eab142 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -249,7 +249,8 @@ def _recursive_extract(data, path, seen_meta, level=0):
raise \
KeyError("Try running with "
"errors='ignore' as key "
- "%s is not always present", e)
+ "{err} is not always present"
+ .format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
@@ -267,8 +268,8 @@ def _recursive_extract(data, path, seen_meta, level=0):
k = meta_prefix + k
if k in result:
- raise ValueError('Conflicting metadata name %s, '
- 'need distinguishing prefix ' % k)
+ raise ValueError('Conflicting metadata name {name}, '
+ 'need distinguishing prefix '.format(name=k))
result[k] = np.array(v).repeat(lengths)
diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx
index ad7ce1fb2531a..fd3f4612fb432 100644
--- a/pandas/io/msgpack/_packer.pyx
+++ b/pandas/io/msgpack/_packer.pyx
@@ -224,7 +224,7 @@ cdef class Packer(object):
default_used = 1
continue
else:
- raise TypeError("can't serialize %r" % (o,))
+ raise TypeError("can't serialize {thing!r}".format(thing=o))
return ret
cpdef pack(self, object obj):
diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx
index 504bfed48df3c..22401d7514f65 100644
--- a/pandas/io/msgpack/_unpacker.pyx
+++ b/pandas/io/msgpack/_unpacker.pyx
@@ -94,7 +94,7 @@ cdef inline init_ctx(unpack_context *ctx,
def default_read_extended_type(typecode, data):
raise NotImplementedError("Cannot decode extended type "
- "with typecode=%d" % typecode)
+ "with typecode={code}".format(code=typecode))
def unpackb(object packed, object object_hook=None, object list_hook=None,
@@ -144,7 +144,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
buf + off, buf_len - off))
return obj
else:
- raise UnpackValueError("Unpack failed: error = %d" % (ret,))
+ raise UnpackValueError("Unpack failed: error = {ret}".format(ret=ret))
def unpack(object stream, object object_hook=None, object list_hook=None,
@@ -411,7 +411,8 @@ cdef class Unpacker(object):
else:
raise OutOfData("No more data to unpack.")
else:
- raise ValueError("Unpack failed: error = %d" % (ret,))
+ raise ValueError("Unpack failed: error = {ret}"
+ .format(ret=ret))
def read_bytes(self, Py_ssize_t nbytes):
"""Read a specified number of raw bytes from the stream"""
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 4396180da44cb..41c03cb2799a3 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -101,10 +101,12 @@ cdef np.ndarray[uint8_t, ndim=1] rle_decompress(
result[rpos] = 0x00
rpos += 1
else:
- raise ValueError("unknown control byte: %v", control_byte)
+ raise ValueError("unknown control byte: {byte}"
+ .format(byte=control_byte))
if len(result) != result_length:
- raise ValueError("RLE: %v != %v", (len(result), result_length))
+ raise ValueError("RLE: {got} != {expect}".format(got=len(result),
+ expect=result_length))
return np.asarray(result)
@@ -185,7 +187,8 @@ cdef np.ndarray[uint8_t, ndim=1] rdc_decompress(
raise ValueError("unknown RDC command")
if len(outbuff) != result_length:
- raise ValueError("RDC: %v != %v\n", len(outbuff), result_length)
+ raise ValueError("RDC: {got} != {expect}\n"
+ .format(got=len(outbuff), expect=result_length))
return np.asarray(outbuff)
@@ -258,7 +261,8 @@ cdef class Parser(object):
self.column_types[j] = column_type_string
else:
raise ValueError("unknown column type: "
- "%s" % self.parser.columns[j].ctype)
+ "{typ}"
+ .format(typ=self.parser.columns[j].ctype))
# compression
if parser.compression == const.rle_compression:
@@ -378,8 +382,8 @@ cdef class Parser(object):
return True
return False
else:
- raise ValueError("unknown page type: %s",
- self.current_page_type)
+ raise ValueError("unknown page type: {typ}"
+ .format(typ=self.current_page_type))
cdef void process_byte_array_with_data(self, int offset, int length):
| Progress toward issue #16130. Converted old string formatting to new string formatting in io/html.py, io/excel.py, msgpack/_packer.pyx, msgpack/_unpacker.pyx, clipboard/exceptions.py, json/json.py, json/normalize.py, sas/sas.pyx
| https://api.github.com/repos/pandas-dev/pandas/pulls/17660 | 2017-09-24T22:48:39Z | 2017-09-25T10:10:06Z | 2017-09-25T10:10:06Z | 2017-09-25T11:08:23Z |
TST: Fix repeat parameter overwritten in the sparse asv test | diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index 7259e8cdb7d61..b958f5e0e5c34 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -1,4 +1,4 @@
-from itertools import repeat
+import itertools
from .pandas_vb_common import *
import scipy.sparse
@@ -33,7 +33,7 @@ def time_sparse_from_scipy(self):
SparseDataFrame(scipy.sparse.rand(1000, 1000, 0.005))
def time_sparse_from_dict(self):
- SparseDataFrame(dict(zip(range(1000), repeat([0]))))
+ SparseDataFrame(dict(zip(range(1000), itertools.repeat([0]))))
class sparse_series_from_coo(object):
| - [x] closes #17658
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17659 | 2017-09-24T16:43:02Z | 2017-09-24T19:06:42Z | 2017-09-24T19:06:42Z | 2017-09-24T19:55:27Z |
TST: install cython from pip for 3.6_NUMPY_DEV build | diff --git a/ci/requirements-3.6_NUMPY_DEV.build b/ci/requirements-3.6_NUMPY_DEV.build
index 900c050f1cc9e..336fbe86b57d8 100644
--- a/ci/requirements-3.6_NUMPY_DEV.build
+++ b/ci/requirements-3.6_NUMPY_DEV.build
@@ -1,3 +1,2 @@
python=3.6*
pytz
-cython
diff --git a/ci/requirements-3.6_NUMPY_DEV.build.sh b/ci/requirements-3.6_NUMPY_DEV.build.sh
index 90ed04f8f0c17..fd79142c5cebb 100644
--- a/ci/requirements-3.6_NUMPY_DEV.build.sh
+++ b/ci/requirements-3.6_NUMPY_DEV.build.sh
@@ -14,4 +14,7 @@ pip install --pre --upgrade --timeout=60 -f $PRE_WHEELS numpy scipy
# install dateutil from master
pip install -U git+git://github.com/dateutil/dateutil.git
+# cython via pip
+pip install cython
+
true
| https://api.github.com/repos/pandas-dev/pandas/pulls/17657 | 2017-09-24T14:23:11Z | 2017-09-24T18:42:54Z | 2017-09-24T18:42:54Z | 2017-09-24T18:43:57Z | |
DEPR: deprecate .as_blocks() | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index ef6b2d6ef2c90..ede52908bbe0f 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -95,17 +95,7 @@ will be completed:
df2.append df2.combine_first
df2.apply df2.compound
df2.applymap df2.consolidate
- df2.as_blocks df2.convert_objects
- df2.asfreq df2.copy
- df2.as_matrix df2.corr
- df2.astype df2.corrwith
- df2.at df2.count
- df2.at_time df2.cov
- df2.axes df2.cummax
- df2.B df2.cummin
- df2.between_time df2.cumprod
- df2.bfill df2.cumsum
- df2.blocks df2.D
+ df2.D
As you can see, the columns ``A``, ``B``, ``C``, and ``D`` are automatically
tab completed. ``E`` is there as well; the rest of the attributes have been
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 1365901c2ce5e..07cc00b3724e4 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -488,10 +488,9 @@ Other API Changes
Deprecations
~~~~~~~~~~~~
- :func:`read_excel()` has deprecated ``sheetname`` in favor of ``sheet_name`` for consistency with ``.to_excel()`` (:issue:`10559`).
-
- ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`).
-
- :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`).
+- :func:`DataFrame.as_blocks` is deprecated, as this is exposing the internal implementation (:issue:`17302`)
.. _whatsnew_0210.prior_deprecations:
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index af068bd1f32b3..8ddc625887a51 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -165,7 +165,7 @@ def _has_bool_dtype(x):
return x.dtype == bool
except AttributeError:
try:
- return 'bool' in x.blocks
+ return 'bool' in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3d55e07df6eac..b49eeed6db85f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1650,7 +1650,7 @@ def to_xarray(self):
coords=coords,
)
- _shared_docs['to_latex'] = """
+ _shared_docs['to_latex'] = r"""
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
@@ -3271,7 +3271,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
- _shared_docs['pipe'] = ("""
+ _shared_docs['pipe'] = (r"""
Apply func(self, \*args, \*\*kwargs)
Parameters
@@ -3692,6 +3692,8 @@ def as_blocks(self, copy=True):
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
+ .. deprecated:: 0.21.0
+
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
@@ -3699,32 +3701,34 @@ def as_blocks(self, copy=True):
----------
copy : boolean, default True
- .. versionadded: 0.16.1
-
Returns
-------
values : a dict of dtype -> Constructor Types
"""
- self._consolidate_inplace()
-
- bd = {}
- for b in self._data.blocks:
- bd.setdefault(str(b.dtype), []).append(b)
-
- result = {}
- for dtype, blocks in bd.items():
- # Must combine even after consolidation, because there may be
- # sparse items which are never consolidated into one block.
- combined = self._data.combine(blocks, copy=copy)
- result[dtype] = self._constructor(combined).__finalize__(self)
-
- return result
+ warnings.warn("as_blocks is deprecated and will "
+ "be removed in a future version",
+ FutureWarning, stacklevel=2)
+ return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
- """Internal property, property synonym for as_blocks()"""
+ """
+ Internal property, property synonym for as_blocks()
+
+ .. deprecated:: 0.21.0
+ """
return self.as_blocks()
+ def _to_dict_of_blocks(self, copy=True):
+ """
+ Return a dict of dtype -> Constructor Types that
+ each is a homogeneous dtype.
+
+ Internal ONLY
+ """
+ return {k: self._constructor(v).__finalize__(self)
+ for k, v, in self._data.to_dict(copy=copy).items()}
+
@deprecate_kwarg(old_arg_name='raise_on_error', new_arg_name='errors',
mapping={True: 'raise', False: 'ignore'})
def astype(self, dtype, copy=True, errors='raise', **kwargs):
@@ -3931,13 +3935,12 @@ def convert_objects(self, convert_dates=True, convert_numeric=False,
-------
converted : same as input object
"""
- from warnings import warn
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
- warn(msg, FutureWarning, stacklevel=2)
+ warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
@@ -4310,9 +4313,9 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
if axis is not None:
- from warnings import warn
- warn('the "axis" argument is deprecated and will be removed in'
- 'v0.13; this argument has no effect')
+ warnings.warn('the "axis" argument is deprecated '
+ 'and will be removed in'
+ 'v0.13; this argument has no effect')
self._consolidate_inplace()
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 2046bae759b9a..e6f61a22e3137 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3583,6 +3583,31 @@ def _interleave(self):
return result
+ def to_dict(self, copy=True):
+ """
+ Return a dict of str(dtype) -> BlockManager
+
+ Parameters
+ ----------
+ copy : boolean, default True
+
+ Returns
+ -------
+ values : a dict of dtype -> BlockManager
+
+ Notes
+ -----
+ This consolidates based on str(dtype)
+ """
+ self._consolidate_inplace()
+
+ bd = {}
+ for b in self.blocks:
+ bd.setdefault(str(b.dtype), []).append(b)
+
+ return {dtype: self.combine(blocks, copy=copy)
+ for dtype, blocks in bd.items()}
+
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d' %
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 4bd959f52673c..869296503225d 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -141,7 +141,7 @@ def _create_blocks(self, how):
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
- blocks = obj.as_blocks(copy=False).values()
+ blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj, index
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index afa3c4f25789a..3ca185cf158a7 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -320,7 +320,11 @@ def test_copy_blocks(self):
column = df.columns[0]
# use the default copy=True, change a column
- blocks = df.as_blocks()
+
+ # deprecated 0.21.0
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
@@ -334,7 +338,11 @@ def test_no_copy_blocks(self):
column = df.columns[0]
# use the copy=False, change a column
- blocks = df.as_blocks(copy=False)
+
+ # deprecated 0.21.0
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index d942330ecd8a6..d0cd1899a0a3c 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1766,7 +1766,7 @@ def test_from_records_sequencelike(self):
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
- blocks = df.blocks
+ blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
@@ -1841,8 +1841,9 @@ def test_from_records_dictlike(self):
# columns is in a different order here than the actual items iterated
# from the dict
+ blocks = df._to_dict_of_blocks()
columns = []
- for dtype, b in compat.iteritems(df.blocks):
+ for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
asdict = dict((x, y) for x, y in compat.iteritems(df))
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 0900d21b250ed..f40fc151676da 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -469,10 +469,11 @@ def test_set_change_dtype_slice(self): # GH8850
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
- assert sorted(df.blocks.keys()) == ['float64', 'int64']
- assert_frame_equal(df.blocks['float64'], DataFrame(
+ blocks = df._to_dict_of_blocks()
+ assert sorted(blocks.keys()) == ['float64', 'int64']
+ assert_frame_equal(blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
- assert_frame_equal(df.blocks['int64'], DataFrame(
+ assert_frame_equal(blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
def test_copy(self, mgr):
diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py
index 004af5066fe83..ed4a3a9e5f75f 100644
--- a/pandas/tests/sparse/test_frame.py
+++ b/pandas/tests/sparse/test_frame.py
@@ -1099,7 +1099,10 @@ def test_as_blocks(self):
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
dtype='float64')
- df_blocks = df.blocks
+ # deprecated 0.21.0
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ df_blocks = df.blocks
assert list(df_blocks.keys()) == ['float64']
tm.assert_frame_equal(df_blocks['float64'], df)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 5adbd1498bb6a..c5f73ca0e885b 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1385,8 +1385,8 @@ def assert_frame_equal(left, right, check_dtype=True,
# compare by blocks
if by_blocks:
- rblocks = right.blocks
- lblocks = left.blocks
+ rblocks = right._to_dict_of_blocks()
+ lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
| closes #17302
| https://api.github.com/repos/pandas-dev/pandas/pulls/17656 | 2017-09-24T14:11:54Z | 2017-09-25T10:14:48Z | 2017-09-25T10:14:47Z | 2017-09-25T10:16:02Z |
DOC: Fixed errors in doc string for Categorical + cleanup | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 98d6d7a68017a..ac9840c57301a 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -196,34 +196,34 @@ class Categorical(PandasObject):
Examples
--------
- >>> from pandas import Categorical
- >>> Categorical([1, 2, 3, 1, 2, 3])
+ >>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
- Categories (3, int64): [1 < 2 < 3]
+ Categories (3, int64): [1, 2, 3]
- >>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
+ >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
- Categories (3, object): [a < b < c]
+ Categories (3, object): [a, b, c]
- Only ordered `Categoricals` can be sorted (according to the order
- of the categories) and have a min and max value.
+ Ordered `Categoricals` can be sorted according to the custom order
+ of the categories and can have a min and max value.
- >>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'],
- ordered=True)
- >>> a.min()
+ >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
+ ... categories=['c', 'b', 'a'])
+ >>> c
+ [a, b, c, a, b, c]
+ Categories (3, object): [c < b < a]
+ >>> c.min()
'c'
Notes
-----
- See the :ref:`user guide <categorical>` for more.
+ See the `user guide
+ <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
- Categorical.sort
- Categorical.order
- Categorical.min
- Categorical.max
pandas.api.types.CategoricalDtype
+ CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation if the compare
| The doc string for ``Categorical`` has the examples wrong wrt. orderedness. This fixes that + some cleanup. | https://api.github.com/repos/pandas-dev/pandas/pulls/17655 | 2017-09-24T09:42:57Z | 2017-09-25T07:13:11Z | 2017-09-25T07:13:11Z | 2017-09-26T09:39:53Z |
BUG: Fix series rename called with str altering name rather index (GH17407) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 4a3122a78b234..e0e0c18052550 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -589,6 +589,7 @@ Indexing
- Bug in intersection of ``RangeIndex`` with negative step (:issue:`17296`)
- Bug in ``IntervalIndex`` where performing a scalar lookup fails for included right endpoints of non-overlapping monotonic decreasing indexes (:issue:`16417`, :issue:`17271`)
- Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` when no valid entry (:issue:`17400`)
+- Bug in :func:`Series.rename` when called with a `callable`, incorrectly alters the name of the `Series`, rather than the name of the `Index`. (:issue:`17407`)
I/O
^^^
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index ff7e215951a1f..de769c69f44fd 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -3,6 +3,7 @@
import collections
import re
import numpy as np
+from collections import Iterable
from numbers import Number
from pandas.compat import (PY2, string_types, text_type,
string_and_binary_types)
@@ -262,7 +263,7 @@ def is_list_like(obj):
False
"""
- return (hasattr(obj, '__iter__') and
+ return (isinstance(obj, Iterable) and
not isinstance(obj, string_and_binary_types))
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index dbde7ae5081d4..857f7a283aa95 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -58,7 +58,7 @@ def __getitem__(self):
def test_is_list_like():
passes = ([], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
- fails = (1, '2', object())
+ fails = (1, '2', object(), str)
for p in passes:
assert inference.is_list_like(p)
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 2182e3fbfc212..83d6a09d38f41 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -2188,6 +2188,16 @@ def test_reindex_fill_value(self):
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
+ def test_rename(self):
+
+ # GH 17407
+ s = Series(range(1, 6), index=pd.Index(range(2, 7), name='IntIndex'))
+ result = s.rename(str)
+ expected = s.rename(lambda i: str(i))
+ assert_series_equal(result, expected)
+
+ assert result.name == expected.name
+
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
| - [x] closes #17407
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17654 | 2017-09-24T01:20:01Z | 2017-09-30T19:33:53Z | 2017-09-30T19:33:53Z | 2017-10-01T15:42:02Z |
DOC: Added example to MultiIndex doc string | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 0b7c5f414b178..8c6b26c9070a9 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -68,6 +68,33 @@ class MultiIndex(Index):
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
+
+ Examples
+ ---------
+ A new ``MultiIndex`` is typically constructed using one of the helper
+ methods :meth:`MultiIndex.from_arrays``, :meth:`MultiIndex.from_product``
+ and :meth:`MultiIndex.from_tuples``. For example (using ``.from_arrays``):
+
+ >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
+ >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
+ MultiIndex(levels=[[1, 2], ['blue', 'red']],
+ labels=[[0, 0, 1, 1], [1, 0, 1, 0]],
+ names=['number', 'color'])
+
+ See further examples for how to construct a MultiIndex in the doc strings
+ of the mentioned helper methods.
+
+ Notes
+ -----
+ See the `user guide
+ <http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
+
+ See Also
+ --------
+ MultiIndex.from_arrays : Convert list of arrays to MultiIndex
+ MultiIndex.from_product : Create a MultiIndex from the cartesian product
+ of iterables
+ MultiIndex.from_tuples : Convert list of tuples to a MultiIndex
"""
# initialize to zero-length tuples to make everything work
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I think ``MultiIndex`` doc string should have a quick note on how ``MultiIndex`` is typically instantiated, as you normally wouldn't instantiate it directly.
I've added an example to the doc string, as in my experience ``MultiIndex`` is one of the more complex areas of Pandas, so a quick example is justified. This may be debatable, so if others disagree on having an example here, I can remove the example and just retain the links to the ``.from_*`` methods. | https://api.github.com/repos/pandas-dev/pandas/pulls/17653 | 2017-09-24T00:07:27Z | 2017-09-25T08:12:35Z | 2017-09-25T08:12:35Z | 2017-09-26T09:40:02Z |
cut/paste AccessorProperty and PandasDelegate to core.accessor | diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 9f8556d1e6961..c8476841bfce4 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -5,6 +5,7 @@
that can be mixed into or pinned onto other pandas classes.
"""
+from pandas.core.common import AbstractMethodError
class DirNamesMixin(object):
@@ -33,3 +34,97 @@ def __dir__(self):
rv = set(dir(type(self)))
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
+
+
+class AccessorProperty(object):
+ """Descriptor for implementing accessor properties like Series.str
+ """
+
+ def __init__(self, accessor_cls, construct_accessor=None):
+ self.accessor_cls = accessor_cls
+ self.construct_accessor = (construct_accessor or
+ accessor_cls._make_accessor)
+ self.__doc__ = accessor_cls.__doc__
+
+ def __get__(self, instance, owner=None):
+ if instance is None:
+ # this ensures that Series.str.<method> is well defined
+ return self.accessor_cls
+ return self.construct_accessor(instance)
+
+ def __set__(self, instance, value):
+ raise AttributeError("can't set attribute")
+
+ def __delete__(self, instance):
+ raise AttributeError("can't delete attribute")
+
+
+class PandasDelegate(object):
+ """ an abstract base class for delegating methods/properties """
+
+ @classmethod
+ def _make_accessor(cls, data):
+ raise AbstractMethodError("_make_accessor should be implemented"
+ "by subclass and return an instance"
+ "of `cls`.")
+
+ def _delegate_property_get(self, name, *args, **kwargs):
+ raise TypeError("You cannot access the "
+ "property {name}".format(name=name))
+
+ def _delegate_property_set(self, name, value, *args, **kwargs):
+ raise TypeError("The property {name} cannot be set".format(name=name))
+
+ def _delegate_method(self, name, *args, **kwargs):
+ raise TypeError("You cannot call method {name}".format(name=name))
+
+ @classmethod
+ def _add_delegate_accessors(cls, delegate, accessors, typ,
+ overwrite=False):
+ """
+ add accessors to cls from the delegate class
+
+ Parameters
+ ----------
+ cls : the class to add the methods/properties to
+ delegate : the class to get methods/properties & doc-strings
+ acccessors : string list of accessors to add
+ typ : 'property' or 'method'
+ overwrite : boolean, default False
+ overwrite the method/property in the target class if it exists
+ """
+
+ def _create_delegator_property(name):
+
+ def _getter(self):
+ return self._delegate_property_get(name)
+
+ def _setter(self, new_values):
+ return self._delegate_property_set(name, new_values)
+
+ _getter.__name__ = name
+ _setter.__name__ = name
+
+ return property(fget=_getter, fset=_setter,
+ doc=getattr(delegate, name).__doc__)
+
+ def _create_delegator_method(name):
+
+ def f(self, *args, **kwargs):
+ return self._delegate_method(name, *args, **kwargs)
+
+ f.__name__ = name
+ f.__doc__ = getattr(delegate, name).__doc__
+
+ return f
+
+ for name in accessors:
+
+ if typ == 'property':
+ f = _create_delegator_property(name)
+ else:
+ f = _create_delegator_method(name)
+
+ # don't overwrite existing methods/properties
+ if overwrite or not hasattr(cls, name):
+ setattr(cls, name, f)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index be021f3621c73..19f6728642645 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -153,100 +153,6 @@ def __setattr__(self, key, value):
object.__setattr__(self, key, value)
-class PandasDelegate(PandasObject):
- """ an abstract base class for delegating methods/properties """
-
- @classmethod
- def _make_accessor(cls, data):
- raise AbstractMethodError("_make_accessor should be implemented"
- "by subclass and return an instance"
- "of `cls`.")
-
- def _delegate_property_get(self, name, *args, **kwargs):
- raise TypeError("You cannot access the "
- "property {name}".format(name=name))
-
- def _delegate_property_set(self, name, value, *args, **kwargs):
- raise TypeError("The property {name} cannot be set".format(name=name))
-
- def _delegate_method(self, name, *args, **kwargs):
- raise TypeError("You cannot call method {name}".format(name=name))
-
- @classmethod
- def _add_delegate_accessors(cls, delegate, accessors, typ,
- overwrite=False):
- """
- add accessors to cls from the delegate class
-
- Parameters
- ----------
- cls : the class to add the methods/properties to
- delegate : the class to get methods/properties & doc-strings
- acccessors : string list of accessors to add
- typ : 'property' or 'method'
- overwrite : boolean, default False
- overwrite the method/property in the target class if it exists
- """
-
- def _create_delegator_property(name):
-
- def _getter(self):
- return self._delegate_property_get(name)
-
- def _setter(self, new_values):
- return self._delegate_property_set(name, new_values)
-
- _getter.__name__ = name
- _setter.__name__ = name
-
- return property(fget=_getter, fset=_setter,
- doc=getattr(delegate, name).__doc__)
-
- def _create_delegator_method(name):
-
- def f(self, *args, **kwargs):
- return self._delegate_method(name, *args, **kwargs)
-
- f.__name__ = name
- f.__doc__ = getattr(delegate, name).__doc__
-
- return f
-
- for name in accessors:
-
- if typ == 'property':
- f = _create_delegator_property(name)
- else:
- f = _create_delegator_method(name)
-
- # don't overwrite existing methods/properties
- if overwrite or not hasattr(cls, name):
- setattr(cls, name, f)
-
-
-class AccessorProperty(object):
- """Descriptor for implementing accessor properties like Series.str
- """
-
- def __init__(self, accessor_cls, construct_accessor=None):
- self.accessor_cls = accessor_cls
- self.construct_accessor = (construct_accessor or
- accessor_cls._make_accessor)
- self.__doc__ = accessor_cls.__doc__
-
- def __get__(self, instance, owner=None):
- if instance is None:
- # this ensures that Series.str.<method> is well defined
- return self.accessor_cls
- return self.construct_accessor(instance)
-
- def __set__(self, instance, value):
- raise AttributeError("can't set attribute")
-
- def __delete__(self, instance):
- raise AttributeError("can't delete attribute")
-
-
class GroupByError(Exception):
pass
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 98d6d7a68017a..743bae2fd2848 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -30,7 +30,8 @@
from pandas.core.common import is_null_slice, _maybe_box_datetimelike
from pandas.core.algorithms import factorize, take_1d, unique1d
-from pandas.core.base import (PandasObject, PandasDelegate,
+from pandas.core.accessor import PandasDelegate
+from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
@@ -2065,7 +2066,7 @@ def repeat(self, repeats, *args, **kwargs):
# The Series.cat accessor
-class CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):
+class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 346eeb8d2642c..899ae99d5deb1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -90,7 +90,7 @@
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
-import pandas.core.base as base
+from pandas.core import accessor
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
@@ -5897,7 +5897,8 @@ def isin(self, values):
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
- plot = base.AccessorProperty(gfx.FramePlotMethods, gfx.FramePlotMethods)
+ plot = accessor.AccessorProperty(gfx.FramePlotMethods,
+ gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 88297ac70984d..2176338574304 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -11,7 +11,8 @@
is_timedelta64_dtype, is_categorical_dtype,
is_list_like)
-from pandas.core.base import PandasDelegate, NoNewAttributesMixin
+from pandas.core.accessor import PandasDelegate
+from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas._libs.period import IncompatibleFrequency # noqa
from pandas.core.indexes.period import PeriodIndex
@@ -81,7 +82,7 @@ def maybe_to_datetimelike(data, copy=False):
"datetimelike index".format(type(data)))
-class Properties(PandasDelegate, NoNewAttributesMixin):
+class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
def __init__(self, values, index, name, orig=None):
self.values = values
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 562a758f83edc..f28ff9697e517 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -57,7 +57,7 @@
import pandas.core.sorting as sorting
from pandas.io.formats.printing import pprint_thing
from pandas.core.ops import _comp_method_OBJECT_ARRAY
-from pandas.core import strings
+from pandas.core import strings, accessor
from pandas.core.config import get_option
@@ -159,7 +159,7 @@ class Index(IndexOpsMixin, PandasObject):
_accessors = frozenset(['str'])
# String Methods
- str = base.AccessorProperty(strings.StringMethods)
+ str = accessor.AccessorProperty(strings.StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 9a055afccd799..8b680127723c3 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -19,6 +19,7 @@
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
+from pandas.core import accessor
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
@@ -27,7 +28,7 @@
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
-class CategoricalIndex(Index, base.PandasDelegate):
+class CategoricalIndex(Index, accessor.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ea9aeefe3b665..db8ee2529ef57 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -62,6 +62,7 @@
from pandas.compat import zip, u, OrderedDict, StringIO
from pandas.compat.numpy import function as nv
+from pandas.core import accessor
import pandas.core.ops as ops
import pandas.core.algorithms as algorithms
@@ -2901,19 +2902,19 @@ def to_period(self, freq=None, copy=True):
# -------------------------------------------------------------------------
# Datetimelike delegation methods
- dt = base.AccessorProperty(CombinedDatetimelikeProperties)
+ dt = accessor.AccessorProperty(CombinedDatetimelikeProperties)
# -------------------------------------------------------------------------
# Categorical methods
- cat = base.AccessorProperty(CategoricalAccessor)
+ cat = accessor.AccessorProperty(CategoricalAccessor)
# String Methods
- str = base.AccessorProperty(strings.StringMethods)
+ str = accessor.AccessorProperty(strings.StringMethods)
# ----------------------------------------------------------------------
# Add plotting methods to Series
- plot = base.AccessorProperty(gfx.SeriesPlotMethods,
- gfx.SeriesPlotMethods)
+ plot = accessor.AccessorProperty(gfx.SeriesPlotMethods,
+ gfx.SeriesPlotMethods)
hist = gfx.hist_series
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 38d78b12b31aa..5bfd8eb7eae24 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -18,7 +18,8 @@
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
-from pandas.core.base import PandasDelegate, NoNewAttributesMixin
+from pandas.core.accessor import PandasDelegate
+from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
@@ -105,7 +106,7 @@ def bar(self, *args, **kwargs):
""" a test bar method """
pass
- class Delegate(PandasDelegate):
+ class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
| To try to get some positive momentum on wrapping up #17042, this PR cut/pastes AccessorProperty and PandasDelegate from core.base to core.accessor, and updates the appropriate imports.
The idea is that it will be easier to review actual design changes in isolation from cut/paste changes.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17651 | 2017-09-23T23:28:20Z | 2017-09-24T13:15:30Z | 2017-09-24T13:15:30Z | 2017-10-30T16:23:25Z |
Fix apparent copy/paste error skewness --> excess kurtosis | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 858aed7fd3e23..388b2ecdff445 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -565,7 +565,7 @@ def nanskew(values, axis=None, skipna=True):
@disallow('M8', 'm8')
def nankurt(values, axis=None, skipna=True):
- """ Compute the sample skewness.
+ """ Compute the sample excess kurtosis.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17647 | 2017-09-23T16:53:44Z | 2017-09-23T17:00:50Z | 2017-09-23T17:00:50Z | 2017-10-30T16:23:26Z |
TST: remove some more warnings | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 101612893cb02..49b7b1d1d3a9b 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -327,25 +327,7 @@ def array_equivalent(left, right, strict_nan=False):
left = left.view('i8')
right = right.view('i8')
- # NaNs cannot occur otherwise.
- try:
- return np.array_equal(left, right)
- except AttributeError:
- # see gh-13388
- #
- # NumPy v1.7.1 has a bug in its array_equal
- # function that prevents it from correctly
- # comparing two arrays with complex dtypes.
- # This bug is corrected in v1.8.0, so remove
- # this try-except block as soon as we stop
- # supporting NumPy versions < 1.8.0
- if not is_dtype_equal(left.dtype, right.dtype):
- return False
-
- left = left.tolist()
- right = right.tolist()
-
- return left == right
+ return np.array_equal(left, right)
def _infer_fill_value(val):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 83b382ec0ed72..6799d3b5746d0 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1289,6 +1289,15 @@ def get_result(other):
elif is_numeric_v_string_like(values, other):
result = False
+ # avoid numpy warning of elementwise comparisons
+ elif func.__name__ == 'eq':
+ if is_list_like(other) and not isinstance(other, np.ndarray):
+ other = np.asarray(other)
+
+ # if we can broadcast, then ok
+ if values.shape[-1] != other.shape[-1]:
+ return False
+ result = func(values, other)
else:
result = func(values, other)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 93514a8a42215..aac8f785f3d99 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -2,6 +2,7 @@
from __future__ import print_function
+import warnings
from datetime import timedelta
from distutils.version import LooseVersion
import sys
@@ -102,7 +103,6 @@ def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
- # it works!
df3.cov()
df3.corr()
@@ -117,7 +117,11 @@ def test_corr_int_and_boolean(self):
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
- tm.assert_frame_equal(df.corr(meth), expected)
+
+ # RuntimeWarning
+ with warnings.catch_warnings(record=True):
+ result = df.corr(meth)
+ tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index d6bdb764f1c8e..055a490bc6b5d 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -23,6 +23,19 @@
PossiblePrecisionLoss, StataMissingValue)
+@pytest.fixture
+def dirpath():
+ return tm.get_data_path()
+
+
+@pytest.fixture
+def parsed_114(dirpath):
+ dta14_114 = os.path.join(dirpath, 'stata5_114.dta')
+ parsed_114 = read_stata(dta14_114, convert_dates=True)
+ parsed_114.index.name = 'index'
+ return parsed_114
+
+
class TestStata(object):
def setup_method(self, method):
@@ -108,10 +121,12 @@ def test_data_method(self):
parsed_114_read = rdr.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
- def test_read_dta1(self):
+ @pytest.mark.parametrize(
+ 'file', ['dta1_114', 'dta1_117'])
+ def test_read_dta1(self, file):
- parsed_114 = self.read_dta(self.dta1_114)
- parsed_117 = self.read_dta(self.dta1_117)
+ file = getattr(self, file)
+ parsed = self.read_dta(file)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
@@ -123,8 +138,7 @@ def test_read_dta1(self):
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
- tm.assert_frame_equal(parsed_114, expected)
- tm.assert_frame_equal(parsed_117, expected)
+ tm.assert_frame_equal(parsed, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
@@ -193,11 +207,12 @@ def test_read_dta2(self):
tm.assert_frame_equal(parsed_117, expected,
check_datetimelike_compat=True)
- def test_read_dta3(self):
- parsed_113 = self.read_dta(self.dta3_113)
- parsed_114 = self.read_dta(self.dta3_114)
- parsed_115 = self.read_dta(self.dta3_115)
- parsed_117 = self.read_dta(self.dta3_117)
+ @pytest.mark.parametrize(
+ 'file', ['dta3_113', 'dta3_114', 'dta3_115', 'dta3_117'])
+ def test_read_dta3(self, file):
+
+ file = getattr(self, file)
+ parsed = self.read_dta(file)
# match stata here
expected = self.read_csv(self.csv3)
@@ -205,16 +220,14 @@ def test_read_dta3(self):
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
- tm.assert_frame_equal(parsed_113, expected)
- tm.assert_frame_equal(parsed_114, expected)
- tm.assert_frame_equal(parsed_115, expected)
- tm.assert_frame_equal(parsed_117, expected)
+ tm.assert_frame_equal(parsed, expected)
+
+ @pytest.mark.parametrize(
+ 'file', ['dta4_113', 'dta4_114', 'dta4_115', 'dta4_117'])
+ def test_read_dta4(self, file):
- def test_read_dta4(self):
- parsed_113 = self.read_dta(self.dta4_113)
- parsed_114 = self.read_dta(self.dta4_114)
- parsed_115 = self.read_dta(self.dta4_115)
- parsed_117 = self.read_dta(self.dta4_117)
+ file = getattr(self, file)
+ parsed = self.read_dta(file)
expected = DataFrame.from_records(
[
@@ -237,10 +250,7 @@ def test_read_dta4(self):
for col in expected], axis=1)
# stata doesn't save .category metadata
- tm.assert_frame_equal(parsed_113, expected, check_categorical=False)
- tm.assert_frame_equal(parsed_114, expected, check_categorical=False)
- tm.assert_frame_equal(parsed_115, expected, check_categorical=False)
- tm.assert_frame_equal(parsed_117, expected, check_categorical=False)
+ tm.assert_frame_equal(parsed, expected, check_categorical=False)
# File containing strls
def test_read_dta12(self):
@@ -427,7 +437,13 @@ def test_read_write_dta13(self):
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
- def test_read_write_reread_dta14(self):
+ @pytest.mark.parametrize(
+ 'file', ['dta14_113', 'dta14_114', 'dta14_115', 'dta14_117'])
+ def test_read_write_reread_dta14(self, file, parsed_114):
+ file = getattr(self, file)
+ parsed = self.read_dta(file)
+ parsed.index.name = 'index'
+
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
@@ -436,18 +452,7 @@ def test_read_write_reread_dta14(self):
expected['date_td'] = pd.to_datetime(
expected['date_td'], errors='coerce')
- parsed_113 = self.read_dta(self.dta14_113)
- parsed_113.index.name = 'index'
- parsed_114 = self.read_dta(self.dta14_114)
- parsed_114.index.name = 'index'
- parsed_115 = self.read_dta(self.dta14_115)
- parsed_115.index.name = 'index'
- parsed_117 = self.read_dta(self.dta14_117)
- parsed_117.index.name = 'index'
-
- tm.assert_frame_equal(parsed_114, parsed_113)
- tm.assert_frame_equal(parsed_114, parsed_115)
- tm.assert_frame_equal(parsed_114, parsed_117)
+ tm.assert_frame_equal(parsed_114, parsed)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
@@ -455,7 +460,10 @@ def test_read_write_reread_dta14(self):
tm.assert_frame_equal(
written_and_read_again.set_index('index'), parsed_114)
- def test_read_write_reread_dta15(self):
+ @pytest.mark.parametrize(
+ 'file', ['dta15_113', 'dta15_114', 'dta15_115', 'dta15_117'])
+ def test_read_write_reread_dta15(self, file):
+
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
@@ -465,15 +473,10 @@ def test_read_write_reread_dta15(self):
expected['date_td'] = expected['date_td'].apply(
datetime.strptime, args=('%Y-%m-%d',))
- parsed_113 = self.read_dta(self.dta15_113)
- parsed_114 = self.read_dta(self.dta15_114)
- parsed_115 = self.read_dta(self.dta15_115)
- parsed_117 = self.read_dta(self.dta15_117)
+ file = getattr(self, file)
+ parsed = self.read_dta(file)
- tm.assert_frame_equal(expected, parsed_114)
- tm.assert_frame_equal(parsed_113, parsed_114)
- tm.assert_frame_equal(parsed_114, parsed_115)
- tm.assert_frame_equal(parsed_114, parsed_117)
+ tm.assert_frame_equal(expected, parsed)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['variable'])
@@ -710,7 +713,9 @@ def test_missing_value_generator(self):
'<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
assert val.string == '.z'
- def test_missing_value_conversion(self):
+ @pytest.mark.parametrize(
+ 'file', ['dta17_113', 'dta17_115', 'dta17_117'])
+ def test_missing_value_conversion(self, file):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
@@ -721,13 +726,8 @@ def test_missing_value_conversion(self):
data.append(row)
expected = DataFrame(data, columns=columns)
- parsed_113 = read_stata(self.dta17_113, convert_missing=True)
- parsed_115 = read_stata(self.dta17_115, convert_missing=True)
- parsed_117 = read_stata(self.dta17_117, convert_missing=True)
-
- tm.assert_frame_equal(expected, parsed_113)
- tm.assert_frame_equal(expected, parsed_115)
- tm.assert_frame_equal(expected, parsed_117)
+ parsed = read_stata(getattr(self, file), convert_missing=True)
+ tm.assert_frame_equal(parsed, expected)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
@@ -919,7 +919,9 @@ def test_categorical_with_stata_missing_values(self):
res = written_and_read_again.set_index('index')
tm.assert_frame_equal(res, original, check_categorical=False)
- def test_categorical_order(self):
+ @pytest.mark.parametrize(
+ 'file', ['dta19_115', 'dta19_117'])
+ def test_categorical_order(self, file):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
@@ -944,91 +946,91 @@ def test_categorical_order(self):
expected = DataFrame.from_items(cols)
# Read with and with out categoricals, ensure order is identical
- parsed_115 = read_stata(self.dta19_115)
- parsed_117 = read_stata(self.dta19_117)
- tm.assert_frame_equal(expected, parsed_115, check_categorical=False)
- tm.assert_frame_equal(expected, parsed_117, check_categorical=False)
+ file = getattr(self, file)
+ parsed = read_stata(file)
+ tm.assert_frame_equal(expected, parsed, check_categorical=False)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
- parsed_115[col].cat.codes)
+ parsed[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
- parsed_115[col].cat.categories)
+ parsed[col].cat.categories)
+
+ @pytest.mark.parametrize(
+ 'file', ['dta20_115', 'dta20_117'])
+ def test_categorical_sorting(self, file):
+ parsed = read_stata(getattr(self, file))
- def test_categorical_sorting(self):
- parsed_115 = read_stata(self.dta20_115)
- parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
- parsed_115 = parsed_115.sort_values("srh")
- parsed_117 = parsed_117.sort_values("srh")
+ parsed = parsed.sort_values("srh")
+
# Don't sort index
- parsed_115.index = np.arange(parsed_115.shape[0])
- parsed_117.index = np.arange(parsed_117.shape[0])
+ parsed.index = np.arange(parsed.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(codes=codes, categories=categories)
expected = pd.Series(cat, name='srh')
- tm.assert_series_equal(expected, parsed_115["srh"],
- check_categorical=False)
- tm.assert_series_equal(expected, parsed_117["srh"],
+ tm.assert_series_equal(expected, parsed["srh"],
check_categorical=False)
- def test_categorical_ordering(self):
- parsed_115 = read_stata(self.dta19_115)
- parsed_117 = read_stata(self.dta19_117)
+ @pytest.mark.parametrize(
+ 'file', ['dta19_115', 'dta19_117'])
+ def test_categorical_ordering(self, file):
+ file = getattr(self, file)
+ parsed = read_stata(file)
- parsed_115_unordered = read_stata(self.dta19_115,
- order_categoricals=False)
- parsed_117_unordered = read_stata(self.dta19_117,
- order_categoricals=False)
- for col in parsed_115:
- if not is_categorical_dtype(parsed_115[col]):
+ parsed_unordered = read_stata(file,
+ order_categoricals=False)
+ for col in parsed:
+ if not is_categorical_dtype(parsed[col]):
continue
- assert parsed_115[col].cat.ordered
- assert parsed_117[col].cat.ordered
- assert not parsed_115_unordered[col].cat.ordered
- assert not parsed_117_unordered[col].cat.ordered
-
- def test_read_chunks_117(self):
- files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
- self.dta4_117, self.dta14_117, self.dta15_117,
- self.dta16_117, self.dta17_117, self.dta18_117,
- self.dta19_117, self.dta20_117]
-
- for fname in files_117:
- for chunksize in 1, 2:
- for convert_categoricals in False, True:
- for convert_dates in False, True:
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- parsed = read_stata(
- fname,
- convert_categoricals=convert_categoricals,
- convert_dates=convert_dates)
- itr = read_stata(
- fname, iterator=True,
- convert_categoricals=convert_categoricals,
- convert_dates=convert_dates)
-
- pos = 0
- for j in range(5):
- with warnings.catch_warnings(record=True) as w: # noqa
- warnings.simplefilter("always")
- try:
- chunk = itr.read(chunksize)
- except StopIteration:
- break
- from_frame = parsed.iloc[pos:pos + chunksize, :]
- tm.assert_frame_equal(
- from_frame, chunk, check_dtype=False,
- check_datetimelike_compat=True,
- check_categorical=False)
-
- pos += chunksize
- itr.close()
+ assert parsed[col].cat.ordered
+ assert not parsed_unordered[col].cat.ordered
+
+ @pytest.mark.parametrize(
+ 'file', ['dta1_117', 'dta2_117', 'dta3_117',
+ 'dta4_117', 'dta14_117', 'dta15_117',
+ 'dta16_117', 'dta17_117', 'dta18_117',
+ 'dta19_117', 'dta20_117'])
+ @pytest.mark.parametrize(
+ 'chunksize', [1, 2])
+ @pytest.mark.parametrize(
+ 'convert_categoricals', [False, True])
+ @pytest.mark.parametrize(
+ 'convert_dates', [False, True])
+ def test_read_chunks_117(self, file, chunksize,
+ convert_categoricals, convert_dates):
+ fname = getattr(self, file)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ parsed = read_stata(
+ fname,
+ convert_categoricals=convert_categoricals,
+ convert_dates=convert_dates)
+ itr = read_stata(
+ fname, iterator=True,
+ convert_categoricals=convert_categoricals,
+ convert_dates=convert_dates)
+
+ pos = 0
+ for j in range(5):
+ with warnings.catch_warnings(record=True) as w: # noqa
+ warnings.simplefilter("always")
+ try:
+ chunk = itr.read(chunksize)
+ except StopIteration:
+ break
+ from_frame = parsed.iloc[pos:pos + chunksize, :]
+ tm.assert_frame_equal(
+ from_frame, chunk, check_dtype=False,
+ check_datetimelike_compat=True,
+ check_categorical=False)
+
+ pos += chunksize
+ itr.close()
def test_iterator(self):
@@ -1057,46 +1059,50 @@ def test_iterator(self):
from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
- def test_read_chunks_115(self):
- files_115 = [self.dta2_115, self.dta3_115, self.dta4_115,
- self.dta14_115, self.dta15_115, self.dta16_115,
- self.dta17_115, self.dta18_115, self.dta19_115,
- self.dta20_115]
-
- for fname in files_115:
- for chunksize in 1, 2:
- for convert_categoricals in False, True:
- for convert_dates in False, True:
-
- # Read the whole file
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- parsed = read_stata(
- fname,
- convert_categoricals=convert_categoricals,
- convert_dates=convert_dates)
-
- # Compare to what we get when reading by chunk
- itr = read_stata(
- fname, iterator=True,
- convert_dates=convert_dates,
- convert_categoricals=convert_categoricals)
- pos = 0
- for j in range(5):
- with warnings.catch_warnings(record=True) as w: # noqa
- warnings.simplefilter("always")
- try:
- chunk = itr.read(chunksize)
- except StopIteration:
- break
- from_frame = parsed.iloc[pos:pos + chunksize, :]
- tm.assert_frame_equal(
- from_frame, chunk, check_dtype=False,
- check_datetimelike_compat=True,
- check_categorical=False)
-
- pos += chunksize
- itr.close()
+ @pytest.mark.parametrize(
+ 'file', ['dta2_115', 'dta3_115', 'dta4_115',
+ 'dta14_115', 'dta15_115', 'dta16_115',
+ 'dta17_115', 'dta18_115', 'dta19_115',
+ 'dta20_115'])
+ @pytest.mark.parametrize(
+ 'chunksize', [1, 2])
+ @pytest.mark.parametrize(
+ 'convert_categoricals', [False, True])
+ @pytest.mark.parametrize(
+ 'convert_dates', [False, True])
+ def test_read_chunks_115(self, file, chunksize,
+ convert_categoricals, convert_dates):
+ fname = getattr(self, file)
+
+ # Read the whole file
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ parsed = read_stata(
+ fname,
+ convert_categoricals=convert_categoricals,
+ convert_dates=convert_dates)
+
+ # Compare to what we get when reading by chunk
+ itr = read_stata(
+ fname, iterator=True,
+ convert_dates=convert_dates,
+ convert_categoricals=convert_categoricals)
+ pos = 0
+ for j in range(5):
+ with warnings.catch_warnings(record=True) as w: # noqa
+ warnings.simplefilter("always")
+ try:
+ chunk = itr.read(chunksize)
+ except StopIteration:
+ break
+ from_frame = parsed.iloc[pos:pos + chunksize, :]
+ tm.assert_frame_equal(
+ from_frame, chunk, check_dtype=False,
+ check_datetimelike_compat=True,
+ check_categorical=False)
+
+ pos += chunksize
+ itr.close()
def test_read_chunks_columns(self):
fname = self.dta3_117
@@ -1299,7 +1305,8 @@ def test_pickle_path_localpath(self):
result = tm.round_trip_localpath(df.to_stata, reader)
tm.assert_frame_equal(df, result)
- @pytest.mark.parametrize('write_index', [True, False])
+ @pytest.mark.parametrize(
+ 'write_index', [True, False])
def test_value_labels_iterator(self, write_index):
# GH 16923
d = {'A': ['B', 'E', 'C', 'A', 'E']}
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 1cc0ad8bb4041..0fe51121abef6 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -1175,7 +1175,7 @@ def test_rolling_quantile_np_percentile(self):
# is analogus to Numpy's percentile
row = 10
col = 5
- idx = pd.date_range(20100101, periods=row, freq='B')
+ idx = pd.date_range('20100101', periods=row, freq='B')
df = pd.DataFrame(np.random.rand(row * col).reshape((row, -1)),
index=idx)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 7dac83953ad8f..14952e391c63f 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1892,7 +1892,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
for i in range(nlevels):
def keyfunc(x):
import re
- numeric_tuple = re.sub("[^\d_]_?", "", x).split("_")
+ numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
@@ -2427,7 +2427,7 @@ def stdin_encoding(encoding=None):
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
- """
+ r"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
| TST: parametrize stata tests
| https://api.github.com/repos/pandas-dev/pandas/pulls/17645 | 2017-09-23T15:13:14Z | 2017-09-23T18:52:12Z | 2017-09-23T18:52:12Z | 2017-09-23T18:53:17Z |
API: harmonize drop/reindex/rename args (GH12392) - drop | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 261e12b824509..5a14e3c189571 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -91,6 +91,24 @@ This does not raise any obvious exceptions, but also does not create a new colum
Setting a list-like data structure into a new attribute now raise a ``UserWarning`` about the potential for unexpected behavior. See :ref:`Attribute Access <indexing.attribute_access>`.
+``drop`` now also accepts index/columns keywords
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :meth:`~DataFrame.drop` method has gained ``index``/``columns`` keywords as an
+alternative to specify the ``axis`` and to make it similar in usage to ``reindex``
+(:issue:`12392`).
+
+For example:
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.arange(8).reshape(2,4),
+ columns=['A', 'B', 'C', 'D'])
+ df
+ df.drop(['B', 'C'], axis=1)
+ # the following is now equivalent
+ df.drop(columns=['B', 'C'])
+
.. _whatsnew_0210.enhancements.categorical_dtype:
``CategoricalDtype`` for specifying categoricals
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 241204ef555f6..3d55e07df6eac 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2333,14 +2333,23 @@ def reindex_like(self, other, method=None, copy=True, limit=None,
return self.reindex(**d)
- def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'):
+ def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
+ inplace=False, errors='raise'):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
+ Index or column labels to drop.
axis : int or axis name
+ Whether to drop labels from the index (0 / 'index') or
+ columns (1 / 'columns').
+ index, columns : single label or list-like
+ Alternative to specifying `axis` (``labels, axis=1`` is
+ equivalent to ``columns=labels``).
+
+ .. versionadded:: 0.21.0
level : int or level name, default None
For MultiIndex
inplace : bool, default False
@@ -2354,36 +2363,80 @@ def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'):
Examples
--------
- >>> df = pd.DataFrame([[1, 2, 3, 4],
- ... [5, 6, 7, 8],
- ... [9, 1, 2, 3],
- ... [4, 5, 6, 7]
- ... ],
- ... columns=list('ABCD'))
+ >>> df = pd.DataFrame(np.arange(12).reshape(3,4),
+ columns=['A', 'B', 'C', 'D'])
>>> df
- A B C D
- 0 1 2 3 4
- 1 5 6 7 8
- 2 9 1 2 3
- 3 4 5 6 7
+ A B C D
+ 0 0 1 2 3
+ 1 4 5 6 7
+ 2 8 9 10 11
+
+ Drop columns
+
+ >>> df.drop(['B', 'C'], axis=1)
+ A D
+ 0 0 3
+ 1 4 7
+ 2 8 11
+
+ >>> df.drop(columns=['B', 'C'])
+ A D
+ 0 0 3
+ 1 4 7
+ 2 8 11
Drop a row by index
>>> df.drop([0, 1])
- A B C D
- 2 9 1 2 3
- 3 4 5 6 7
+ A B C D
+ 2 8 9 10 11
- Drop columns
+ Notes
+ -----
+ Specifying both `labels` and `index` or `columns` will raise a
+ ValueError.
- >>> df.drop(['A', 'B'], axis=1)
- C D
- 0 3 4
- 1 7 8
- 2 2 3
- 3 6 7
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
+
+ if labels is not None:
+ if index is not None or columns is not None:
+ raise ValueError("Cannot specify both 'labels' and "
+ "'index'/'columns'")
+ axis_name = self._get_axis_name(axis)
+ axes = {axis_name: labels}
+ elif index is not None or columns is not None:
+ axes, _ = self._construct_axes_from_arguments((index, columns), {})
+ else:
+ raise ValueError("Need to specify at least one of 'labels', "
+ "'index' or 'columns'")
+
+ obj = self
+
+ for axis, labels in axes.items():
+ if labels is not None:
+ obj = obj._drop_axis(labels, axis, level=level, errors=errors)
+
+ if inplace:
+ self._update_inplace(obj)
+ else:
+ return obj
+
+ def _drop_axis(self, labels, axis, level=None, errors='raise'):
+ """
+ Drop labels from specified axis. Used in the ``drop`` method
+ internally.
+
+ Parameters
+ ----------
+ labels : single label or list-like
+ axis : int or axis name
+ level : int or level name, default None
+ For MultiIndex
+ errors : {'ignore', 'raise'}, default 'raise'
+ If 'ignore', suppress error and existing labels are dropped.
+
+ """
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
@@ -2416,10 +2469,7 @@ def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'):
result = self.loc[tuple(slicer)]
- if inplace:
- self._update_inplace(result)
- else:
- return result
+ return result
def _update_inplace(self, result, verify_is_copy=True):
"""
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index e76869bf6712b..fb9b8c2ed7aff 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -146,6 +146,41 @@ def test_drop_multiindex_not_lexsorted(self):
tm.assert_frame_equal(result, expected)
+ def test_drop_api_equivalence(self):
+ # equivalence of the labels/axis and index/columns API's (GH12392)
+ df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
+ index=['a', 'b', 'c'],
+ columns=['d', 'e', 'f'])
+
+ res1 = df.drop('a')
+ res2 = df.drop(index='a')
+ tm.assert_frame_equal(res1, res2)
+
+ res1 = df.drop('d', 1)
+ res2 = df.drop(columns='d')
+ tm.assert_frame_equal(res1, res2)
+
+ res1 = df.drop(labels='e', axis=1)
+ res2 = df.drop(columns='e')
+ tm.assert_frame_equal(res1, res2)
+
+ res1 = df.drop(['a'], axis=0)
+ res2 = df.drop(index=['a'])
+ tm.assert_frame_equal(res1, res2)
+
+ res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
+ res2 = df.drop(index=['a'], columns=['d'])
+ tm.assert_frame_equal(res1, res2)
+
+ with pytest.raises(ValueError):
+ df.drop(labels='a', index='b')
+
+ with pytest.raises(ValueError):
+ df.drop(labels='a', columns='b')
+
+ with pytest.raises(ValueError):
+ df.drop(axis=1)
+
def test_merge_join_different_levels(self):
# GH 9455
| xref #12392.
For now this only deals with `drop` (making it similar with `reindex`), and not the other way around (`reindex` similar to `drop`). So the title is not fully correct, but this is the more easy part. | https://api.github.com/repos/pandas-dev/pandas/pulls/17644 | 2017-09-23T11:52:56Z | 2017-09-24T11:48:01Z | 2017-09-24T11:48:00Z | 2017-09-24T11:48:08Z |
Accept CategoricalDtype in read_csv | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d6abed6e9d1ad..4d47d8b77aebf 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -452,7 +452,8 @@ Specifying Categorical dtype
.. versionadded:: 0.19.0
-``Categorical`` columns can be parsed directly by specifying ``dtype='category'``
+``Categorical`` columns can be parsed directly by specifying ``dtype='category'`` or
+``dtype=CategoricalDtype(categories, ordered)``.
.. ipython:: python
@@ -468,12 +469,40 @@ Individual columns can be parsed as a ``Categorical`` using a dict specification
pd.read_csv(StringIO(data), dtype={'col1': 'category'}).dtypes
+.. versionadded:: 0.21.0
+
+Specifying ``dtype='cateogry'`` will result in an unordered ``Categorical``
+whose ``categories`` are the unique values observed in the data. For more
+control on the categories and order, create a
+:class:`~pandas.api.types.CategoricalDtype` ahead of time, and pass that for
+that column's ``dtype``.
+
+.. ipython:: python
+
+ from pandas.api.types import CategoricalDtype
+
+ dtype = CategoricalDtype(['d', 'c', 'b', 'a'], ordered=True)
+ pd.read_csv(StringIO(data), dtype={'col1': dtype}).dtypes
+
+When using ``dtype=CategoricalDtype``, "unexpected" values outside of
+``dtype.categories`` are treated as missing values.
+
+.. ipython:: python
+
+ dtype = CategoricalDtype(['a', 'b', 'd']) # No 'c'
+ pd.read_csv(StringIO(data), dtype={'col1': dtype}).col1
+
+This matches the behavior of :meth:`Categorical.set_categories`.
+
.. note::
- The resulting categories will always be parsed as strings (object dtype).
- If the categories are numeric they can be converted using the
- :func:`to_numeric` function, or as appropriate, another converter
- such as :func:`to_datetime`.
+ With ``dtype='category'``, the resulting categories will always be parsed
+ as strings (object dtype). If the categories are numeric they can be
+ converted using the :func:`to_numeric` function, or as appropriate, another
+ converter such as :func:`to_datetime`.
+
+ When ``dtype`` is a ``CategoricalDtype`` with homogenous ``categories`` (
+ all numeric, all datetimes, etc.), the conversion is done automatically.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index dae93feb48b02..72847de135d91 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -119,7 +119,7 @@ expanded to include the ``categories`` and ``ordered`` attributes. A
``CategoricalDtype`` can be used to specify the set of categories and
orderedness of an array, independent of the data themselves. This can be useful,
e.g., when converting string data to a ``Categorical`` (:issue:`14711`,
-:issue:`15078`, :issue:`16015`):
+:issue:`15078`, :issue:`16015`, :issue:`17643`):
.. ipython:: python
@@ -129,8 +129,37 @@ e.g., when converting string data to a ``Categorical`` (:issue:`14711`,
dtype = CategoricalDtype(categories=['a', 'b', 'c', 'd'], ordered=True)
s.astype(dtype)
+One place that deserves special mention is in :meth:`read_csv`. Previously, with
+``dtype={'col': 'category'}``, the returned values and categories would always
+be strings.
+
+.. ipython:: python
+ :suppress:
+
+ from pandas.compat import StringIO
+
+.. ipython:: python
+
+ data = 'A,B\na,1\nb,2\nc,3'
+ pd.read_csv(StringIO(data), dtype={'B': 'category'}).B.cat.categories
+
+Notice the "object" dtype.
+
+With a ``CategoricalDtype`` of all numerics, datetimes, or
+timedeltas, we can automatically convert to the correct type
+
+ dtype = {'B': CategoricalDtype([1, 2, 3])}
+ pd.read_csv(StringIO(data), dtype=dtype).B.cat.categories
+
+The values have been correctly interpreted as integers.
+
The ``.dtype`` property of a ``Categorical``, ``CategoricalIndex`` or a
-``Series`` with categorical type will now return an instance of ``CategoricalDtype``.
+``Series`` with categorical type will now return an instance of
+``CategoricalDtype``. For the most part, this is backwards compatible, though
+the string repr has changed. If you were previously using ``str(s.dtype) ==
+'category'`` to detect categorical data, switch to
+:func:`pandas.api.types.is_categorical_dtype`, which is compatible with the old
+and new ``CategoricalDtype``.
See the :ref:`CategoricalDtype docs <categorical.categoricaldtype>` for more.
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 5bf9f4ce83cbf..60a646769dd1a 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -45,7 +45,7 @@ from pandas.core.dtypes.common import (
is_bool_dtype, is_object_dtype,
is_string_dtype, is_datetime64_dtype,
pandas_dtype)
-from pandas.core.categorical import Categorical
+from pandas.core.categorical import Categorical, _recode_for_categories
from pandas.core.algorithms import take_1d
from pandas.core.dtypes.concat import union_categoricals
from pandas import Index
@@ -1267,19 +1267,14 @@ cdef class TextReader:
return self._string_convert(i, start, end, na_filter,
na_hashset)
elif is_categorical_dtype(dtype):
+ # TODO: I suspect that _categorical_convert could be
+ # optimized when dtype is an instance of CategoricalDtype
codes, cats, na_count = _categorical_convert(
self.parser, i, start, end, na_filter,
na_hashset, self.c_encoding)
- # sort categories and recode if necessary
- cats = Index(cats)
- if not cats.is_monotonic_increasing:
- unsorted = cats.copy()
- cats = cats.sort_values()
- indexer = cats.get_indexer(unsorted)
- codes = take_1d(indexer, codes, fill_value=-1)
-
- return Categorical(codes, categories=cats, ordered=False,
- fastpath=True), na_count
+ cat = Categorical._from_inferred_categories(cats, codes, dtype)
+ return cat, na_count
+
elif is_object_dtype(dtype):
return self._string_convert(i, start, end, na_filter,
na_hashset)
@@ -2230,8 +2225,11 @@ def _concatenate_chunks(list chunks):
if common_type == np.object:
warning_columns.append(str(name))
- if is_categorical_dtype(dtypes.pop()):
- result[name] = union_categoricals(arrs, sort_categories=True)
+ dtype = dtypes.pop()
+ if is_categorical_dtype(dtype):
+ sort_categories = isinstance(dtype, str)
+ result[name] = union_categoricals(arrs,
+ sort_categories=sort_categories)
else:
result[name] = np.concatenate(arrs)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index d79937829cf3f..ce71e6fd74326 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -21,6 +21,8 @@
_ensure_platform_int,
is_dtype_equal,
is_datetimelike,
+ is_datetime64_dtype,
+ is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_integer_dtype,
@@ -509,6 +511,59 @@ def base(self):
""" compat, we are always our own object """
return None
+ @classmethod
+ def _from_inferred_categories(cls, inferred_categories, inferred_codes,
+ dtype):
+ """Construct a Categorical from inferred values
+
+ For inferred categories (`dtype` is None) the categories are sorted.
+ For explicit `dtype`, the `inferred_categories` are cast to the
+ appropriate type.
+
+ Parameters
+ ----------
+
+ inferred_categories : Index
+ inferred_codes : Index
+ dtype : CategoricalDtype or 'category'
+
+ Returns
+ -------
+ Categorical
+ """
+ from pandas import Index, to_numeric, to_datetime, to_timedelta
+
+ cats = Index(inferred_categories)
+
+ known_categories = (isinstance(dtype, CategoricalDtype) and
+ dtype.categories is not None)
+
+ if known_categories:
+ # Convert to a specialzed type with `dtype` if specified
+ if dtype.categories.is_numeric():
+ cats = to_numeric(inferred_categories, errors='coerce')
+ elif is_datetime64_dtype(dtype.categories):
+ cats = to_datetime(inferred_categories, errors='coerce')
+ elif is_timedelta64_dtype(dtype.categories):
+ cats = to_timedelta(inferred_categories, errors='coerce')
+
+ if known_categories:
+ # recode from observation oder to dtype.categories order
+ categories = dtype.categories
+ codes = _recode_for_categories(inferred_codes, cats, categories)
+ elif not cats.is_monotonic_increasing:
+ # sort categories and recode for unknown categories
+ unsorted = cats.copy()
+ categories = cats.sort_values()
+ codes = _recode_for_categories(inferred_codes, unsorted,
+ categories)
+ dtype = CategoricalDtype(categories, ordered=False)
+ else:
+ dtype = CategoricalDtype(cats, ordered=False)
+ codes = inferred_codes
+
+ return cls(codes, dtype=dtype, fastpath=True)
+
@classmethod
def from_array(cls, data, **kwargs):
"""
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index eeb79552477e1..c8b2987d591ef 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -21,6 +21,7 @@
is_float, is_dtype_equal,
is_object_dtype, is_string_dtype,
is_scalar, is_categorical_dtype)
+from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.index import (Index, MultiIndex, RangeIndex,
@@ -1602,12 +1603,20 @@ def _cast_types(self, values, cast_type, column):
"""
if is_categorical_dtype(cast_type):
- # XXX this is for consistency with
- # c-parser which parses all categories
- # as strings
- if not is_object_dtype(values):
+ known_cats = (isinstance(cast_type, CategoricalDtype) and
+ cast_type.categories is not None)
+
+ if not is_object_dtype(values) and not known_cats:
+ # XXX this is for consistency with
+ # c-parser which parses all categories
+ # as strings
values = astype_nansafe(values, str)
- values = Categorical(values)
+
+ cats = Index(values).unique().dropna()
+ values = Categorical._from_inferred_categories(
+ cats, cats.get_indexer(values), cast_type
+ )
+
else:
try:
values = astype_nansafe(values, cast_type, copy=True)
diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py
index 402fa0817595c..7d3df6201a390 100644
--- a/pandas/tests/io/parser/dtypes.py
+++ b/pandas/tests/io/parser/dtypes.py
@@ -149,6 +149,105 @@ def test_categorical_dtype_chunksize(self):
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
+ @pytest.mark.parametrize('ordered', [False, True])
+ @pytest.mark.parametrize('categories', [
+ ['a', 'b', 'c'],
+ ['a', 'c', 'b'],
+ ['a', 'b', 'c', 'd'],
+ ['c', 'b', 'a'],
+ ])
+ def test_categorical_categoricaldtype(self, categories, ordered):
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ expected = pd.DataFrame({
+ "a": [1, 1, 1, 2],
+ "b": Categorical(['a', 'b', 'b', 'c'],
+ categories=categories,
+ ordered=ordered)
+ })
+ dtype = {"b": CategoricalDtype(categories=categories,
+ ordered=ordered)}
+ result = self.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+ def test_categorical_categoricaldtype_unsorted(self):
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ dtype = CategoricalDtype(['c', 'b', 'a'])
+ expected = pd.DataFrame({
+ 'a': [1, 1, 1, 2],
+ 'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
+ })
+ result = self.read_csv(StringIO(data), dtype={'b': dtype})
+ tm.assert_frame_equal(result, expected)
+
+ def test_categoricaldtype_coerces_numeric(self):
+ dtype = {'b': CategoricalDtype([1, 2, 3])}
+ data = "b\n1\n1\n2\n3"
+ expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
+ result = self.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+ def test_categoricaldtype_coerces_datetime(self):
+ dtype = {
+ 'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
+ }
+ data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
+ expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
+ result = self.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+ dtype = {
+ 'b': CategoricalDtype([pd.Timestamp("2014")])
+ }
+ data = "b\n2014-01-01\n2014-01-01T00:00:00"
+ expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
+ result = self.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+ def test_categoricaldtype_coerces_timedelta(self):
+ dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
+ data = "b\n1H\n2H\n3H"
+ expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
+ result = self.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+ def test_categoricaldtype_unexpected_categories(self):
+ dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
+ data = "b\nd\na\nc\nd" # Unexpected c
+ expected = pd.DataFrame({"b": Categorical(list('dacd'),
+ dtype=dtype['b'])})
+ result = self.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+ def test_categorical_categoricaldtype_chunksize(self):
+ # GH 10153
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ cats = ['a', 'b', 'c']
+ expecteds = [pd.DataFrame({'a': [1, 1],
+ 'b': Categorical(['a', 'b'],
+ categories=cats)}),
+ pd.DataFrame({'a': [1, 2],
+ 'b': Categorical(['b', 'c'],
+ categories=cats)},
+ index=[2, 3])]
+ dtype = CategoricalDtype(cats)
+ actuals = self.read_csv(StringIO(data), dtype={'b': dtype},
+ chunksize=2)
+
+ for actual, expected in zip(actuals, expecteds):
+ tm.assert_frame_equal(actual, expected)
+
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index d43901ea091b7..9e3bd40dc275a 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -560,6 +560,40 @@ def f():
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
+ @pytest.mark.parametrize('dtype', [None, 'category'])
+ def test_from_inferred_categories(self, dtype):
+ cats = ['a', 'b']
+ codes = np.array([0, 0, 1, 1], dtype='i8')
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical.from_codes(codes, cats)
+ tm.assert_categorical_equal(result, expected)
+
+ @pytest.mark.parametrize('dtype', [None, 'category'])
+ def test_from_inferred_categories_sorts(self, dtype):
+ cats = ['b', 'a']
+ codes = np.array([0, 1, 1, 1], dtype='i8')
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_from_inferred_categories_dtype(self):
+ cats = ['a', 'b', 'd']
+ codes = np.array([0, 1, 0, 2], dtype='i8')
+ dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical(['a', 'b', 'a', 'd'],
+ categories=['c', 'b', 'a'],
+ ordered=True)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_from_inferred_categories_coerces(self):
+ cats = ['1', '2', 'bad']
+ codes = np.array([0, 0, 1, 2], dtype='i8')
+ dtype = CategoricalDtype([1, 2])
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical([1, 1, 2, np.nan])
+ tm.assert_categorical_equal(result, expected)
+
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
| ```python
import pandas as pd
from io import StringIO
from pandas.api.types import CategoricalDtype
data = 'col1,col2,col3\na,b,1\na,b,2\nc,d,3'
dtype = CategoricalDtype(['d', 'c', 'b', 'a'], ordered=True)
pd.read_csv(StringIO(data), dtype={'col1': dtype}).dtypes
```
This is for after https://github.com/pandas-dev/pandas/pull/16015
cc @chris-b1 | https://api.github.com/repos/pandas-dev/pandas/pulls/17643 | 2017-09-23T11:27:36Z | 2017-10-02T14:10:43Z | 2017-10-02T14:10:43Z | 2017-10-02T14:32:55Z |
DOC: fix no autosummary for numerical index api pages | diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py
index 710c3cc9842c4..f06915997c616 100755
--- a/doc/sphinxext/numpydoc/numpydoc.py
+++ b/doc/sphinxext/numpydoc/numpydoc.py
@@ -43,9 +43,10 @@ def mangle_docstrings(app, what, name, obj, options, lines,
)
# PANDAS HACK (to remove the list of methods/attributes for Categorical)
- if what == "class" and (name.endswith(".Categorical") or
- name.endswith("CategoricalIndex") or
- name.endswith("IntervalIndex")):
+ no_autosummary = [".Categorical", "CategoricalIndex", "IntervalIndex",
+ "RangeIndex", "Int64Index", "UInt64Index",
+ "Float64Index"]
+ if what == "class" and any(name.endswith(n) for n in no_autosummary):
cfg['class_members_list'] = False
if what == 'module':
| For some reason the 'class_without_autosummary' template is not working for the newly added numeric index classes.
I asked in the PR https://github.com/pandas-dev/pandas/pull/17611 to combine them, but now as a test splitting them again (all other cases where it does work is only one class, but if this works seems a bug in sphinx)
Don't merge yet. | https://api.github.com/repos/pandas-dev/pandas/pulls/17642 | 2017-09-23T10:48:10Z | 2017-09-26T14:30:40Z | 2017-09-26T14:30:40Z | 2017-09-26T14:30:44Z |
DOC: correct example use of nth dropna keyword | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index e9a7d8dd0a46e..91d806ca5dd4f 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -1060,7 +1060,7 @@ To select from a DataFrame or Series the nth item, use the nth method. This is a
g.nth(-1)
g.nth(1)
-If you want to select the nth not-null item, use the ``dropna`` kwarg. For a DataFrame this should be either ``'any'`` or ``'all'`` just like you would pass to dropna, for a Series this just needs to be truthy.
+If you want to select the nth not-null item, use the ``dropna`` kwarg. For a DataFrame this should be either ``'any'`` or ``'all'`` just like you would pass to dropna:
.. ipython:: python
@@ -1072,7 +1072,7 @@ If you want to select the nth not-null item, use the ``dropna`` kwarg. For a Dat
g.nth(-1, dropna='any') # NaNs denote group exhausted when using dropna
g.last()
- g.B.nth(0, dropna=True)
+ g.B.nth(0, dropna='all')
As with other methods, passing ``as_index=False``, will achieve a filtration, which returns the grouped row.
| dropna=True is deprecated now, small follow-up to ttps://github.com/pandas-dev/pandas/pull/17493 | https://api.github.com/repos/pandas-dev/pandas/pulls/17641 | 2017-09-23T10:34:54Z | 2017-09-23T14:13:01Z | 2017-09-23T14:13:01Z | 2017-09-23T14:13:03Z |
BUG: overflow on Timedelta construction & arithmetic now raises | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 5003aa0d97c1c..43e90f06ed504 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -498,6 +498,7 @@ Conversion
- Bug in :func:`Series.fillna` returns frame when ``inplace=True`` and ``value`` is dict (:issue:`16156`)
- Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`)
- Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`)
+- Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`)
Indexing
^^^^^^^^
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 6ba37062ac869..077603af96947 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -3514,7 +3514,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns')
if isinstance(ts, timedelta):
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns')
elif not isinstance(ts, np.timedelta64):
raise ValueError("Invalid type for timedelta "
"scalar: %s" % type(ts))
@@ -3891,8 +3891,7 @@ for _maybe_method_name in dir(NaTType):
#----------------------------------------------------------------------
# Conversion routines
-
-cpdef int64_t _delta_to_nanoseconds(delta):
+cpdef int64_t _delta_to_nanoseconds(delta) except? -1:
if isinstance(delta, np.ndarray):
return delta.astype('m8[ns]').astype('int64')
if hasattr(delta, 'nanos'):
@@ -3903,6 +3902,7 @@ cpdef int64_t _delta_to_nanoseconds(delta):
return delta.astype("timedelta64[ns]").item()
if is_integer_object(delta):
return delta
+
return (delta.days * 24 * 60 * 60 * 1000000
+ delta.seconds * 1000000
+ delta.microseconds) * 1000
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index be27334384f6b..e0ccedb834adf 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -787,6 +787,13 @@ def test_to_datetime_freq(self):
assert xp.freq == rs.freq
assert xp.tzinfo == rs.tzinfo
+ def test_to_datetime_overflow(self):
+ # gh-17637
+ # we are overflowing Timedelta range here
+
+ with pytest.raises(OverflowError):
+ date_range(start='1/1/1700', freq='B', periods=100000)
+
def test_string_na_nat_conversion(self):
# GH #999, #858
diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index bc9a0388df9d9..2cabbfacf6416 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -166,6 +166,13 @@ def test_overflow_on_construction(self):
value = pd.Timedelta('1day').value * 20169940
pytest.raises(OverflowError, pd.Timedelta, value)
+ # xref gh-17637
+ with pytest.raises(OverflowError):
+ pd.Timedelta(7 * 19999, unit='D')
+
+ with pytest.raises(OverflowError):
+ pd.Timedelta(timedelta(days=13 * 19999))
+
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
@@ -612,6 +619,14 @@ def test_timedelta_arithmetic(self):
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
+ def test_arithmetic_overflow(self):
+
+ with pytest.raises(OverflowError):
+ pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')
+
+ with pytest.raises(OverflowError):
+ pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)
+
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
| closes #17637 | https://api.github.com/repos/pandas-dev/pandas/pulls/17640 | 2017-09-23T02:48:12Z | 2017-09-23T14:11:02Z | 2017-09-23T14:11:01Z | 2017-09-23T14:12:05Z |
TST: remove some warnings | diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 7260bc9a8b7a1..bff09be6149f3 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -851,7 +851,7 @@ def lreshape(data, groups, dropna=True, label=None):
return DataFrame(mdata, columns=id_cols + pivot_cols)
-def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'):
+def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 7a40018494fc4..aa919d600ec52 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2726,7 +2726,7 @@ def barh(self, x=None, y=None, **kwds):
return self(kind='barh', x=x, y=y, **kwds)
def box(self, by=None, **kwds):
- """
+ r"""
Boxplot
.. versionadded:: 0.17.0
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 5052bef24e95a..309c0f0244d7c 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -1035,6 +1035,12 @@ def test_boolean_comparison(self):
result = df == tup
assert_frame_equal(result, expected)
+ def test_boolean_comparison_error(self):
+
+ # GH 4576
+ # boolean comparisons with a tuple/list give unexpected results
+ df = DataFrame(np.arange(6).reshape((3, 2)))
+
# not shape compatible
pytest.raises(ValueError, lambda: df == (2, 2))
pytest.raises(ValueError, lambda: df == [2, 2])
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index dc59495f619b0..b55bab3a210cc 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -1068,7 +1068,7 @@ def test_errors(self):
interval_range(start='foo', periods=10)
# invalid end
- msg = 'end must be numeric or datetime-like, got \(0, 1\]'
+ msg = r'end must be numeric or datetime-like, got \(0, 1\]'
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Interval(0, 1), periods=10)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 94a0ac31e093e..d6bdb764f1c8e 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1053,7 +1053,8 @@ def test_iterator(self):
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
# GH12153
- from_chunks = pd.concat(read_stata(fname, chunksize=4))
+ with read_stata(fname, chunksize=4) as itr:
+ from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
def test_read_chunks_115(self):
@@ -1306,8 +1307,9 @@ def test_value_labels_iterator(self, write_index):
df['A'] = df['A'].astype('category')
with tm.ensure_clean() as path:
df.to_stata(path, write_index=write_index)
- dta_iter = pd.read_stata(path, iterator=True)
- value_labels = dta_iter.value_labels()
+
+ with pd.read_stata(path, iterator=True) as dta_iter:
+ value_labels = dta_iter.value_labels()
assert value_labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}}
def test_set_index(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/17638 | 2017-09-23T01:45:39Z | 2017-09-23T02:22:36Z | 2017-09-23T02:22:36Z | 2017-09-23T03:34:36Z | |
DEPR: deprecate .select() | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index e47926d95d2fa..400519788d38b 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -667,6 +667,31 @@ Deprecations
- passing ``categories`` or ``ordered`` kwargs to :func:`Series.astype` is deprecated, in favor of passing a :ref:`CategoricalDtype <whatsnew_0210.enhancements.categorical_dtype>` (:issue:`17636`)
- Passing a non-existant column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`)
+.. _whatsnew_0210.deprecations.select:
+
+Series.select and DataFrame.select
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :meth:`Series.select` and :meth:`DataFrame.select` methods are deprecated in favor of using ``df.loc[labels.map(crit)]`` (:issue:`12401`)
+
+.. ipython:: python
+
+ df = DataFrame({'A': [1, 2, 3]}, index=['foo', 'bar', 'baz'])
+
+.. code-block:: ipython
+
+ In [3]: df.select(lambda x: x in ['bar', 'baz'])
+ FutureWarning: select is deprecated and will be removed in a future release. You can use .loc[crit] as a replacement
+ Out[3]:
+ A
+ bar 2
+ baz 3
+
+.. ipython:: python
+
+ df.loc[df.index.map(lambda x: x in ['bar', 'baz'])]
+
+
.. _whatsnew_0210.deprecations.argmin_min:
Series.argmax and Series.argmin
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 2686ad370e1ed..e0dc420bc53f8 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -445,9 +445,17 @@ def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
+
+ Parameters
+ ----------
+ maybe_callable : possibly a callable
+ obj : NDFrame
+ **kwargs
"""
+
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
+
return maybe_callable
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5dd770b2600a0..bc8f68eb763d2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2339,6 +2339,8 @@ def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria
+ DEPRECATED: use df.loc[df.index.map(crit)] to select via labels
+
Parameters
----------
crit : function
@@ -2349,6 +2351,11 @@ def select(self, crit, axis=0):
-------
selection : type of caller
"""
+ warnings.warn("'select' is deprecated and will be removed in a "
+ "future release. You can use "
+ ".loc[labels.map(crit)] as a replacement",
+ FutureWarning, stacklevel=2)
+
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
@@ -3101,7 +3108,7 @@ def filter(self, items=None, like=None, regex=None, axis=None):
See Also
--------
- pandas.DataFrame.select
+ pandas.DataFrame.loc
Notes
-----
@@ -3120,20 +3127,23 @@ def filter(self, items=None, like=None, regex=None, axis=None):
if axis is None:
axis = self._info_axis_name
- axis_name = self._get_axis_name(axis)
- axis_values = self._get_axis(axis_name)
+ labels = self._get_axis(axis)
if items is not None:
- return self.reindex(**{axis_name:
- [r for r in items if r in axis_values]})
+ name = self._get_axis_name(axis)
+ return self.reindex(
+ **{name: [r for r in items if r in labels]})
elif like:
- matchf = lambda x: (like in x if isinstance(x, string_types) else
- like in str(x))
- return self.select(matchf, axis=axis_name)
+ def f(x):
+ if not isinstance(x, string_types):
+ x = str(x)
+ return like in x
+ values = labels.map(f)
+ return self.loc(axis=axis)[values]
elif regex:
matcher = re.compile(regex)
- return self.select(lambda x: matcher.search(str(x)) is not None,
- axis=axis_name)
+ values = labels.map(lambda x: matcher.search(str(x)) is not None)
+ return self.loc(axis=axis)[values]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index e977e84702982..199aa9cfca506 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -99,6 +99,8 @@ def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = self.__class__(self.obj, self.name)
+ if axis is not None:
+ axis = self.obj._get_axis_number(axis)
new_self.axis = axis
return new_self
@@ -107,7 +109,8 @@ def __iter__(self):
def __getitem__(self, key):
if type(key) is tuple:
- key = tuple(com._apply_if_callable(x, self.obj) for x in key)
+ key = tuple(com._apply_if_callable(x, self.obj)
+ for x in key)
try:
values = self.obj.get_value(*key)
if is_scalar(values):
@@ -117,10 +120,16 @@ def __getitem__(self, key):
return self._getitem_tuple(key)
else:
+ # we by definition only have the 0th axis
+ axis = self.axis or 0
+
key = com._apply_if_callable(key, self.obj)
- return self._getitem_axis(key, axis=0)
+ return self._getitem_axis(key, axis=axis)
+
+ def _get_label(self, label, axis=None):
+ if axis is None:
+ axis = self.axis or 0
- def _get_label(self, label, axis=0):
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as its basically direct indexing
@@ -135,10 +144,14 @@ def _get_label(self, label, axis=0):
return self.obj._xs(label, axis=axis)
- def _get_loc(self, key, axis=0):
+ def _get_loc(self, key, axis=None):
+ if axis is None:
+ axis = self.axis
return self.obj._ixs(key, axis=axis)
- def _slice(self, obj, axis=0, kind=None):
+ def _slice(self, obj, axis=None, kind=None):
+ if axis is None:
+ axis = self.axis
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
@@ -173,7 +186,8 @@ def _get_setitem_indexer(self, key):
def __setitem__(self, key, value):
if isinstance(key, tuple):
- key = tuple(com._apply_if_callable(x, self.obj) for x in key)
+ key = tuple(com._apply_if_callable(x, self.obj)
+ for x in key)
else:
key = com._apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
@@ -192,10 +206,12 @@ def _has_valid_tuple(self, key):
"[{types}] types"
.format(types=self._valid_types))
- def _should_validate_iterable(self, axis=0):
+ def _should_validate_iterable(self, axis=None):
""" return a boolean whether this axes needs validation for a passed
iterable
"""
+ if axis is None:
+ axis = self.axis or 0
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
@@ -233,6 +249,8 @@ def _convert_range(self, key, is_setter=False):
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
+ if axis is None:
+ axis = 0
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
@@ -895,7 +913,9 @@ def _multi_take(self, tup):
except(KeyError, IndexingError):
raise self._exception
- def _convert_for_reindex(self, key, axis=0):
+ def _convert_for_reindex(self, key, axis=None):
+ if axis is None:
+ axis = self.axis or 0
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
@@ -925,7 +945,7 @@ def _handle_lowerdim_multi_index_axis0(self, tup):
try:
# fast path for series or for tup devoid of slices
- return self._get_label(tup, axis=0)
+ return self._get_label(tup, axis=self.axis)
except TypeError:
# slices are unhashable
pass
@@ -1015,7 +1035,7 @@ def _getitem_nested_tuple(self, tup):
# this is a series with a multi-index specified a tuple of
# selectors
- return self._getitem_axis(tup, axis=0)
+ return self._getitem_axis(tup, axis=self.axis)
# handle the multi-axis by taking sections and reducing
# this is iterative
@@ -1049,7 +1069,10 @@ def _getitem_nested_tuple(self, tup):
return obj
- def _getitem_axis(self, key, axis=0):
+ def _getitem_axis(self, key, axis=None):
+
+ if axis is None:
+ axis = self.axis or 0
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
@@ -1084,7 +1107,10 @@ def _getitem_axis(self, key, axis=0):
return self._get_label(key, axis=axis)
- def _getitem_iterable(self, key, axis=0):
+ def _getitem_iterable(self, key, axis=None):
+ if axis is None:
+ axis = self.axis or 0
+
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
@@ -1138,7 +1164,7 @@ def _getitem_iterable(self, key, axis=0):
return result
- def _convert_to_indexer(self, obj, axis=0, is_setter=False):
+ def _convert_to_indexer(self, obj, axis=None, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
@@ -1153,6 +1179,9 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False):
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
+ if axis is None:
+ axis = self.axis or 0
+
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
@@ -1255,9 +1284,12 @@ def _tuplify(self, loc):
tup[0] = loc
return tuple(tup)
- def _get_slice_axis(self, slice_obj, axis=0):
+ def _get_slice_axis(self, slice_obj, axis=None):
obj = self.obj
+ if axis is None:
+ axis = self.axis or 0
+
if not need_slice(slice_obj):
return obj.copy(deep=False)
indexer = self._convert_slice_indexer(slice_obj, axis)
@@ -1325,7 +1357,8 @@ class _LocationIndexer(_NDFrameIndexer):
def __getitem__(self, key):
if type(key) is tuple:
- key = tuple(com._apply_if_callable(x, self.obj) for x in key)
+ key = tuple(com._apply_if_callable(x, self.obj)
+ for x in key)
try:
if self._is_scalar_access(key):
return self._getitem_scalar(key)
@@ -1333,8 +1366,11 @@ def __getitem__(self, key):
pass
return self._getitem_tuple(key)
else:
- key = com._apply_if_callable(key, self.obj)
- return self._getitem_axis(key, axis=0)
+ # we by definition only have the 0th axis
+ axis = self.axis or 0
+
+ maybe_callable = com._apply_if_callable(key, self.obj)
+ return self._getitem_axis(maybe_callable, axis=axis)
def _is_scalar_access(self, key):
raise NotImplementedError()
@@ -1342,10 +1378,12 @@ def _is_scalar_access(self, key):
def _getitem_scalar(self, key):
raise NotImplementedError()
- def _getitem_axis(self, key, axis=0):
+ def _getitem_axis(self, key, axis=None):
raise NotImplementedError()
- def _getbool_axis(self, key, axis=0):
+ def _getbool_axis(self, key, axis=None):
+ if axis is None:
+ axis = self.axis or 0
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
@@ -1354,8 +1392,11 @@ def _getbool_axis(self, key, axis=0):
except Exception as detail:
raise self._exception(detail)
- def _get_slice_axis(self, slice_obj, axis=0):
+ def _get_slice_axis(self, slice_obj, axis=None):
""" this is pretty simple as we just have to deal with labels """
+ if axis is None:
+ axis = self.axis or 0
+
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
@@ -1528,7 +1569,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
return key
- def _getitem_axis(self, key, axis=0):
+ def _getitem_axis(self, key, axis=None):
+ if axis is None:
+ axis = self.axis or 0
+
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
@@ -1717,7 +1761,9 @@ def _getitem_tuple(self, tup):
return retval
- def _get_slice_axis(self, slice_obj, axis=0):
+ def _get_slice_axis(self, slice_obj, axis=None):
+ if axis is None:
+ axis = self.axis or 0
obj = self.obj
if not need_slice(slice_obj):
@@ -1729,7 +1775,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
else:
return self.obj._take(slice_obj, axis=axis, convert=False)
- def _get_list_axis(self, key, axis=0):
+ def _get_list_axis(self, key, axis=None):
"""
Return Series values by list or array of integers
@@ -1742,13 +1788,17 @@ def _get_list_axis(self, key, axis=0):
-------
Series object
"""
+ if axis is None:
+ axis = self.axis or 0
try:
return self.obj._take(key, axis=axis, convert=False)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
- def _getitem_axis(self, key, axis=0):
+ def _getitem_axis(self, key, axis=None):
+ if axis is None:
+ axis = self.axis or 0
if isinstance(key, slice):
self._has_valid_type(key, axis)
@@ -1781,8 +1831,10 @@ def _getitem_axis(self, key, axis=0):
return self._get_loc(key, axis=axis)
- def _convert_to_indexer(self, obj, axis=0, is_setter=False):
+ def _convert_to_indexer(self, obj, axis=None, is_setter=False):
""" much simpler as we only have to deal with our valid types """
+ if axis is None:
+ axis = self.axis or 0
# make need to convert a float key
if isinstance(obj, slice):
@@ -1818,7 +1870,8 @@ def __getitem__(self, key):
def __setitem__(self, key, value):
if isinstance(key, tuple):
- key = tuple(com._apply_if_callable(x, self.obj) for x in key)
+ key = tuple(com._apply_if_callable(x, self.obj)
+ for x in key)
else:
# scalar callable may return tuple
key = com._apply_if_callable(key, self.obj)
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 8bcc19e6d8ba4..27906838abb2d 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -143,10 +143,11 @@ def test_set_index_nonuniq(self):
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
- df2 = df.select(lambda indx: indx >= 1)
- rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
+
+ df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
+ rs = df2.set_index('key')
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 219c1df301c4b..f9a4275d14f55 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -796,16 +796,38 @@ def test_filter_corner(self):
assert_frame_equal(result, empty)
def test_select(self):
+
+ # deprecated: gh-12410
f = lambda x: x.weekday() == 2
- result = self.tsframe.select(f, axis=0)
- expected = self.tsframe.reindex(
- index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
- assert_frame_equal(result, expected)
+ index = self.tsframe.index[[f(x) for x in self.tsframe.index]]
+ expected_weekdays = self.tsframe.reindex(index=index)
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = self.tsframe.select(f, axis=0)
+ assert_frame_equal(result, expected_weekdays)
+
+ result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
+ expected = self.frame.reindex(columns=['B', 'D'])
+ assert_frame_equal(result, expected, check_names=False)
+
+ # replacement
+ f = lambda x: x.weekday == 2
+ result = self.tsframe.loc(axis=0)[f(self.tsframe.index)]
+ assert_frame_equal(result, expected_weekdays)
- result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
+ crit = lambda x: x in ['B', 'D']
+ result = self.frame.loc(axis=1)[(self.frame.columns.map(crit))]
expected = self.frame.reindex(columns=['B', 'D'])
+ assert_frame_equal(result, expected, check_names=False)
+
+ # doc example
+ df = DataFrame({'A': [1, 2, 3]}, index=['foo', 'bar', 'baz'])
- # TODO should reindex check_names?
+ crit = lambda x: x in ['bar', 'baz']
+ with tm.assert_produces_warning(FutureWarning):
+ expected = df.select(crit)
+ result = df.loc[df.index.map(crit)]
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 0043475702f94..26e2b801f6460 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -83,6 +83,7 @@ def test_assign_order(self):
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
+
# non-keyword argument
with pytest.raises(TypeError):
df.assign(lambda x: x.A)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 47bf837fa62d9..657de9b589dc9 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -3103,7 +3103,8 @@ def agg_before(hour, func, fix=False):
"""
def _func(data):
- d = data.select(lambda x: x.hour < 11).dropna()
+ d = data.loc[data.index.map(
+ lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 09ba0e197438d..93e7b81163b54 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -2225,14 +2225,18 @@ def test_rename(self):
assert result.name == expected.name
def test_select(self):
- n = len(self.ts)
- result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
- expected = self.ts.reindex(self.ts.index[n // 2:])
- assert_series_equal(result, expected)
- result = self.ts.select(lambda x: x.weekday() == 2)
- expected = self.ts[self.ts.index.weekday == 2]
- assert_series_equal(result, expected)
+ # deprecated: gh-12410
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ n = len(self.ts)
+ result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
+ expected = self.ts.reindex(self.ts.index[n // 2:])
+ assert_series_equal(result, expected)
+
+ result = self.ts.select(lambda x: x.weekday() == 2)
+ expected = self.ts[self.ts.index.weekday == 2]
+ assert_series_equal(result, expected)
def test_cast_on_putmask(self):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 050335988ca41..94577db15f01a 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1239,7 +1239,8 @@ def test_groupby_level_no_obs(self):
'f2', 's1'), ('f2', 's2'), ('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
- df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
+ df1 = df.loc(axis=1)[df.columns.map(
+ lambda u: u[0] in ['f2', 'f3'])]
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
| xref #12401
| https://api.github.com/repos/pandas-dev/pandas/pulls/17633 | 2017-09-22T21:29:27Z | 2017-10-04T11:07:36Z | 2017-10-04T11:07:36Z | 2017-10-04T11:08:36Z |
preserve kwargs order on assign func for py36plus - #14207 | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 5003aa0d97c1c..49f831b95a71f 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -117,6 +117,7 @@ Other Enhancements
- :func:`MultiIndex.is_monotonic_decreasing` has been implemented. Previously returned ``False`` in all cases. (:issue:`16554`)
- :func:`Categorical.rename_categories` now accepts a dict-like argument as `new_categories` and only updates the categories found in that dict. (:issue:`17336`)
- :func:`read_excel` raises ``ImportError`` with a better message if ``xlrd`` is not installed. (:issue:`17613`)
+- :meth:`DataFrame.assign` will preserve the original order of ``**kwargs`` for Python 3.6+ users instead of sorting the column names
.. _whatsnew_0210.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index dd5d490ea66a8..3b85c864d877c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -82,6 +82,7 @@
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
+from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg
@@ -2575,12 +2576,12 @@ def assign(self, **kwargs):
Notes
-----
- Since ``kwargs`` is a dictionary, the order of your
- arguments may not be preserved. To make things predicatable,
- the columns are inserted in alphabetical order, at the end of
- your DataFrame. Assigning multiple columns within the same
- ``assign`` is possible, but you cannot reference other columns
- created within the same ``assign`` call.
+ For python 3.6 and above, the columns are inserted in the order of
+ **kwargs. For python 3.5 and earlier, since **kwargs is unordered,
+ the columns are inserted in alphabetical order at the end of your
+ DataFrame. Assigning multiple columns within the same ``assign``
+ is possible, but you cannot reference other columns created within
+ the same ``assign`` call.
Examples
--------
@@ -2620,14 +2621,18 @@ def assign(self, **kwargs):
data = self.copy()
# do all calculations first...
- results = {}
+ results = OrderedDict()
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
+ # preserve order for 3.6 and later, but sort by key for 3.5 and earlier
+ if PY36:
+ results = results.items()
+ else:
+ results = sorted(results.items())
# ... and then assign
- for k, v in sorted(results.items()):
+ for k, v in results:
data[k] = v
-
return data
def _sanitize_column(self, key, value, broadcast=True):
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 4462260a290d9..0043475702f94 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -4,6 +4,7 @@
import pytest
from pandas.compat import range, lrange
import numpy as np
+from pandas.compat import PY36
from pandas import DataFrame, Series, Index, MultiIndex
@@ -61,14 +62,23 @@ def test_assign_multiple(self):
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
- def test_assign_alphabetical(self):
+ def test_assign_order(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
- expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
- columns=list('ABCD'))
+
+ if PY36:
+ expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]],
+ columns=list('ABDC'))
+ else:
+ expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
+ columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
+
+ expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
+ columns=list('ABCD'))
+
assert_frame_equal(result, expected)
def test_assign_bad(self):
| - [x] closes #14207
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17632 | 2017-09-22T20:38:25Z | 2017-09-24T13:22:14Z | 2017-09-24T13:22:14Z | 2017-09-25T02:19:39Z |
COMPAT: sum/prod on all nan will remain nan regardless of bottleneck install | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 07740d66a2186..c0b3a2e0edb30 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -181,6 +181,42 @@ account for missing data. For example:
df.mean(1)
df.cumsum()
+
+.. _missing_data.numeric_sum:
+
+Sum/Prod of Empties/Nans
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+ This behavior is now standard as of v0.21.0; previously sum/prod would give different
+ results if the ``bottleneck`` package was installed. See the :ref:`here <whatsnew_0210.api_breaking.bottleneck>`.
+
+With ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, the result will be all-``NaN``.
+
+.. ipython:: python
+
+ s = Series([np.nan])
+
+ s.sum()
+
+Summing of an empty ``Series``
+
+.. ipython:: python
+
+ pd.Series([]).sum()
+
+.. warning::
+
+ These behaviors differ from the default in ``numpy`` where an empty sum returns zero.
+
+ .. ipython:: python
+
+ np.nansum(np.array([np.nan]))
+ np.nansum(np.array([]))
+
+
+
NA values in GroupBy
~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index ed3be71852299..afb3bf071ab73 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -12,6 +12,7 @@ Highlights include:
- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`.
- New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying
categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`.
+- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`
Check the :ref:`API Changes <whatsnew_0210.api_breaking>` and :ref:`deprecations <whatsnew_0210.deprecations>` before updating.
@@ -412,6 +413,52 @@ Current Behavior
s.loc[pd.Index([True, False, True])]
+.. _whatsnew_0210.api_breaking.bottleneck:
+
+Sum/Prod of all-NaN Series/DataFrames is now consistently NaN
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on
+whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed. (:issue:`9422`, :issue:`15507`).
+
+With ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, the result will be all-``NaN``. See the :ref:`docs <missing_data.numeric_sum>`.
+
+.. ipython:: python
+
+ s = Series([np.nan])
+
+Previously NO ``bottleneck``
+
+.. code_block:: ipython
+
+ In [2]: s.sum()
+ Out[2]: np.nan
+
+Previously WITH ``bottleneck``
+
+.. code_block:: ipython
+
+ In [2]: s.sum()
+ Out[2]: 0.0
+
+New Behavior, without regards to the bottleneck installation.
+
+.. ipython:: python
+
+ s.sum()
+
+Note that this also changes the sum of an empty ``Series``
+
+Previously regardless of ``bottlenck``
+
+.. code_block:: ipython
+
+ In [1]: pd.Series([]).sum()
+ Out[1]: 0
+
+.. ipython:: python
+
+ pd.Series([]).sum()
.. _whatsnew_0210.api_breaking.pandas_eval:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c7ae9bbee9013..bc0f10a3f79ab 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6990,7 +6990,7 @@ def _doc_parms(cls):
----------
axis : %(axis_descr)s
skipna : boolean, default True
- Exclude NA/null values. If an entire row/column is NA, the result
+ Exclude NA/null values. If an entire row/column is NA or empty, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 388b2ecdff445..baeb869239c1e 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -18,7 +18,7 @@
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
-from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype
from pandas.core.config import get_option
from pandas.core.common import _values_from_object
@@ -89,8 +89,7 @@ def _f(*args, **kwargs):
class bottleneck_switch(object):
- def __init__(self, zero_value=None, **kwargs):
- self.zero_value = zero_value
+ def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, alt):
@@ -108,18 +107,20 @@ def f(values, axis=None, skipna=True, **kwds):
if k not in kwds:
kwds[k] = v
try:
- if self.zero_value is not None and values.size == 0:
- if values.ndim == 1:
+ if values.size == 0:
+
+ # we either return np.nan or pd.NaT
+ if is_numeric_dtype(values):
+ values = values.astype('float64')
+ fill_value = na_value_for_dtype(values.dtype)
- # wrap the 0's if needed
- if is_timedelta64_dtype(values):
- return lib.Timedelta(0)
- return 0
+ if values.ndim == 1:
+ return fill_value
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
- result = np.empty(result_shape)
- result.fill(0)
+ result = np.empty(result_shape, dtype=values.dtype)
+ result.fill(fill_value)
return result
if (_USE_BOTTLENECK and skipna and
@@ -154,11 +155,16 @@ def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and not is_datetime_or_timedelta_dtype(dt)):
+ # GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
- if name == 'nansum':
- if dt.itemsize < 8:
- return False
+
+ # GH 9422
+ # further we also want to preserve NaN when all elements
+ # are NaN, unlinke bottleneck/numpy which consider this
+ # to be 0
+ if name in ['nansum', 'nanprod']:
+ return False
return True
return False
@@ -297,7 +303,7 @@ def nanall(values, axis=None, skipna=True):
@disallow('M8')
-@bottleneck_switch(zero_value=0)
+@bottleneck_switch()
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index dca905b47000e..c36b5957a4283 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -448,7 +448,11 @@ def test_sum(self):
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
- def test_stat_operators_attempt_obj_array(self):
+ @pytest.mark.parametrize(
+ "method", ['sum', 'mean', 'prod', 'var',
+ 'std', 'skew', 'min', 'max'])
+ def test_stat_operators_attempt_obj_array(self, method):
+ # GH #676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
@@ -458,20 +462,17 @@ def test_stat_operators_attempt_obj_array(self):
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
- methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
- # GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
- for meth in methods:
- assert df.values.dtype == np.object_
- result = getattr(df, meth)(1)
- expected = getattr(df.astype('f8'), meth)(1)
+ assert df.values.dtype == np.object_
+ result = getattr(df, method)(1)
+ expected = getattr(df.astype('f8'), method)(1)
- if not tm._incompat_bottleneck_version(meth):
- tm.assert_series_equal(result, expected)
+ if method in ['sum', 'prod']:
+ tm.assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
@@ -563,15 +564,15 @@ def test_var_std(self):
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
- if nanops._USE_BOTTLENECK:
- nanops._USE_BOTTLENECK = False
+
+ with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
- nanops._USE_BOTTLENECK = True
- def test_numeric_only_flag(self):
+ @pytest.mark.parametrize(
+ "meth", ['sem', 'var', 'std'])
+ def test_numeric_only_flag(self, meth):
# GH #9201
- methods = ['sem', 'var', 'std']
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
@@ -580,20 +581,19 @@ def test_numeric_only_flag(self):
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
- for meth in methods:
- result = getattr(df1, meth)(axis=1, numeric_only=True)
- expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
- tm.assert_series_equal(expected, result)
+ result = getattr(df1, meth)(axis=1, numeric_only=True)
+ expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
+ tm.assert_series_equal(expected, result)
- result = getattr(df2, meth)(axis=1, numeric_only=True)
- expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
- tm.assert_series_equal(expected, result)
+ result = getattr(df2, meth)(axis=1, numeric_only=True)
+ expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
+ tm.assert_series_equal(expected, result)
- # df1 has all numbers, df2 has a letter inside
- pytest.raises(TypeError, lambda: getattr(df1, meth)(
- axis=1, numeric_only=False))
- pytest.raises(TypeError, lambda: getattr(df2, meth)(
- axis=1, numeric_only=False))
+ # df1 has all numbers, df2 has a letter inside
+ pytest.raises(TypeError, lambda: getattr(df1, meth)(
+ axis=1, numeric_only=False))
+ pytest.raises(TypeError, lambda: getattr(df2, meth)(
+ axis=1, numeric_only=False))
def test_mixed_ops(self):
# GH 16116
@@ -606,11 +606,9 @@ def test_mixed_ops(self):
result = getattr(df, op)()
assert len(result) == 2
- if nanops._USE_BOTTLENECK:
- nanops._USE_BOTTLENECK = False
+ with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
- nanops._USE_BOTTLENECK = True
def test_cumsum(self):
self.tsframe.loc[5:10, 0] = nan
@@ -676,11 +674,10 @@ def test_sem(self):
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
- if nanops._USE_BOTTLENECK:
- nanops._USE_BOTTLENECK = False
+
+ with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
- nanops._USE_BOTTLENECK = True
def test_skew(self):
tm._skip_if_no_scipy()
@@ -767,7 +764,7 @@ def wrapper(x):
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
- if not tm._incompat_bottleneck_version(name):
+ if name in ['sum', 'prod']:
exp = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, exp, check_dtype=False,
check_less_precise=check_less_precise)
@@ -799,7 +796,7 @@ def wrapper(x):
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
- if not tm._incompat_bottleneck_version(name):
+ if name in ['sum', 'prod']:
assert np.isnan(r0).all()
assert np.isnan(r1).all()
@@ -1859,14 +1856,14 @@ def test_dataframe_clip(self):
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
- @pytest.mark.xfail(reason=("clip on mixed integer or floats "
- "with integer clippers coerces to float"))
def test_clip_mixed_numeric(self):
-
+ # TODO(jreback)
+ # clip on mixed integer or floats
+ # with integer clippers coerces to float
df = DataFrame({'A': [1, 2, 3],
'B': [1., np.nan, 3.]})
result = df.clip(1, 2)
- expected = DataFrame({'A': [1, 2, 2],
+ expected = DataFrame({'A': [1, 2, 2.],
'B': [1., np.nan, 2.]})
tm.assert_frame_equal(result, expected, check_like=True)
diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py
index efc833575843c..913d3bcc09869 100644
--- a/pandas/tests/groupby/test_aggregate.py
+++ b/pandas/tests/groupby/test_aggregate.py
@@ -562,7 +562,7 @@ def _testit(name):
exp.name = 'C'
result = op(grouped)['C']
- if not tm._incompat_bottleneck_version(name):
+ if name in ['sum', 'prod']:
assert_series_equal(result, exp)
_testit('count')
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 6495d748e3823..8cc40bb5146c5 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -15,110 +15,103 @@
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
-import pandas.core.config as cf
-
import pandas.core.nanops as nanops
-from pandas.compat import lrange, range, is_platform_windows
+from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
import pandas.util.testing as tm
-
from .common import TestData
-skip_if_bottleneck_on_windows = (is_platform_windows() and
- nanops._USE_BOTTLENECK)
+class TestSeriesAnalytics(TestData):
+ @pytest.mark.parametrize("use_bottleneck", [True, False])
+ @pytest.mark.parametrize("method", ["sum", "prod"])
+ def test_empty(self, method, use_bottleneck):
-class TestSeriesAnalytics(TestData):
+ with pd.option_context("use_bottleneck", use_bottleneck):
+ # GH 9422
+ # treat all missing as NaN
+ s = Series([])
+ result = getattr(s, method)()
+ assert isna(result)
- def test_sum_zero(self):
- arr = np.array([])
- assert nanops.nansum(arr) == 0
+ result = getattr(s, method)(skipna=True)
+ assert isna(result)
- arr = np.empty((10, 0))
- assert (nanops.nansum(arr, axis=1) == 0).all()
+ s = Series([np.nan])
+ result = getattr(s, method)()
+ assert isna(result)
- # GH #844
- s = Series([], index=[])
- assert s.sum() == 0
+ result = getattr(s, method)(skipna=True)
+ assert isna(result)
- df = DataFrame(np.empty((10, 0)))
- assert (df.sum(1) == 0).all()
+ s = Series([np.nan, 1])
+ result = getattr(s, method)()
+ assert result == 1.0
+
+ s = Series([np.nan, 1])
+ result = getattr(s, method)(skipna=True)
+ assert result == 1.0
+
+ # GH #844 (changed in 9422)
+ df = DataFrame(np.empty((10, 0)))
+ assert (df.sum(1).isnull()).all()
+
+ @pytest.mark.parametrize(
+ "method", ['sum', 'mean', 'median', 'std', 'var'])
+ def test_ops_consistency_on_empty(self, method):
+
+ # GH 7869
+ # consistency on empty
+
+ # float
+ result = getattr(Series(dtype=float), method)()
+ assert isna(result)
+
+ # timedelta64[ns]
+ result = getattr(Series(dtype='m8[ns]'), method)()
+ assert result is pd.NaT
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
- def test_overflow(self):
- # GH 6915
- # overflowing on the smaller int dtypes
- for dtype in ['int32', 'int64']:
- v = np.arange(5000000, dtype=dtype)
- s = Series(v)
-
- # no bottleneck
- result = s.sum(skipna=False)
- assert int(result) == v.sum(dtype='int64')
- result = s.min(skipna=False)
- assert int(result) == 0
- result = s.max(skipna=False)
- assert int(result) == v[-1]
-
- for dtype in ['float32', 'float64']:
- v = np.arange(5000000, dtype=dtype)
- s = Series(v)
-
- # no bottleneck
- result = s.sum(skipna=False)
- assert result == v.sum(dtype=dtype)
- result = s.min(skipna=False)
- assert np.allclose(float(result), 0.0)
- result = s.max(skipna=False)
- assert np.allclose(float(result), v[-1])
-
- @pytest.mark.xfail(
- skip_if_bottleneck_on_windows,
- reason="buggy bottleneck with sum overflow on windows")
- def test_overflow_with_bottleneck(self):
- # GH 6915
- # overflowing on the smaller int dtypes
- for dtype in ['int32', 'int64']:
- v = np.arange(5000000, dtype=dtype)
- s = Series(v)
-
- # use bottleneck if available
- result = s.sum()
- assert int(result) == v.sum(dtype='int64')
- result = s.min()
- assert int(result) == 0
- result = s.max()
- assert int(result) == v[-1]
-
- for dtype in ['float32', 'float64']:
- v = np.arange(5000000, dtype=dtype)
- s = Series(v)
-
- # use bottleneck if available
- result = s.sum()
- assert result == v.sum(dtype=dtype)
- result = s.min()
- assert np.allclose(float(result), 0.0)
- result = s.max()
- assert np.allclose(float(result), v[-1])
-
- @pytest.mark.xfail(
- skip_if_bottleneck_on_windows,
- reason="buggy bottleneck with sum overflow on windows")
+ @pytest.mark.parametrize("use_bottleneck", [True, False])
+ def test_sum_overflow(self, use_bottleneck):
+
+ with pd.option_context('use_bottleneck', use_bottleneck):
+ # GH 6915
+ # overflowing on the smaller int dtypes
+ for dtype in ['int32', 'int64']:
+ v = np.arange(5000000, dtype=dtype)
+ s = Series(v)
+
+ result = s.sum(skipna=False)
+ assert int(result) == v.sum(dtype='int64')
+ result = s.min(skipna=False)
+ assert int(result) == 0
+ result = s.max(skipna=False)
+ assert int(result) == v[-1]
+
+ for dtype in ['float32', 'float64']:
+ v = np.arange(5000000, dtype=dtype)
+ s = Series(v)
+
+ result = s.sum(skipna=False)
+ assert result == v.sum(dtype=dtype)
+ result = s.min(skipna=False)
+ assert np.allclose(float(result), 0.0)
+ result = s.max(skipna=False)
+ assert np.allclose(float(result), v[-1])
+
def test_sum(self):
self._check_stat_op('sum', np.sum, check_allna=True)
def test_sum_inf(self):
- import pandas.core.nanops as nanops
-
s = Series(np.random.randn(10))
s2 = s.copy()
@@ -130,7 +123,7 @@ def test_sum_inf(self):
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
- with cf.option_context("mode.use_inf_as_na", True):
+ with pd.option_context("mode.use_inf_as_na", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
@@ -510,9 +503,8 @@ def test_npdiff(self):
def _check_stat_op(self, name, alternate, check_objects=False,
check_allna=False):
- import pandas.core.nanops as nanops
- def testit():
+ with pd.option_context('use_bottleneck', False):
f = getattr(Series, name)
# add some NaNs
@@ -535,15 +527,7 @@ def testit():
allna = self.series * nan
if check_allna:
- # xref 9422
- # bottleneck >= 1.0 give 0.0 for an allna Series sum
- try:
- assert nanops._USE_BOTTLENECK
- import bottleneck as bn # noqa
- assert bn.__version__ >= LooseVersion('1.0')
- assert f(allna) == 0.0
- except:
- assert np.isnan(f(allna))
+ assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
@@ -574,16 +558,6 @@ def testit():
tm.assert_raises_regex(NotImplementedError, name, f,
self.series, numeric_only=True)
- testit()
-
- try:
- import bottleneck as bn # noqa
- nanops._USE_BOTTLENECK = False
- testit()
- nanops._USE_BOTTLENECK = True
- except ImportError:
- pass
-
def _check_accum_op(self, name, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(func(self.ts).values,
@@ -733,31 +707,6 @@ def test_modulo(self):
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
- def test_ops_consistency_on_empty(self):
-
- # GH 7869
- # consistency on empty
-
- # float
- result = Series(dtype=float).sum()
- assert result == 0
-
- result = Series(dtype=float).mean()
- assert isna(result)
-
- result = Series(dtype=float).median()
- assert isna(result)
-
- # timedelta64[ns]
- result = Series(dtype='m8[ns]').sum()
- assert result == Timedelta(0)
-
- result = Series(dtype='m8[ns]').mean()
- assert result is pd.NaT
-
- result = Series(dtype='m8[ns]').median()
- assert result is pd.NaT
-
def test_corr(self):
tm._skip_if_no_scipy()
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index c8e056f156218..2769ec0d2dbed 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -172,7 +172,7 @@ def wrapper(x):
for i in range(obj.ndim):
result = f(axis=i)
- if not tm._incompat_bottleneck_version(name):
+ if name in ['sum', 'prod']:
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
pytest.raises(Exception, f, axis=obj.ndim)
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 863671feb4ed8..49859fd27d7bc 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -138,7 +138,7 @@ def wrapper(x):
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i)
- if not tm._incompat_bottleneck_version(name):
+ if name in ['sum', 'prod']:
expected = obj.apply(skipna_wrapper, axis=i)
tm.assert_panel_equal(result, expected)
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 0fe51121abef6..432350b4849d8 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -2355,7 +2355,8 @@ def test_expanding_consistency(self, min_periods):
expanding_apply_f_result = x.expanding(
min_periods=min_periods).apply(func=f)
- if not tm._incompat_bottleneck_version(name):
+ # GH 9422
+ if name in ['sum', 'prod']:
assert_equal(expanding_f_result,
expanding_apply_f_result)
@@ -2453,7 +2454,9 @@ def test_rolling_consistency(self, window, min_periods, center):
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods,
center=center).apply(func=f)
- if not tm._incompat_bottleneck_version(name):
+
+ # GH 9422
+ if name in ['sum', 'prod']:
assert_equal(rolling_f_result,
rolling_apply_f_result)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 202c9473eea12..3c23462e10d35 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -401,21 +401,6 @@ def _skip_if_no_localpath():
pytest.skip("py.path not installed")
-def _incompat_bottleneck_version(method):
- """ skip if we have bottleneck installed
- and its >= 1.0
- as we don't match the nansum/nanprod behavior for all-nan
- ops, see GH9422
- """
- if method not in ['sum', 'prod']:
- return False
- try:
- import bottleneck as bn
- return bn.__version__ >= LooseVersion('1.0')
- except ImportError:
- return False
-
-
def skip_if_no_ne(engine='numexpr'):
from pandas.core.computation.expressions import (
_USE_NUMEXPR,
| xref #15507
closes #9422
| https://api.github.com/repos/pandas-dev/pandas/pulls/17630 | 2017-09-22T15:08:18Z | 2017-10-10T13:17:02Z | 2017-10-10T13:17:02Z | 2017-10-10T16:09:20Z |
BUG: coerce pd.wide_to_long suffixes to ints | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 02cb5aa870c9b..d982d88eafcfe 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -191,6 +191,7 @@ Other API Changes
- Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`)
- Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:pr:`16672`)
- :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`)
+- :func:`wide_to_long` previously kept numeric-like suffixes as ``object`` dtype. Now they are cast to numeric if possible (:issue:`17627`)
.. _whatsnew_0220.deprecations:
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 16439b30d5bb4..46edc0b96b7c2 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -13,6 +13,7 @@
import re
from pandas.core.dtypes.missing import notna
+from pandas.core.tools.numeric import to_numeric
@Appender(_shared_docs['melt'] %
@@ -199,6 +200,9 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
.. versionadded:: 0.20.0
+ .. versionchanged:: 0.22.0
+ When all suffixes are numeric, they are cast to int64/float64.
+
Returns
-------
DataFrame
@@ -278,8 +282,8 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
Going from long back to wide just takes some creative use of `unstack`
- >>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack()
- >>> w.columns = pd.Index(w.columns).str.join('')
+ >>> w = l.unstack()
+ >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
@@ -333,16 +337,63 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
+ All of the above examples have integers as suffixes. It is possible to
+ have non-integers as suffixes.
+
+ >>> df = pd.DataFrame({
+ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
+ ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
+ ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
+ ... })
+ >>> df
+ birth famid ht_one ht_two
+ 0 1 1 2.8 3.4
+ 1 2 1 2.9 3.8
+ 2 3 1 2.2 2.9
+ 3 1 2 2.0 3.2
+ 4 2 2 1.8 2.8
+ 5 3 2 1.9 2.4
+ 6 1 3 2.2 3.3
+ 7 2 3 2.3 3.4
+ 8 3 3 2.1 2.9
+
+ >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
+ sep='_', suffix='\w')
+ >>> l
+ ... # doctest: +NORMALIZE_WHITESPACE
+ ht
+ famid birth age
+ 1 1 one 2.8
+ two 3.4
+ 2 one 2.9
+ two 3.8
+ 3 one 2.2
+ two 2.9
+ 2 1 one 2.0
+ two 3.2
+ 2 one 1.8
+ two 2.8
+ 3 one 1.9
+ two 2.4
+ 3 1 one 2.2
+ two 3.3
+ 2 one 2.3
+ two 3.4
+ 3 one 2.1
+ two 2.9
+
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
- in a typicaly case.
+ in a typical case.
"""
def get_var_names(df, stub, sep, suffix):
- regex = "^{stub}{sep}{suffix}".format(
+ regex = r'^{stub}{sep}{suffix}$'.format(
stub=re.escape(stub), sep=re.escape(sep), suffix=suffix)
- return df.filter(regex=regex).columns.tolist()
+ pattern = re.compile(regex)
+ return [col for col in df.columns if pattern.match(col)]
def melt_stub(df, stub, i, j, value_vars, sep):
newdf = melt(df, id_vars=i, value_vars=value_vars,
@@ -350,9 +401,12 @@ def melt_stub(df, stub, i, j, value_vars, sep):
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
+ # GH17627 Cast numerics suffixes to int/float
+ newdf[j] = to_numeric(newdf[j], errors='ignore')
+
return newdf.set_index(i + [j])
- if any(map(lambda s: s in df.columns.tolist(), stubnames)):
+ if any([col in stubnames for col in df.columns]):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(stubnames):
@@ -368,8 +422,7 @@ def melt_stub(df, stub, i, j, value_vars, sep):
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
- value_vars = list(map(lambda stub:
- get_var_names(df, stub, sep, suffix), stubnames))
+ value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames]
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 3c38512548c70..b7422dfd7e911 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -308,12 +308,12 @@ def test_simple(self):
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
- "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
+ "year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2]}
- exp_frame = DataFrame(exp_data)
- exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
- long_frame = wide_to_long(df, ["A", "B"], i="id", j="year")
- tm.assert_frame_equal(long_frame, exp_frame)
+ expected = DataFrame(exp_data)
+ expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
+ result = wide_to_long(df, ["A", "B"], i="id", j="year")
+ tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204
@@ -348,12 +348,12 @@ def test_separating_character(self):
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
- "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
+ "year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2]}
- exp_frame = DataFrame(exp_data)
- exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
- long_frame = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
- tm.assert_frame_equal(long_frame, exp_frame)
+ expected = DataFrame(exp_data)
+ expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
+ result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
+ tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
@@ -376,14 +376,14 @@ def test_escapable_characters(self):
exp_data = {"X": x.tolist() + x.tolist(),
"A(quarterly)": ['a', 'b', 'c', 'd', 'e', 'f'],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
- "year": ['1970', '1970', '1970', '1980', '1980', '1980'],
+ "year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2]}
- exp_frame = DataFrame(exp_data)
- exp_frame = exp_frame.set_index(
+ expected = DataFrame(exp_data)
+ expected = expected.set_index(
['id', 'year'])[["X", "A(quarterly)", "B(quarterly)"]]
- long_frame = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
- i="id", j="year")
- tm.assert_frame_equal(long_frame, exp_frame)
+ result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
+ i="id", j="year")
+ tm.assert_frame_equal(result, expected)
def test_unbalanced(self):
# test that we can have a varying amount of time variables
@@ -396,11 +396,11 @@ def test_unbalanced(self):
'A': [1.0, 3.0, 2.0, 4.0],
'B': [5.0, np.nan, 6.0, np.nan],
'id': [0, 0, 1, 1],
- 'year': ['2010', '2011', '2010', '2011']}
- exp_frame = pd.DataFrame(exp_data)
- exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
- long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
- tm.assert_frame_equal(long_frame, exp_frame)
+ 'year': [2010, 2011, 2010, 2011]}
+ expected = pd.DataFrame(exp_data)
+ expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
+ result = wide_to_long(df, ['A', 'B'], i='id', j='year')
+ tm.assert_frame_equal(result, expected)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
@@ -415,19 +415,19 @@ def test_character_overlap(self):
'BBBZ': [91, 92, 93]
})
df['id'] = df.index
- exp_frame = pd.DataFrame({
+ expected = pd.DataFrame({
'BBBX': [91, 92, 93, 91, 92, 93],
'BBBZ': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
- 'year': ['11', '11', '11', '12', '12', '12']})
- exp_frame = exp_frame.set_index(['id', 'year'])[
+ 'year': [11, 11, 11, 12, 12, 12]})
+ expected = expected.set_index(['id', 'year'])[
['BBBX', 'BBBZ', 'A', 'B', 'BB']]
- long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
- tm.assert_frame_equal(long_frame.sort_index(axis=1),
- exp_frame.sort_index(axis=1))
+ result = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
+ tm.assert_frame_equal(result.sort_index(axis=1),
+ expected.sort_index(axis=1))
def test_invalid_separator(self):
# if an invalid separator is supplied a empty data frame is returned
@@ -445,13 +445,13 @@ def test_invalid_separator(self):
'year': [],
'A': [],
'B': []}
- exp_frame = pd.DataFrame(exp_data)
- exp_frame = exp_frame.set_index(['id', 'year'])[[
+ expected = pd.DataFrame(exp_data).astype({'year': 'int'})
+ expected = expected.set_index(['id', 'year'])[[
'X', 'A2010', 'A2011', 'B2010', 'A', 'B']]
- exp_frame.index.set_levels([[0, 1], []], inplace=True)
- long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
- tm.assert_frame_equal(long_frame.sort_index(axis=1),
- exp_frame.sort_index(axis=1))
+ expected.index.set_levels([0, 1], level=0, inplace=True)
+ result = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
+ tm.assert_frame_equal(result.sort_index(axis=1),
+ expected.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
@@ -467,19 +467,19 @@ def test_num_string_disambiguation(self):
'Arating_old': [91, 92, 93]
})
df['id'] = df.index
- exp_frame = pd.DataFrame({
+ expected = pd.DataFrame({
'Arating': [91, 92, 93, 91, 92, 93],
'Arating_old': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
- 'year': ['11', '11', '11', '12', '12', '12']})
- exp_frame = exp_frame.set_index(['id', 'year'])[
+ 'year': [11, 11, 11, 12, 12, 12]})
+ expected = expected.set_index(['id', 'year'])[
['Arating', 'Arating_old', 'A', 'B', 'BB']]
- long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
- tm.assert_frame_equal(long_frame.sort_index(axis=1),
- exp_frame.sort_index(axis=1))
+ result = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
+ tm.assert_frame_equal(result.sort_index(axis=1),
+ expected.sort_index(axis=1))
def test_invalid_suffixtype(self):
# If all stubs names end with a string, but a numeric suffix is
@@ -497,13 +497,13 @@ def test_invalid_suffixtype(self):
'year': [],
'A': [],
'B': []}
- exp_frame = pd.DataFrame(exp_data)
- exp_frame = exp_frame.set_index(['id', 'year'])[[
- 'X', 'Aone', 'Atwo', 'Bone', 'A', 'B']]
- exp_frame.index.set_levels([[0, 1], []], inplace=True)
- long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
- tm.assert_frame_equal(long_frame.sort_index(axis=1),
- exp_frame.sort_index(axis=1))
+ expected = pd.DataFrame(exp_data).astype({'year': 'int'})
+
+ expected = expected.set_index(['id', 'year'])
+ expected.index.set_levels([0, 1], level=0, inplace=True)
+ result = wide_to_long(df, ['A', 'B'], i='id', j='year')
+ tm.assert_frame_equal(result.sort_index(axis=1),
+ expected.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
@@ -513,17 +513,17 @@ def test_multiple_id_columns(self):
'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
})
- exp_frame = pd.DataFrame({
+ expected = pd.DataFrame({
'ht': [2.8, 3.4, 2.9, 3.8, 2.2, 2.9, 2.0, 3.2, 1.8,
2.8, 1.9, 2.4, 2.2, 3.3, 2.3, 3.4, 2.1, 2.9],
'famid': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
'birth': [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
- 'age': ['1', '2', '1', '2', '1', '2', '1', '2', '1',
- '2', '1', '2', '1', '2', '1', '2', '1', '2']
+ 'age': [1, 2, 1, 2, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 1, 2]
})
- exp_frame = exp_frame.set_index(['famid', 'birth', 'age'])[['ht']]
- long_frame = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
- tm.assert_frame_equal(long_frame, exp_frame)
+ expected = expected.set_index(['famid', 'birth', 'age'])[['ht']]
+ result = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
+ tm.assert_frame_equal(result, expected)
def test_non_unique_idvars(self):
# GH16382
@@ -535,3 +535,87 @@ def test_non_unique_idvars(self):
})
with pytest.raises(ValueError):
wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname')
+
+ def test_cast_j_int(self):
+ df = pd.DataFrame({
+ 'actor_1': ['CCH Pounder', 'Johnny Depp', 'Christoph Waltz'],
+ 'actor_2': ['Joel David Moore', 'Orlando Bloom', 'Rory Kinnear'],
+ 'actor_fb_likes_1': [1000.0, 40000.0, 11000.0],
+ 'actor_fb_likes_2': [936.0, 5000.0, 393.0],
+ 'title': ['Avatar', "Pirates of the Caribbean", 'Spectre']})
+
+ expected = pd.DataFrame({
+ 'actor': ['CCH Pounder',
+ 'Johnny Depp',
+ 'Christoph Waltz',
+ 'Joel David Moore',
+ 'Orlando Bloom',
+ 'Rory Kinnear'],
+ 'actor_fb_likes': [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0],
+ 'num': [1, 1, 1, 2, 2, 2],
+ 'title': ['Avatar',
+ 'Pirates of the Caribbean',
+ 'Spectre',
+ 'Avatar',
+ 'Pirates of the Caribbean',
+ 'Spectre']}).set_index(['title', 'num'])
+ result = wide_to_long(df, ['actor', 'actor_fb_likes'],
+ i='title', j='num', sep='_')
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_identical_stubnames(self):
+ df = pd.DataFrame({'A2010': [1.0, 2.0],
+ 'A2011': [3.0, 4.0],
+ 'B2010': [5.0, 6.0],
+ 'A': ['X1', 'X2']})
+ with pytest.raises(ValueError):
+ wide_to_long(df, ['A', 'B'], i='A', j='colname')
+
+ def test_nonnumeric_suffix(self):
+ df = pd.DataFrame({'treatment_placebo': [1.0, 2.0],
+ 'treatment_test': [3.0, 4.0],
+ 'result_placebo': [5.0, 6.0],
+ 'A': ['X1', 'X2']})
+ expected = pd.DataFrame({
+ 'A': ['X1', 'X1', 'X2', 'X2'],
+ 'colname': ['placebo', 'test', 'placebo', 'test'],
+ 'result': [5.0, np.nan, 6.0, np.nan],
+ 'treatment': [1.0, 3.0, 2.0, 4.0]})
+ expected = expected.set_index(['A', 'colname'])
+ result = wide_to_long(df, ['result', 'treatment'],
+ i='A', j='colname', suffix='[a-z]+', sep='_')
+ tm.assert_frame_equal(result, expected)
+
+ def test_mixed_type_suffix(self):
+ df = pd.DataFrame({
+ 'treatment_1': [1.0, 2.0],
+ 'treatment_foo': [3.0, 4.0],
+ 'result_foo': [5.0, 6.0],
+ 'result_1': [0, 9],
+ 'A': ['X1', 'X2']})
+ expected = pd.DataFrame({
+ 'A': ['X1', 'X2', 'X1', 'X2'],
+ 'colname': ['1', '1', 'foo', 'foo'],
+ 'result': [0.0, 9.0, 5.0, 6.0],
+ 'treatment': [1.0, 2.0, 3.0, 4.0]}).set_index(['A', 'colname'])
+ result = wide_to_long(df, ['result', 'treatment'],
+ i='A', j='colname', suffix='.+', sep='_')
+ tm.assert_frame_equal(result, expected)
+
+ def test_float_suffix(self):
+ df = pd.DataFrame({
+ 'treatment_1.1': [1.0, 2.0],
+ 'treatment_2.1': [3.0, 4.0],
+ 'result_1.2': [5.0, 6.0],
+ 'result_1': [0, 9],
+ 'A': ['X1', 'X2']})
+ expected = pd.DataFrame({
+ 'A': ['X1', 'X1', 'X1', 'X1', 'X2', 'X2', 'X2', 'X2'],
+ 'colname': [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1],
+ 'result': [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan],
+ 'treatment': [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0]})
+ expected = expected.set_index(['A', 'colname'])
+ result = wide_to_long(df, ['result', 'treatment'],
+ i='A', j='colname', suffix='[0-9.]+', sep='_')
+ tm.assert_frame_equal(result, expected)
| - [x] closes #17627
- [x] I had to change nearly all the tests which had the suffixes as strings to integers. I also added a few other tests including one for string suffixes
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
I also cleaned up the finding of the var_names and substituted in some list comprehensions. | https://api.github.com/repos/pandas-dev/pandas/pulls/17628 | 2017-09-22T13:54:31Z | 2017-12-10T21:36:16Z | 2017-12-10T21:36:16Z | 2017-12-10T21:51:24Z |
DOC: whatsnew fixes | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 885babfdd1d19..a80fa744780a2 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -135,7 +135,7 @@ We have updated our minimum supported versions of dependencies (:issue:`15206`,
+--------------+-----------------+----------+
| Package | Minimum Version | Required |
- +======================+=========+==========+
+ +==============+=================+==========+
| Numpy | 1.9.0 | X |
+--------------+-----------------+----------+
| Matplotlib | 1.4.3 | |
@@ -241,54 +241,53 @@ New Behaviour:
Dtype Conversions
^^^^^^^^^^^^^^^^^
-- Previously assignments, ``.where()`` and ``.fillna()`` with a ``bool`` assignment, would coerce to
- same the type (e.g. int / float), or raise for datetimelikes. These will now preseve the bools with ``object`` dtypes. (:issue:`16821`).
+Previously assignments, ``.where()`` and ``.fillna()`` with a ``bool`` assignment, would coerce to same the type (e.g. int / float), or raise for datetimelikes. These will now preseve the bools with ``object`` dtypes. (:issue:`16821`).
- .. ipython:: python
+.. ipython:: python
- s = Series([1, 2, 3])
+ s = Series([1, 2, 3])
- .. code-block:: python
+.. code-block:: python
- In [5]: s[1] = True
+ In [5]: s[1] = True
- In [6]: s
- Out[6]:
- 0 1
- 1 1
- 2 3
- dtype: int64
+ In [6]: s
+ Out[6]:
+ 0 1
+ 1 1
+ 2 3
+ dtype: int64
- New Behavior
+New Behavior
- .. ipython:: python
+.. ipython:: python
- s[1] = True
- s
+ s[1] = True
+ s
-- Previously, as assignment to a datetimelike with a non-datetimelike would coerce the
- non-datetime-like item being assigned (:issue:`14145`).
+Previously, as assignment to a datetimelike with a non-datetimelike would coerce the
+non-datetime-like item being assigned (:issue:`14145`).
- .. ipython:: python
+.. ipython:: python
- s = pd.Series([pd.Timestamp('2011-01-01'), pd.Timestamp('2012-01-01')])
+ s = pd.Series([pd.Timestamp('2011-01-01'), pd.Timestamp('2012-01-01')])
- .. code-block:: python
+.. code-block:: python
- In [1]: s[1] = 1
+ In [1]: s[1] = 1
- In [2]: s
- Out[2]:
- 0 2011-01-01 00:00:00.000000000
- 1 1970-01-01 00:00:00.000000001
- dtype: datetime64[ns]
+ In [2]: s
+ Out[2]:
+ 0 2011-01-01 00:00:00.000000000
+ 1 1970-01-01 00:00:00.000000001
+ dtype: datetime64[ns]
- These now coerce to ``object`` dtype.
+These now coerce to ``object`` dtype.
- .. ipython:: python
+.. ipython:: python
- s[1] = 1
- s
+ s[1] = 1
+ s
- Inconsistent behavior in ``.where()`` with datetimelikes which would raise rather than coerce to ``object`` (:issue:`16402`)
- Bug in assignment against ``int64`` data with ``np.ndarray`` with ``float64`` dtype may keep ``int64`` dtype (:issue:`14001`)
@@ -338,26 +337,26 @@ UTC Localization with Series
Previously, :func:`to_datetime` did not localize datetime ``Series`` data when ``utc=True`` was passed. Now, :func:`to_datetime` will correctly localize ``Series`` with a ``datetime64[ns, UTC]`` dtype to be consistent with how list-like and ``Index`` data are handled. (:issue:`6415`).
- Previous Behavior
+Previous Behavior
- .. ipython:: python
+.. ipython:: python
- s = Series(['20130101 00:00:00'] * 3)
+ s = Series(['20130101 00:00:00'] * 3)
- .. code-block:: ipython
+.. code-block:: ipython
- In [12]: pd.to_datetime(s, utc=True)
- Out[12]:
- 0 2013-01-01
- 1 2013-01-01
- 2 2013-01-01
- dtype: datetime64[ns]
+ In [12]: pd.to_datetime(s, utc=True)
+ Out[12]:
+ 0 2013-01-01
+ 1 2013-01-01
+ 2 2013-01-01
+ dtype: datetime64[ns]
- New Behavior
+New Behavior
- .. ipython:: python
+.. ipython:: python
- pd.to_datetime(s, utc=True)
+ pd.to_datetime(s, utc=True)
Additionally, DataFrames with datetime columns that were parsed by :func:`read_sql_table` and :func:`read_sql_query` will also be localized to UTC only if the original SQL columns were timezone aware datetime columns.
@@ -410,9 +409,9 @@ Previous Behavior:
New Behavior:
- .. ipython:: python
+.. ipython:: python
- pd.interval_range(start=0, end=4)
+ pd.interval_range(start=0, end=4)
.. _whatsnew_0210.api:
@@ -476,6 +475,14 @@ Performance Improvements
- Improved performance of the :class:`CategoricalIndex` for data that is already categorical dtype (:issue:`17513`)
- Improved performance of :meth:`RangeIndex.min` and :meth:`RangeIndex.max` by using ``RangeIndex`` properties to perform the computations (:issue:`17607`)
+.. _whatsnew_0210.docs:
+
+Documentation Changes
+~~~~~~~~~~~~~~~~~~~~~
+
+- Several ``NaT`` method docstrings (e.g. :func:`NaT.ctime`) were incorrect (:issue:`17327`)
+- The documentation has had references to versions < v0.17 removed and cleaned up (:issue:`17442`, :issue:`17442`, :issue:`17404` & :issue:`17504`)
+
.. _whatsnew_0210.bug_fixes:
Bug Fixes
@@ -530,7 +537,7 @@ Plotting
^^^^^^^^
- Bug in plotting methods using ``secondary_y`` and ``fontsize`` not setting secondary axis font size (:issue:`12565`)
- Bug when plotting ``timedelta`` and ``datetime`` dtypes on y-axis (:issue:`16953`)
-- Line plots no longer assume monotonic x data when calculating xlims, they show the entire lines now even for unsorted x data. (:issue:`11310`)(:issue:`11471`)
+- Line plots no longer assume monotonic x data when calculating xlims, they show the entire lines now even for unsorted x data. (:issue:`11310`, :issue:`11471`)
- With matplotlib 2.0.0 and above, calculation of x limits for line plots is left to matplotlib, so that its new default settings are applied. (:issue:`15495`)
- Bug in ``Series.plot.bar`` or ``DataFramee.plot.bar`` with ``y`` not respecting user-passed ``color`` (:issue:`16822`)
@@ -575,10 +582,8 @@ Numeric
Categorical
^^^^^^^^^^^
- Bug in :func:`Series.isin` when called with a categorical (:issue`16639`)
-- Bug in the categorical constructor with empty values and categories causing
- the ``.categories`` to be an empty ``Float64Index`` rather than an empty
- ``Index`` with object dtype (:issue:`17248`)
-- Bug in categorical operations with :ref:`Series.cat <categorical.cat>' not preserving the original Series' name (:issue:`17509`)
+- Bug in the categorical constructor with empty values and categories causing the ``.categories`` to be an empty ``Float64Index`` rather than an empty ``Index`` with object dtype (:issue:`17248`)
+- Bug in categorical operations with :ref:`Series.cat <categorical.cat>' not preserving the original Series' name (:issue:`17509`)
PyPy
^^^^
@@ -593,5 +598,3 @@ PyPy
Other
^^^^^
- Bug in :func:`eval` where the ``inplace`` parameter was being incorrectly handled (:issue:`16732`)
-- Several ``NaT`` method docstrings (e.g. :func:`NaT.ctime`) were incorrect (:issue:`17327`)
-- The documentation has had references to versions < v0.17 removed and cleaned up (:issue:`17442`, :issue:`17442`, :issue:`17404` & :issue:`17504`)
| closes #17601
| https://api.github.com/repos/pandas-dev/pandas/pulls/17626 | 2017-09-22T13:39:34Z | 2017-09-22T13:39:41Z | 2017-09-22T13:39:41Z | 2017-09-22T13:39:41Z |
Revert "BLD: pin numpy to particular variant that is built for all our deps | diff --git a/ci/requirements-3.6.build b/ci/requirements-3.6.build
index 31ffd5acc7fcc..1c4b46aea3865 100644
--- a/ci/requirements-3.6.build
+++ b/ci/requirements-3.6.build
@@ -2,7 +2,5 @@ python=3.6*
python-dateutil
pytz
nomkl
+numpy
cython
-
-# pin numpy that is built for all our deps
-numpy=1.13.1=py36_blas_openblas_201
| xref #17619
This reverts commit 6930f27e78b2b61a4df31b667a816fa53e49ffed.
closes #17620
| https://api.github.com/repos/pandas-dev/pandas/pulls/17625 | 2017-09-22T12:53:23Z | 2017-09-22T13:41:32Z | 2017-09-22T13:41:32Z | 2017-09-23T10:17:23Z |
BUG: Fix groupby nunique with NaT | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 1cd65bb530f73..e4a3bd796e3ba 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -543,6 +543,7 @@ Groupby/Resample/Rolling
- Bug in ``Series.resample(...).apply()`` where an empty ``Series`` modified the source index and did not return the name of a ``Series`` (:issue:`14313`)
- Bug in ``.rolling(...).apply(...)`` with a ``DataFrame`` with a ``DatetimeIndex``, a ``window`` of a timedelta-convertible and ``min_periods >= 1` (:issue:`15305`)
- Bug in ``DataFrame.groupby`` where index and column keys were not recognized correctly when the number of keys equaled the number of elements on the groupby axis (:issue:`16859`)
+- Bug in ``groupby.nunique()`` with ``TimeGrouper`` which cannot handle ``NaT`` correctly (:issue:`17575`)
Sparse
^^^^^^
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index f14ed08a27fae..a62ae40a85941 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -3177,7 +3177,13 @@ def nunique(self, dropna=True):
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
if len(ids):
- res = out if ids[0] != -1 else out[1:]
+ # NaN/NaT group exists if the head of ids is -1,
+ # so remove it from res and exclude its index from idx
+ if ids[0] == -1:
+ res = out[1:]
+ idx = idx[np.flatnonzero(idx)]
+ else:
+ res = out
else:
res = out[1:]
ri = self.grouper.result_index
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index df0a93d783375..f83a3fcd0668d 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -608,3 +608,16 @@ def test_first_last_max_min_on_time_data(self):
assert_frame_equal(grouped_ref.min(), grouped_test.min())
assert_frame_equal(grouped_ref.first(), grouped_test.first())
assert_frame_equal(grouped_ref.last(), grouped_test.last())
+
+ def test_nunique_with_timegrouper_and_nat(self):
+ # GH 17575
+ test = pd.DataFrame({
+ 'time': [Timestamp('2016-06-28 09:35:35'),
+ pd.NaT,
+ Timestamp('2016-06-28 16:46:28')],
+ 'data': ['1', '2', '3']})
+
+ grouper = pd.TimeGrouper(key='time', freq='h')
+ result = test.groupby(grouper)['data'].nunique()
+ expected = test[test.time.notnull()].groupby(grouper)['data'].nunique()
+ tm.assert_series_equal(result, expected)
| - [x] closes #17575
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17624 | 2017-09-22T12:24:38Z | 2017-09-22T21:39:13Z | 2017-09-22T21:39:13Z | 2017-09-22T21:39:23Z |
TST: Use fixtures in indexes common tests | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 90618cd6e235f..970dd7b63225a 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -30,9 +30,9 @@ def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
- def verify_pickle(self, index):
- unpickled = tm.round_trip_pickle(index)
- assert index.equals(unpickled)
+ def verify_pickle(self, indices):
+ unpickled = tm.round_trip_pickle(indices)
+ assert indices.equals(unpickled)
def test_pickle_compat_construction(self):
# this is testing for pickle compat
@@ -97,7 +97,7 @@ def test_numeric_compat(self):
lambda: 1 * idx)
div_err = "cannot perform __truediv__" if PY3 \
- else "cannot perform __div__"
+ else "cannot perform __div__"
tm.assert_raises_regex(TypeError, div_err, lambda: idx / 1)
tm.assert_raises_regex(TypeError, div_err, lambda: 1 / idx)
tm.assert_raises_regex(TypeError, "cannot perform __floordiv__",
@@ -178,11 +178,10 @@ def test_str(self):
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
- def test_dtype_str(self):
- for idx in self.indices.values():
- dtype = idx.dtype_str
- assert isinstance(dtype, compat.string_types)
- assert dtype == str(idx.dtype)
+ def test_dtype_str(self, indices):
+ dtype = indices.dtype_str
+ assert isinstance(dtype, compat.string_types)
+ assert dtype == str(indices.dtype)
def test_repr_max_seq_item_setting(self):
# GH10182
@@ -192,48 +191,43 @@ def test_repr_max_seq_item_setting(self):
repr(idx)
assert '...' not in str(idx)
- def test_wrong_number_names(self):
+ def test_wrong_number_names(self, indices):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
+ tm.assert_raises_regex(ValueError, "^Length", testit, indices)
- for ind in self.indices.values():
- tm.assert_raises_regex(ValueError, "^Length", testit, ind)
-
- def test_set_name_methods(self):
+ def test_set_name_methods(self, indices):
new_name = "This is the new name for this index"
- for ind in self.indices.values():
-
- # don't tests a MultiIndex here (as its tested separated)
- if isinstance(ind, MultiIndex):
- continue
- original_name = ind.name
- new_ind = ind.set_names([new_name])
- assert new_ind.name == new_name
- assert ind.name == original_name
- res = ind.rename(new_name, inplace=True)
-
- # should return None
- assert res is None
- assert ind.name == new_name
- assert ind.names == [new_name]
- # with tm.assert_raises_regex(TypeError, "list-like"):
- # # should still fail even if it would be the right length
- # ind.set_names("a")
- with tm.assert_raises_regex(ValueError, "Level must be None"):
- ind.set_names("a", level=0)
-
- # rename in place just leaves tuples and other containers alone
- name = ('A', 'B')
- ind.rename(name, inplace=True)
- assert ind.name == name
- assert ind.names == [name]
-
- def test_hash_error(self):
- for ind in self.indices.values():
- with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
- type(ind).__name__):
- hash(ind)
+ # don't tests a MultiIndex here (as its tested separated)
+ if isinstance(indices, MultiIndex):
+ return
+ original_name = indices.name
+ new_ind = indices.set_names([new_name])
+ assert new_ind.name == new_name
+ assert indices.name == original_name
+ res = indices.rename(new_name, inplace=True)
+
+ # should return None
+ assert res is None
+ assert indices.name == new_name
+ assert indices.names == [new_name]
+ # with tm.assert_raises_regex(TypeError, "list-like"):
+ # # should still fail even if it would be the right length
+ # ind.set_names("a")
+ with tm.assert_raises_regex(ValueError, "Level must be None"):
+ indices.set_names("a", level=0)
+
+ # rename in place just leaves tuples and other containers alone
+ name = ('A', 'B')
+ indices.rename(name, inplace=True)
+ assert indices.name == name
+ assert indices.names == [name]
+
+ def test_hash_error(self, indices):
+ index = indices
+ tm.assert_raises_regex(TypeError, "unhashable type: %r" %
+ type(index).__name__, hash, indices)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
@@ -298,106 +292,87 @@ def test_ensure_copied_data(self):
tm.assert_numpy_array_equal(index._values, result._values,
check_same='same')
- def test_copy_and_deepcopy(self):
+ def test_copy_and_deepcopy(self, indices):
from copy import copy, deepcopy
- for ind in self.indices.values():
+ if isinstance(indices, MultiIndex):
+ return
+ for func in (copy, deepcopy):
+ idx_copy = func(indices)
+ assert idx_copy is not indices
+ assert idx_copy.equals(indices)
- # don't tests a MultiIndex here (as its tested separated)
- if isinstance(ind, MultiIndex):
- continue
+ new_copy = indices.copy(deep=True, name="banana")
+ assert new_copy.name == "banana"
- for func in (copy, deepcopy):
- idx_copy = func(ind)
- assert idx_copy is not ind
- assert idx_copy.equals(ind)
+ def test_duplicates(self, indices):
+ if type(indices) is not self._holder:
+ return
+ if not len(indices) or isinstance(indices, MultiIndex):
+ return
+ idx = self._holder([indices[0]] * 5)
+ assert not idx.is_unique
+ assert idx.has_duplicates
- new_copy = ind.copy(deep=True, name="banana")
- assert new_copy.name == "banana"
+ def test_get_unique_index(self, indices):
+ # MultiIndex tested separately
+ if not len(indices) or isinstance(indices, MultiIndex):
+ return
- def test_duplicates(self):
- for ind in self.indices.values():
+ idx = indices[[0] * 5]
+ idx_unique = indices[[0]]
- if not len(ind):
- continue
- if isinstance(ind, MultiIndex):
- continue
- idx = self._holder([ind[0]] * 5)
- assert not idx.is_unique
- assert idx.has_duplicates
-
- # GH 10115
- # preserve names
- idx.name = 'foo'
- result = idx.drop_duplicates()
- assert result.name == 'foo'
- tm.assert_index_equal(result, Index([ind[0]], name='foo'))
-
- def test_get_unique_index(self):
- for ind in self.indices.values():
-
- # MultiIndex tested separately
- if not len(ind) or isinstance(ind, MultiIndex):
- continue
+ # We test against `idx_unique`, so first we make sure it's unique
+ # and doesn't contain nans.
+ assert idx_unique.is_unique
+ try:
+ assert not idx_unique.hasnans
+ except NotImplementedError:
+ pass
- idx = ind[[0] * 5]
- idx_unique = ind[[0]]
+ for dropna in [False, True]:
+ result = idx._get_unique_index(dropna=dropna)
+ tm.assert_index_equal(result, idx_unique)
- # We test against `idx_unique`, so first we make sure it's unique
- # and doesn't contain nans.
- assert idx_unique.is_unique
- try:
- assert not idx_unique.hasnans
- except NotImplementedError:
- pass
+ # nans:
+ if not indices._can_hold_na:
+ return
- for dropna in [False, True]:
- result = idx._get_unique_index(dropna=dropna)
- tm.assert_index_equal(result, idx_unique)
+ if needs_i8_conversion(indices):
+ vals = indices.asi8[[0] * 5]
+ vals[0] = iNaT
+ else:
+ vals = indices.values[[0] * 5]
+ vals[0] = np.nan
- # nans:
- if not ind._can_hold_na:
- continue
+ vals_unique = vals[:2]
+ idx_nan = indices._shallow_copy(vals)
+ idx_unique_nan = indices._shallow_copy(vals_unique)
+ assert idx_unique_nan.is_unique
- if needs_i8_conversion(ind):
- vals = ind.asi8[[0] * 5]
- vals[0] = iNaT
- else:
- vals = ind.values[[0] * 5]
- vals[0] = np.nan
-
- vals_unique = vals[:2]
- idx_nan = ind._shallow_copy(vals)
- idx_unique_nan = ind._shallow_copy(vals_unique)
- assert idx_unique_nan.is_unique
-
- assert idx_nan.dtype == ind.dtype
- assert idx_unique_nan.dtype == ind.dtype
-
- for dropna, expected in zip([False, True],
- [idx_unique_nan, idx_unique]):
- for i in [idx_nan, idx_unique_nan]:
- result = i._get_unique_index(dropna=dropna)
- tm.assert_index_equal(result, expected)
-
- def test_sort(self):
- for ind in self.indices.values():
- pytest.raises(TypeError, ind.sort)
-
- def test_mutability(self):
- for ind in self.indices.values():
- if not len(ind):
- continue
- pytest.raises(TypeError, ind.__setitem__, 0, ind[0])
+ assert idx_nan.dtype == indices.dtype
+ assert idx_unique_nan.dtype == indices.dtype
- def test_view(self):
- for ind in self.indices.values():
- i_view = ind.view()
- assert i_view.name == ind.name
+ for dropna, expected in zip([False, True],
+ [idx_unique_nan,
+ idx_unique]):
+ for i in [idx_nan, idx_unique_nan]:
+ result = i._get_unique_index(dropna=dropna)
+ tm.assert_index_equal(result, expected)
- def test_compat(self):
- for ind in self.indices.values():
- assert ind.tolist() == list(ind)
+ def test_sort(self, indices):
+ pytest.raises(TypeError, indices.sort)
+
+ def test_mutability(self, indices):
+ if not len(indices):
+ return
+ pytest.raises(TypeError, indices.__setitem__, 0, indices[0])
+
+ def test_view(self, indices):
+ assert indices.view().name == indices.name
+
+ def test_compat(self, indices):
+ assert indices.tolist() == list(indices)
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
@@ -457,11 +432,11 @@ def test_numpy_argsort(self):
tm.assert_raises_regex(ValueError, msg, np.argsort,
ind, order=('a', 'b'))
- def test_pickle(self):
- for ind in self.indices.values():
- self.verify_pickle(ind)
- ind.name = 'foo'
- self.verify_pickle(ind)
+ def test_pickle(self, indices):
+ self.verify_pickle(indices)
+ original_name, indices.name = indices.name, 'foo'
+ self.verify_pickle(indices)
+ indices.name = original_name
def test_take(self):
indexer = [4, 3, 0, 2]
@@ -962,46 +937,47 @@ def test_join_self_unique(self, how):
joined = index.join(index, how=how)
assert (index == joined).all()
- def test_searchsorted_monotonic(self):
+ def test_searchsorted_monotonic(self, indices):
# GH17271
- for index in self.indices.values():
- # not implemented for tuple searches in MultiIndex
- # or Intervals searches in IntervalIndex
- if isinstance(index, (MultiIndex, IntervalIndex)):
- continue
+ # not implemented for tuple searches in MultiIndex
+ # or Intervals searches in IntervalIndex
+ if isinstance(indices, (MultiIndex, IntervalIndex)):
+ return
- # nothing to test if the index is empty
- if index.empty:
- continue
- value = index[0]
-
- # determine the expected results (handle dupes for 'right')
- expected_left, expected_right = 0, (index == value).argmin()
- if expected_right == 0:
- # all values are the same, expected_right should be length
- expected_right = len(index)
-
- # test _searchsorted_monotonic in all cases
- # test searchsorted only for increasing
- if index.is_monotonic_increasing:
- ssm_left = index._searchsorted_monotonic(value, side='left')
- assert expected_left == ssm_left
-
- ssm_right = index._searchsorted_monotonic(value, side='right')
- assert expected_right == ssm_right
-
- ss_left = index.searchsorted(value, side='left')
- assert expected_left == ss_left
-
- ss_right = index.searchsorted(value, side='right')
- assert expected_right == ss_right
- elif index.is_monotonic_decreasing:
- ssm_left = index._searchsorted_monotonic(value, side='left')
- assert expected_left == ssm_left
-
- ssm_right = index._searchsorted_monotonic(value, side='right')
- assert expected_right == ssm_right
- else:
- # non-monotonic should raise.
- with pytest.raises(ValueError):
- index._searchsorted_monotonic(value, side='left')
+ # nothing to test if the index is empty
+ if indices.empty:
+ return
+ value = indices[0]
+
+ # determine the expected results (handle dupes for 'right')
+ expected_left, expected_right = 0, (indices == value).argmin()
+ if expected_right == 0:
+ # all values are the same, expected_right should be length
+ expected_right = len(indices)
+
+ # test _searchsorted_monotonic in all cases
+ # test searchsorted only for increasing
+ if indices.is_monotonic_increasing:
+ ssm_left = indices._searchsorted_monotonic(value, side='left')
+ assert expected_left == ssm_left
+
+ ssm_right = indices._searchsorted_monotonic(value, side='right')
+ assert expected_right == ssm_right
+
+ ss_left = indices.searchsorted(value, side='left')
+ assert expected_left == ss_left
+
+ ss_right = indices.searchsorted(value, side='right')
+ assert expected_right == ss_right
+
+ elif indices.is_monotonic_decreasing:
+ ssm_left = indices._searchsorted_monotonic(value, side='left')
+ assert expected_left == ssm_left
+
+ ssm_right = indices._searchsorted_monotonic(value, side='right')
+ assert expected_right == ssm_right
+
+ else:
+ # non-monotonic should raise.
+ with pytest.raises(ValueError):
+ indices._searchsorted_monotonic(value, side='left')
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
new file mode 100644
index 0000000000000..a0ee3e511ef37
--- /dev/null
+++ b/pandas/tests/indexes/conftest.py
@@ -0,0 +1,24 @@
+import pytest
+
+import pandas.util.testing as tm
+from pandas.core.indexes.api import Index, MultiIndex
+from pandas.compat import lzip
+
+
+@pytest.fixture(params=[tm.makeUnicodeIndex(100),
+ tm.makeStringIndex(100),
+ tm.makeDateIndex(100),
+ tm.makePeriodIndex(100),
+ tm.makeTimedeltaIndex(100),
+ tm.makeIntIndex(100),
+ tm.makeUIntIndex(100),
+ tm.makeFloatIndex(100),
+ Index([True, False]),
+ tm.makeCategoricalIndex(100),
+ Index([]),
+ MultiIndex.from_tuples(lzip(
+ ['foo', 'bar', 'baz'], [1, 2, 3])),
+ Index([0, 0, 1, 1, 2, 2])],
+ ids=lambda x: type(x).__name__)
+def indices(request):
+ return request.param
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 114940009377c..12b509d4aef3f 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -26,8 +26,8 @@ def test_str(self):
if hasattr(idx, 'freq'):
assert "freq='%s'" % idx.freqstr in str(idx)
- def test_view(self):
- super(DatetimeLike, self).test_view()
+ def test_view(self, indices):
+ super(DatetimeLike, self).test_view(indices)
i = self.create_index()
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index fa73c9fc7b722..0bd2861e060ed 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -58,8 +58,8 @@ def test_new_axis(self):
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
- def test_copy_and_deepcopy(self):
- super(TestIndex, self).test_copy_and_deepcopy()
+ def test_copy_and_deepcopy(self, indices):
+ super(TestIndex, self).test_copy_and_deepcopy(indices)
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 7e7e10e4aeabe..dc38b0a2b1fb7 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -459,8 +459,8 @@ def test_take_fill_value(self):
class NumericInt(Numeric):
- def test_view(self):
- super(NumericInt, self).test_view()
+ def test_view(self, indices):
+ super(NumericInt, self).test_view(indices)
i = self._holder([], name='Foo')
i_view = i.view()
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index d206c36ee51c9..e87208af965a1 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -312,8 +312,8 @@ def test_delete(self):
# either depending on numpy version
result = idx.delete(len(idx))
- def test_view(self):
- super(TestRangeIndex, self).test_view()
+ def test_view(self, indices):
+ super(TestRangeIndex, self).test_view(indices)
i = RangeIndex(0, name='Foo')
i_view = i.view()
| - [x] closes #16835
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Not sure if there is a better way to define fixture params since they're essentially the indexes defined in `setup_method`. Also thinking about adding an id to each param so the output doesn't look like:
```
test_base.py::TestMixedIntIndex::test_sort[index0] <- pandas/tests/indexes/common.py PASSED
test_base.py::TestMixedIntIndex::test_sort[index1] <- pandas/tests/indexes/common.py PASSED
test_base.py::TestMixedIntIndex::test_sort[index2] <- pandas/tests/indexes/common.py PASSED
test_base.py::TestMixedIntIndex::test_sort[index3] <- pandas/tests/indexes/common.py PASSED
...
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/17622 | 2017-09-22T10:51:14Z | 2017-09-25T10:20:51Z | 2017-09-25T10:20:51Z | 2017-09-28T03:23:05Z |
BUG: Fix unexpected sort in groupby | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 50f11c38bae23..5eb2c94ecf66e 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -625,6 +625,7 @@ Groupby/Resample/Rolling
- Bug in ``.rolling(...).apply(...)`` with a ``DataFrame`` with a ``DatetimeIndex``, a ``window`` of a timedelta-convertible and ``min_periods >= 1` (:issue:`15305`)
- Bug in ``DataFrame.groupby`` where index and column keys were not recognized correctly when the number of keys equaled the number of elements on the groupby axis (:issue:`16859`)
- Bug in ``groupby.nunique()`` with ``TimeGrouper`` which cannot handle ``NaT`` correctly (:issue:`17575`)
+- Bug in ``DataFrame.groupby`` where a single level selection from a ``MultiIndex`` unexpectedly sorts (:issue:`17537`)
Sparse
^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2fb0e348c01c0..6302685669e9b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6631,7 +6631,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
- grouped = self.groupby(level=level, axis=axis)
+ grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index a62ae40a85941..2f2056279558d 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2586,10 +2586,27 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
"""
group_axis = obj._get_axis(axis)
- # validate that the passed level is compatible with the passed
+ # validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
- if not isinstance(group_axis, MultiIndex):
+ # TODO: These if-block and else-block are almost same.
+ # MultiIndex instance check is removable, but it seems that there are
+ # some processes only for non-MultiIndex in else-block,
+ # eg. `obj.index.name != level`. We have to consider carefully whether
+ # these are applicable for MultiIndex. Even if these are applicable,
+ # we need to check if it makes no side effect to subsequent processes
+ # on the outside of this condition.
+ # (GH 17621)
+ if isinstance(group_axis, MultiIndex):
+ if is_list_like(level) and len(level) == 1:
+ level = level[0]
+
+ if key is None and is_scalar(level):
+ # Get the level values from group_axis
+ key = group_axis.get_level_values(level)
+ level = None
+
+ else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
@@ -2611,6 +2628,8 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
raise ValueError('level > 0 or level < -1 only valid with '
' MultiIndex')
+ # NOTE: `group_axis` and `group_axis.get_level_values(level)`
+ # are same in this section.
level = None
key = group_axis
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index d91cff436dee2..47bf837fa62d9 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1791,18 +1791,20 @@ def aggfun(ser):
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
- def test_groupby_level(self):
+ @pytest.mark.parametrize('sort', [True, False])
+ def test_groupby_level(self, sort):
+ # GH 17537
frame = self.mframe
deleveled = frame.reset_index()
- result0 = frame.groupby(level=0).sum()
- result1 = frame.groupby(level=1).sum()
+ result0 = frame.groupby(level=0, sort=sort).sum()
+ result1 = frame.groupby(level=1, sort=sort).sum()
- expected0 = frame.groupby(deleveled['first'].values).sum()
- expected1 = frame.groupby(deleveled['second'].values).sum()
+ expected0 = frame.groupby(deleveled['first'].values, sort=sort).sum()
+ expected1 = frame.groupby(deleveled['second'].values, sort=sort).sum()
- expected0 = expected0.reindex(frame.index.levels[0])
- expected1 = expected1.reindex(frame.index.levels[1])
+ expected0.index.name = 'first'
+ expected1.index.name = 'second'
assert result0.index.name == 'first'
assert result1.index.name == 'second'
@@ -1813,15 +1815,15 @@ def test_groupby_level(self):
assert result1.index.name == frame.index.names[1]
# groupby level name
- result0 = frame.groupby(level='first').sum()
- result1 = frame.groupby(level='second').sum()
+ result0 = frame.groupby(level='first', sort=sort).sum()
+ result1 = frame.groupby(level='second', sort=sort).sum()
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
# axis=1
- result0 = frame.T.groupby(level=0, axis=1).sum()
- result1 = frame.T.groupby(level=1, axis=1).sum()
+ result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()
+ result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum()
assert_frame_equal(result0, expected0.T)
assert_frame_equal(result1, expected1.T)
@@ -1835,15 +1837,17 @@ def test_groupby_level_index_names(self):
df.groupby(level='exp')
pytest.raises(ValueError, df.groupby, level='foo')
- def test_groupby_level_with_nas(self):
+ @pytest.mark.parametrize('sort', [True, False])
+ def test_groupby_level_with_nas(self, sort):
+ # GH 17537
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1,
2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
- result = s.groupby(level=0).sum()
- expected = Series([22., 6.], index=[1, 0])
+ result = s.groupby(level=0, sort=sort).sum()
+ expected = Series([6., 22.], index=[0, 1])
assert_series_equal(result, expected)
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
@@ -1852,8 +1856,8 @@ def test_groupby_level_with_nas(self):
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
- result = s.groupby(level=0).sum()
- expected = Series([18., 6.], index=[1, 0])
+ result = s.groupby(level=0, sort=sort).sum()
+ expected = Series([6., 18.], index=[0.0, 1.0])
assert_series_equal(result, expected)
def test_groupby_level_apply(self):
@@ -1936,9 +1940,14 @@ def test_groupby_complex(self):
result = a.sum(level=0)
assert_series_equal(result, expected)
- def test_level_preserve_order(self):
- grouped = self.mframe.groupby(level=0)
- exp_labels = np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3], np.intp)
+ @pytest.mark.parametrize('sort,labels', [
+ [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
+ [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]]
+ ])
+ def test_level_preserve_order(self, sort, labels):
+ # GH 17537
+ grouped = self.mframe.groupby(level=0, sort=sort)
+ exp_labels = np.array(labels, np.intp)
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_grouping_labels(self):
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py
index 1c5161d2ffb43..259f466316c41 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_whitelist.py
@@ -174,12 +174,16 @@ def raw_frame():
@pytest.mark.parametrize(
- "op, level, axis, skipna",
+ "op, level, axis, skipna, sort",
product(AGG_FUNCTIONS,
lrange(2), lrange(2),
+ [True, False],
[True, False]))
-def test_regression_whitelist_methods(raw_frame, op, level, axis, skipna):
+def test_regression_whitelist_methods(
+ raw_frame, op, level,
+ axis, skipna, sort):
# GH6944
+ # GH 17537
# explicity test the whitelest methods
if axis == 0:
@@ -188,15 +192,19 @@ def test_regression_whitelist_methods(raw_frame, op, level, axis, skipna):
frame = raw_frame.T
if op in AGG_FUNCTIONS_WITH_SKIPNA:
- grouped = frame.groupby(level=level, axis=axis)
+ grouped = frame.groupby(level=level, axis=axis, sort=sort)
result = getattr(grouped, op)(skipna=skipna)
expected = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
+ if sort:
+ expected = expected.sort_index(axis=axis, level=level)
tm.assert_frame_equal(result, expected)
else:
- grouped = frame.groupby(level=level, axis=axis)
+ grouped = frame.groupby(level=level, axis=axis, sort=sort)
result = getattr(grouped, op)()
expected = getattr(frame, op)(level=level, axis=axis)
+ if sort:
+ expected = expected.sort_index(axis=axis, level=level)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 6976fe162c5d5..050335988ca41 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1392,17 +1392,23 @@ def test_count(self):
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
- def test_series_group_min_max(self):
+ @pytest.mark.parametrize('sort', [True, False])
+ def test_series_group_min_max(self, sort):
+ # GH 17537
for op, level, skipna in cart_product(self.AGG_FUNCTIONS, lrange(2),
[False, True]):
- grouped = self.series.groupby(level=level)
+ grouped = self.series.groupby(level=level, sort=sort)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
+ if sort:
+ rightside = rightside.sort_index(level=level)
tm.assert_series_equal(leftside, rightside)
- def test_frame_group_ops(self):
+ @pytest.mark.parametrize('sort', [True, False])
+ def test_frame_group_ops(self, sort):
+ # GH 17537
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
@@ -1415,7 +1421,7 @@ def test_frame_group_ops(self):
else:
frame = self.frame.T
- grouped = frame.groupby(level=level, axis=axis)
+ grouped = frame.groupby(level=level, axis=axis, sort=sort)
pieces = []
@@ -1426,6 +1432,9 @@ def aggf(x):
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
+ if sort:
+ rightside = rightside.sort_index(level=level, axis=axis)
+ frame = frame.sort_index(level=level, axis=axis)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
| - [x] closes #17537
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17621 | 2017-09-22T07:41:49Z | 2017-10-01T14:53:46Z | 2017-10-01T14:53:45Z | 2017-10-01T14:53:48Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.