title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
DOC: Fix docstring for read_sql_table | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index aaface5415384..02fba52eac7f7 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -182,26 +182,29 @@ def execute(sql, con, cur=None, params=None):
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
- """Read SQL database table into a DataFrame.
+ """
+ Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
- table_name : string
+ table_name : str
Name of SQL table in database.
- con : SQLAlchemy connectable (or database string URI)
+ con : SQLAlchemy connectable or str
+ A database URI could be provided as as str.
SQLite DBAPI connection mode not supported.
- schema : string, default None
+ schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
- index_col : string or list of strings, optional, default: None
+ index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
- coerce_float : boolean, default True
+ coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
- parse_dates : list or dict, default: None
+ parse_dates : list or dict, default None
+ The behavior is as follows:
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
@@ -210,8 +213,8 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
- columns : list, default: None
- List of column names to select from SQL table
+ columns : list, default None
+ List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
@@ -219,15 +222,21 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
Returns
-------
DataFrame
+ A SQL table is returned as two-dimensional data structure with labeled
+ axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
- read_sql
+ read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
+
+ Examples
+ --------
+ >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
con = _engine_builder(con)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
```
################################################################################
###################### Docstring (pandas.read_sql_table) ######################
################################################################################
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or database URI str
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
parse_dates : list or dict, default None
The behavior is as follows:
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.read_sql_table" correct. :)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/25465 | 2019-02-27T20:56:36Z | 2019-02-28T20:00:31Z | 2019-02-28T20:00:31Z | 2019-03-01T10:14:40Z |
Draft implementation | diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 0c76ac6cd75ac..aedb1c9d5557c 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -1,5 +1,6 @@
from datetime import datetime, time
from functools import partial
+import numbers
import numpy as np
@@ -398,6 +399,21 @@ def _adjust_to_origin(arg, origin, unit):
return arg
+def _contains_numbers(arg):
+ """Returns True if argument is a number or an iterable containing
+ some numbers.
+ """
+ # deal with case where input is a number or a list of numbers
+ arr = np.asarray(arg)
+ if np.issubdtype(arr.dtype, np.number):
+ return True
+ elif np.isscalar(arr) and np.issubdtype(arr, np.number):
+ return True
+ else:
+ # inefficient
+ return any(isinstance(x, numbers.Number) for x in arg)
+
+
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True,
unit=None, infer_datetime_format=False, origin='unix',
@@ -570,6 +586,9 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
if origin != 'unix':
arg = _adjust_to_origin(arg, origin, unit)
+ if _contains_numbers(arg) and unit is None:
+ raise ValueError('When supplying numbers the unit must be specified.')
+
tz = 'utc' if utc else None
convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
dayfirst=dayfirst, yearfirst=yearfirst,
| Raises a ValueError when the input to to_datetime contains some numbers
but the unit isn't supplied.
- [X] closes #15836
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25463 | 2019-02-27T20:30:38Z | 2019-03-22T02:30:02Z | null | 2019-03-22T02:30:02Z |
Remove return values for asserts | diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index b88ba1cf01426..759dacd7f221e 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -159,7 +159,7 @@ def _check_op(self, s, op_name, other, exc=None):
# integer result type
else:
- rs = pd.Series(s.values._data)
+ rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 19e42b4621b3a..a22210268e5aa 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -566,12 +566,12 @@ def test_get_loc_datetimelike_overlapping(self, arrays):
value = index[0].mid + Timedelta('12 hours')
result = np.sort(index.get_loc(value))
expected = np.array([0, 1], dtype='intp')
- assert tm.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
interval = Interval(index[0].left, index[1].right)
result = np.sort(index.get_loc(interval))
expected = np.array([0, 1, 2], dtype='intp')
- assert tm.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index eb3789bb72910..2177d6bb93108 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -345,5 +345,5 @@ def test_empty_csv_input(self):
def assert_array_dicts_equal(left, right):
for k, v in left.items():
- assert tm.assert_numpy_array_equal(np.asarray(v),
- np.asarray(right[k]))
+ tm.assert_numpy_array_equal(np.asarray(v),
+ np.asarray(right[k]))
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 76ac5353c5916..c91233e9317b7 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -147,7 +147,7 @@ def test_same_tz_min_max_axis_1(self, op, expected_col):
columns=['a'])
df['b'] = df.a.subtract(pd.Timedelta(seconds=3600))
result = getattr(df, op)(axis=1)
- expected = df[expected_col]
+ expected = df[expected_col].rename(None)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 5376cb3696012..fd90a87c553c4 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1058,7 +1058,7 @@ def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
- assert tm.is_sorted(series.index)
+ tm.assert_is_sorted(series.index)
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py
index ee2c2c0c3cdfa..5d3903cb93bd5 100644
--- a/pandas/tests/tools/test_numeric.py
+++ b/pandas/tests/tools/test_numeric.py
@@ -240,7 +240,7 @@ def test_really_large_scalar(large_val, signed, transform, errors):
else:
expected = float(val) if (errors == "coerce" and
val_is_string) else val
- assert tm.assert_almost_equal(to_numeric(val, **kwargs), expected)
+ tm.assert_almost_equal(to_numeric(val, **kwargs), expected)
def test_really_large_in_arr(large_val, signed, transform,
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 70c06efa30fee..5ef68dcefdf8b 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -281,25 +281,25 @@ def assert_almost_equal(left, right, check_dtype="equiv",
"""
if isinstance(left, pd.Index):
- return assert_index_equal(left, right,
- check_exact=False,
- exact=check_dtype,
- check_less_precise=check_less_precise,
- **kwargs)
+ assert_index_equal(left, right,
+ check_exact=False,
+ exact=check_dtype,
+ check_less_precise=check_less_precise,
+ **kwargs)
elif isinstance(left, pd.Series):
- return assert_series_equal(left, right,
- check_exact=False,
- check_dtype=check_dtype,
- check_less_precise=check_less_precise,
- **kwargs)
+ assert_series_equal(left, right,
+ check_exact=False,
+ check_dtype=check_dtype,
+ check_less_precise=check_less_precise,
+ **kwargs)
elif isinstance(left, pd.DataFrame):
- return assert_frame_equal(left, right,
- check_exact=False,
- check_dtype=check_dtype,
- check_less_precise=check_less_precise,
- **kwargs)
+ assert_frame_equal(left, right,
+ check_exact=False,
+ check_dtype=check_dtype,
+ check_less_precise=check_less_precise,
+ **kwargs)
else:
# Other sequences.
@@ -317,7 +317,7 @@ def assert_almost_equal(left, right, check_dtype="equiv",
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
- return _testing.assert_almost_equal(
+ _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
@@ -355,7 +355,7 @@ def _check_isinstance(left, right, cls):
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
- return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
+ _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
@@ -717,11 +717,12 @@ def isiterable(obj):
return hasattr(obj, '__iter__')
-def is_sorted(seq):
+def assert_is_sorted(seq):
+ """Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
- return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
+ assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
@@ -911,8 +912,6 @@ def _raise(left, right, err_msg):
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
- return True
-
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
@@ -1073,12 +1072,10 @@ def assert_series_equal(left, right, check_dtype=True,
# .values is an ndarray, but ._values is the ExtensionArray.
# TODO: Use .array
assert is_extension_array_dtype(right.dtype)
- return assert_extension_array_equal(left._values, right._values)
-
+ assert_extension_array_equal(left._values, right._values)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
- return assert_extension_array_equal(left.array, right.array)
-
+ assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
| Unless they are context managers.
- [X] closes #25135
- [X] tests added / passed (not applicable)
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25462 | 2019-02-27T18:54:51Z | 2019-05-07T00:56:44Z | 2019-05-07T00:56:44Z | 2019-05-07T01:00:36Z |
Fix minor typo | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index eb84a9a5810f4..523543ada235c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1815,7 +1815,7 @@ def __hash__(self):
' hashed'.format(self.__class__.__name__))
def __iter__(self):
- """Iterate over infor axis"""
+ """Iterate over info axis"""
return iter(self._info_axis)
# can we get a better explanation of this?
| Signed-off-by: Philippe Ombredanne <pombredanne@nexb.com>
| https://api.github.com/repos/pandas-dev/pandas/pulls/25458 | 2019-02-27T10:06:04Z | 2019-02-28T12:45:16Z | 2019-02-28T12:45:16Z | 2019-02-28T14:13:58Z |
CI: add __init__.py to isort skip list | diff --git a/ci/deps/azure-27-compat.yaml b/ci/deps/azure-27-compat.yaml
index 986855c464852..c68b51fbd6644 100644
--- a/ci/deps/azure-27-compat.yaml
+++ b/ci/deps/azure-27-compat.yaml
@@ -21,6 +21,7 @@ dependencies:
- pytest
- pytest-xdist
- pytest-mock
+ - isort
- pip:
- html5lib==1.0b2
- beautifulsoup4==4.2.1
diff --git a/ci/deps/azure-27-locale.yaml b/ci/deps/azure-27-locale.yaml
index f73079ecbe3d2..5679c503caddc 100644
--- a/ci/deps/azure-27-locale.yaml
+++ b/ci/deps/azure-27-locale.yaml
@@ -24,6 +24,7 @@ dependencies:
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
+ - isort
- pip:
- html5lib==1.0b2
- beautifulsoup4==4.2.1
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index 6b8d38fd25082..de1f4ad0e9a76 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -30,5 +30,6 @@ dependencies:
- pytest-xdist
- pytest-mock
- moto
+ - isort
- pip:
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index 569b71dae003b..a89e63a2b7d3a 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -28,6 +28,7 @@ dependencies:
- pytest
- pytest-xdist
- pytest-mock
+ - isort
- pip:
- hypothesis>=3.58.0
- moto # latest moto in conda-forge fails with 3.7, move to conda dependencies when this is fixed
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index a37be124cc546..3132de891299c 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -10,6 +10,7 @@ dependencies:
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
+ - isort
- pip:
- "git+git://github.com/dateutil/dateutil.git"
- "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com"
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml
index d1fe926744ecd..9710bcb5bf43d 100644
--- a/ci/deps/azure-macos-35.yaml
+++ b/ci/deps/azure-macos-35.yaml
@@ -25,6 +25,7 @@ dependencies:
- pytest
- pytest-xdist
- pytest-mock
+ - isort
- pip:
- python-dateutil==2.5.3
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-windows-27.yaml b/ci/deps/azure-windows-27.yaml
index 74faeed83c387..093c055e69553 100644
--- a/ci/deps/azure-windows-27.yaml
+++ b/ci/deps/azure-windows-27.yaml
@@ -30,3 +30,4 @@ dependencies:
- pytest-mock
- moto
- hypothesis>=3.58.0
+ - isort
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 94d67b3d37788..e9db271a75d9d 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -27,3 +27,4 @@ dependencies:
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
+ - isort
diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml
index 4915c003bce4e..71b224b2c68c2 100644
--- a/ci/deps/travis-27.yaml
+++ b/ci/deps/travis-27.yaml
@@ -44,6 +44,7 @@ dependencies:
- pytest-mock
- moto==1.3.4
- hypothesis>=3.58.0
+ - isort
- pip:
- backports.lzma
- pandas-gbq
diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml
index 26f3a17432ab2..1a65d292ef085 100644
--- a/ci/deps/travis-36-doc.yaml
+++ b/ci/deps/travis-36-doc.yaml
@@ -43,3 +43,4 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - isort
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index 2a7692f10752c..36dbb8013104a 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -32,5 +32,6 @@ dependencies:
- pytest-xdist
- pytest-mock
- moto
+ - isort
- pip:
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index 7934d179c8618..f4b9091c4300b 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -30,3 +30,4 @@ dependencies:
- pytest-mock
- moto
- hypothesis>=3.58.0
+ - isort
diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml
index 857c3fadfdaeb..e22529784b5ec 100644
--- a/ci/deps/travis-36.yaml
+++ b/ci/deps/travis-36.yaml
@@ -38,6 +38,7 @@ dependencies:
- pytest-cov
- pytest-mock
- hypothesis>=3.58.0
+ - isort
- pip:
- brotlipy
- coverage
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index 125750191de7d..a8a5df5894ba5 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -17,5 +17,6 @@ dependencies:
- pytest-mock
- hypothesis>=3.58.0
- s3fs
+ - isort
- pip:
- moto
diff --git a/setup.cfg b/setup.cfg
index b15c3ce8a110a..956aa23839e73 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -152,3 +152,23 @@ skip=
asv_bench/benchmarks/dtypes.py
asv_bench/benchmarks/strings.py
asv_bench/benchmarks/period.py
+ pandas/__init__.py
+ pandas/plotting/__init__.py
+ pandas/tests/extension/decimal/__init__.py
+ pandas/tests/extension/base/__init__.py
+ pandas/io/msgpack/__init__.py
+ pandas/io/json/__init__.py
+ pandas/io/clipboard/__init__.py
+ pandas/io/excel/__init__.py
+ pandas/compat/__init__.py
+ pandas/compat/numpy/__init__.py
+ pandas/core/arrays/__init__.py
+ pandas/core/groupby/__init__.py
+ pandas/core/internals/__init__.py
+ pandas/api/__init__.py
+ pandas/api/extensions/__init__.py
+ pandas/api/types/__init__.py
+ pandas/_libs/__init__.py
+ pandas/_libs/tslibs/__init__.py
+ pandas/util/__init__.py
+ pandas/arrays/__init__.py
| 4.3.5 - February 24, 2019 - last Python 2.7 Maintenance Release
This is the final Python 2.x release of isort, and includes the following major changes:
Potentially Interface Breaking:
The -r option for removing imports has been renamed -rm to avoid accidental deletions and confusion with the -rc recursive option.
__init__.py has been removed from the default ignore list. The default ignore list is now empty - with all items needing to be explicitly ignored.... | https://api.github.com/repos/pandas-dev/pandas/pulls/25455 | 2019-02-26T19:56:42Z | 2019-02-27T22:26:54Z | 2019-02-27T22:26:54Z | 2019-03-12T09:17:12Z |
STY: use pytest.raises context manager (tests/test_*) | diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index cb7426ce2f7c9..c56bf944699e2 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -11,7 +11,7 @@
from pandas._libs import (
algos as libalgos, groupby as libgroupby, hashtable as ht)
-from pandas.compat import lrange, range
+from pandas.compat import PY2, lrange, range
from pandas.compat.numpy import np_array_datetime64_compat
import pandas.util._test_decorators as td
@@ -224,11 +224,16 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level):
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
- pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
+ msg = ("'<' not supported between instances of 'complex' and"
+ r" 'complex'|"
+ r"unorderable types: complex\(\) > complex\(\)")
+ with pytest.raises(TypeError, match=msg):
+ algos.factorize(x17[::-1], sort=True)
def test_float64_factorize(self, writable):
data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
@@ -589,9 +594,14 @@ class TestIsin(object):
def test_invalid(self):
- pytest.raises(TypeError, lambda: algos.isin(1, 1))
- pytest.raises(TypeError, lambda: algos.isin(1, [1]))
- pytest.raises(TypeError, lambda: algos.isin([1], 1))
+ msg = (r"only list-like objects are allowed to be passed to isin\(\),"
+ r" you passed a \[int\]")
+ with pytest.raises(TypeError, match=msg):
+ algos.isin(1, 1)
+ with pytest.raises(TypeError, match=msg):
+ algos.isin(1, [1])
+ with pytest.raises(TypeError, match=msg):
+ algos.isin([1], 1)
def test_basic(self):
@@ -819,8 +829,9 @@ def test_value_counts_dtypes(self):
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
- pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
- ['1', 1])
+ msg = "bins argument only works with numeric data"
+ with pytest.raises(TypeError, match=msg):
+ algos.value_counts(['1', 1], bins=1)
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 54db3887850ea..baca66e0361ad 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -3,7 +3,10 @@
import pytest
+from pandas.compat import PY2
+
import pandas as pd
+from pandas.core.config import OptionError
class TestConfig(object):
@@ -48,26 +51,35 @@ def test_is_one_of_factory(self):
v(12)
v(None)
- pytest.raises(ValueError, v, 1.1)
+ msg = r"Value must be one of None\|12"
+ with pytest.raises(ValueError, match=msg):
+ v(1.1)
def test_register_option(self):
self.cf.register_option('a', 1, 'doc')
# can't register an already registered option
- pytest.raises(KeyError, self.cf.register_option, 'a', 1, 'doc')
+ msg = "Option 'a' has already been registered"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.register_option('a', 1, 'doc')
# can't register an already registered option
- pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d1', 1,
- 'doc')
- pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d2', 1,
- 'doc')
+ msg = "Path prefix to option 'a' is already an option"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.register_option('a.b.c.d1', 1, 'doc')
+ with pytest.raises(OptionError, match=msg):
+ self.cf.register_option('a.b.c.d2', 1, 'doc')
# no python keywords
- pytest.raises(ValueError, self.cf.register_option, 'for', 0)
- pytest.raises(ValueError, self.cf.register_option, 'a.for.b', 0)
+ msg = "for is a python keyword"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.register_option('for', 0)
+ with pytest.raises(ValueError, match=msg):
+ self.cf.register_option('a.for.b', 0)
# must be valid identifier (ensure attribute access works)
- pytest.raises(ValueError, self.cf.register_option,
- 'Oh my Goddess!', 0)
+ msg = "oh my goddess! is not a valid identifier"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.register_option('Oh my Goddess!', 0)
# we can register options several levels deep
# without predefining the intermediate steps
@@ -90,7 +102,9 @@ def test_describe_option(self):
self.cf.register_option('l', "foo")
# non-existent keys raise KeyError
- pytest.raises(KeyError, self.cf.describe_option, 'no.such.key')
+ msg = r"No such keys\(s\)"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.describe_option('no.such.key')
# we can get the description for any key we registered
assert 'doc' in self.cf.describe_option('a', _print_desc=False)
@@ -122,7 +136,9 @@ def test_case_insensitive(self):
assert self.cf.get_option('kAnBaN') == 2
# gets of non-existent keys fail
- pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
+ msg = r"No such keys\(s\): 'no_such_option'"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.get_option('no_such_option')
self.cf.deprecate_option('KanBan')
assert self.cf._is_deprecated('kAnBaN')
@@ -138,7 +154,9 @@ def test_get_option(self):
assert self.cf.get_option('b.b') is None
# gets of non-existent keys fail
- pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
+ msg = r"No such keys\(s\): 'no_such_option'"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.get_option('no_such_option')
def test_set_option(self):
self.cf.register_option('a', 1, 'doc')
@@ -157,16 +175,24 @@ def test_set_option(self):
assert self.cf.get_option('b.c') == 'wurld'
assert self.cf.get_option('b.b') == 1.1
- pytest.raises(KeyError, self.cf.set_option, 'no.such.key', None)
+ msg = r"No such keys\(s\): 'no.such.key'"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.set_option('no.such.key', None)
def test_set_option_empty_args(self):
- pytest.raises(ValueError, self.cf.set_option)
+ msg = "Must provide an even number of non-keyword arguments"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.set_option()
def test_set_option_uneven_args(self):
- pytest.raises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
+ msg = "Must provide an even number of non-keyword arguments"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.set_option('a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
- pytest.raises(ValueError, self.cf.set_option, 2)
+ msg = "Must provide an even number of non-keyword arguments"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.set_option(2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
@@ -183,27 +209,36 @@ def test_set_option_multiple(self):
assert self.cf.get_option('b.c') is None
assert self.cf.get_option('b.b') == 10.0
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_text)
- pytest.raises(ValueError, self.cf.register_option, 'a.b.c.d2',
- 'NO', 'doc', validator=self.cf.is_int)
+ msg = "Value must have type '<class 'int'>'"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.register_option(
+ 'a.b.c.d2', 'NO', 'doc', validator=self.cf.is_int)
self.cf.set_option('a', 2) # int is_int
self.cf.set_option('b.c', 'wurld') # str is_str
- pytest.raises(
- ValueError, self.cf.set_option, 'a', None) # None not is_int
- pytest.raises(ValueError, self.cf.set_option, 'a', 'ab')
- pytest.raises(ValueError, self.cf.set_option, 'b.c', 1)
+ # None not is_int
+ with pytest.raises(ValueError, match=msg):
+ self.cf.set_option('a', None)
+ with pytest.raises(ValueError, match=msg):
+ self.cf.set_option('a', 'ab')
+
+ msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.set_option('b.c', 1)
validator = self.cf.is_one_of_factory([None, self.cf.is_callable])
self.cf.register_option('b', lambda: None, 'doc',
validator=validator)
self.cf.set_option('b', '%.1f'.format) # Formatter is callable
self.cf.set_option('b', None) # Formatter is none (default)
- pytest.raises(ValueError, self.cf.set_option, 'b', '%.1f')
+ with pytest.raises(ValueError, match="Value must be a callable"):
+ self.cf.set_option('b', '%.1f')
def test_reset_option(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
@@ -267,8 +302,9 @@ def test_deprecate_option(self):
assert 'eprecated' in str(w[-1]) # we get the default message
assert 'nifty_ver' in str(w[-1]) # with the removal_ver quoted
- pytest.raises(
- KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
+ msg = "Option 'a' has already been defined as deprecated"
+ with pytest.raises(OptionError, match=msg):
+ self.cf.deprecate_option('a')
self.cf.deprecate_option('b.c', 'zounds!')
with warnings.catch_warnings(record=True) as w:
@@ -374,12 +410,6 @@ def eq(val):
def test_attribute_access(self):
holder = []
- def f():
- options.b = 1
-
- def f2():
- options.display = 1
-
def f3(key):
holder.append(True)
@@ -397,8 +427,11 @@ def f3(key):
self.cf.reset_option("a")
assert options.a == self.cf.get_option("a", 0)
- pytest.raises(KeyError, f)
- pytest.raises(KeyError, f2)
+ msg = "You can only set the value of existing options"
+ with pytest.raises(OptionError, match=msg):
+ options.b = 1
+ with pytest.raises(OptionError, match=msg):
+ options.display = 1
# make sure callback kicks when using this form of setting
options.c = 1
@@ -429,5 +462,6 @@ def test_option_context_scope(self):
def test_dictwrapper_getattr(self):
options = self.cf.options
# GH 19789
- pytest.raises(self.cf.OptionError, getattr, options, 'bananas')
+ with pytest.raises(OptionError, match="No such option"):
+ options.bananas
assert not hasattr(options, 'bananas')
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 4ea7e9b8ec9a4..a9a59c6d95373 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -886,8 +886,11 @@ def test_count(self):
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'a'
- pytest.raises(KeyError, series.count, 'x')
- pytest.raises(KeyError, frame.count, level='x')
+ msg = "Level x not found"
+ with pytest.raises(KeyError, match=msg):
+ series.count('x')
+ with pytest.raises(KeyError, match=msg):
+ frame.count(level='x')
@pytest.mark.parametrize('op', AGG_FUNCTIONS)
@pytest.mark.parametrize('level', [0, 1])
@@ -1119,7 +1122,8 @@ def test_level_with_tuples(self):
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
- pytest.raises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
+ with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"):
+ series[('foo', 'bar', 0), 2]
result = frame.loc[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index cf5ef6cf15eca..d1893b7efbc41 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -7,6 +7,7 @@
import numpy as np
import pytest
+from pandas.compat import PY2
from pandas.compat.numpy import _np_version_under1p13
import pandas.util._test_decorators as td
@@ -728,6 +729,7 @@ def test_numeric_values(self):
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
@@ -743,7 +745,9 @@ def test_ndarray(self):
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
- pytest.raises(ValueError, lambda: nanops._ensure_numeric(s_values))
+ msg = r"could not convert string to float: '(foo|baz)'"
+ with pytest.raises(ValueError, match=msg):
+ nanops._ensure_numeric(s_values)
def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric('1'), 1.0)
@@ -751,9 +755,15 @@ def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j)
def test_non_convertable_values(self):
- pytest.raises(TypeError, lambda: nanops._ensure_numeric('foo'))
- pytest.raises(TypeError, lambda: nanops._ensure_numeric({}))
- pytest.raises(TypeError, lambda: nanops._ensure_numeric([]))
+ msg = "Could not convert foo to numeric"
+ with pytest.raises(TypeError, match=msg):
+ nanops._ensure_numeric('foo')
+ msg = "Could not convert {} to numeric"
+ with pytest.raises(TypeError, match=msg):
+ nanops._ensure_numeric({})
+ msg = r"Could not convert \[\] to numeric"
+ with pytest.raises(TypeError, match=msg):
+ nanops._ensure_numeric([])
class TestNanvarFixedValues(object):
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 7500cbb3cfc3a..e83bdb1af9121 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -7,6 +7,8 @@
from numpy import nan
import pytest
+from pandas.compat import PY2
+
from pandas import DataFrame, MultiIndex, Series, compat, concat, merge
from pandas.core import common as com
from pandas.core.sorting import (
@@ -403,15 +405,21 @@ def test_mixed_integer_from_list(self):
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
+ msg = ("'<' not supported between instances of 'datetime.datetime'"
+ r" and 'int'|"
+ r"unorderable types: int\(\) > datetime.datetime\(\)")
if compat.PY2:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
- pytest.raises(TypeError, safe_sort, arr)
+ with pytest.raises(TypeError, match=msg):
+ safe_sort(arr)
else:
- pytest.raises(TypeError, safe_sort, arr)
+ with pytest.raises(TypeError, match=msg):
+ safe_sort(arr)
def test_exceptions(self):
with pytest.raises(TypeError,
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 7cea3be03d1a7..bbcdc24f58f9b 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -10,7 +10,7 @@
import pytest
import pandas.compat as compat
-from pandas.compat import PY3, range, u
+from pandas.compat import PY2, PY3, range, u
from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna
import pandas.core.strings as strings
@@ -1002,11 +1002,13 @@ def test_replace(self):
tm.assert_series_equal(result, exp)
# GH 13438
+ msg = "repl must be a string or callable"
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
- pytest.raises(TypeError, values.str.replace, 'a', repl)
+ with pytest.raises(TypeError, match=msg):
+ values.str.replace('a', repl)
def test_replace_callable(self):
# GH 15055
@@ -1123,10 +1125,14 @@ def test_replace_literal(self):
callable_repl = lambda m: m.group(0).swapcase()
compiled_pat = re.compile('[a-z][A-Z]{2}')
- pytest.raises(ValueError, values.str.replace, 'abc', callable_repl,
- regex=False)
- pytest.raises(ValueError, values.str.replace, compiled_pat, '',
- regex=False)
+ msg = "Cannot use a callable replacement when regex=False"
+ with pytest.raises(ValueError, match=msg):
+ values.str.replace('abc', callable_repl, regex=False)
+
+ msg = ("Cannot use a compiled regex as replacement pattern with"
+ " regex=False")
+ with pytest.raises(ValueError, match=msg):
+ values.str.replace(compiled_pat, '', regex=False)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
@@ -1242,12 +1248,13 @@ def test_extract_expand_False(self):
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
- f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
- pytest.raises(ValueError, f)
+ msg = "pattern contains no capture groups"
+ with pytest.raises(ValueError, match=msg):
+ s_or_idx.str.extract('[ABC][123]', expand=False)
# only non-capturing groups
- f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
- pytest.raises(ValueError, f)
+ with pytest.raises(ValueError, match=msg):
+ s_or_idx.str.extract('(?:[AB]).*', expand=False)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
@@ -1387,12 +1394,13 @@ def test_extract_expand_True(self):
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
- f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
- pytest.raises(ValueError, f)
+ msg = "pattern contains no capture groups"
+ with pytest.raises(ValueError, match=msg):
+ s_or_idx.str.extract('[ABC][123]', expand=True)
# only non-capturing groups
- f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
- pytest.raises(ValueError, f)
+ with pytest.raises(ValueError, match=msg):
+ s_or_idx.str.extract('(?:[AB]).*', expand=True)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
@@ -3315,10 +3323,14 @@ def test_encode_decode(self):
tm.assert_series_equal(result, exp)
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_encode_decode_errors(self):
encodeBase = Series([u('a'), u('b'), u('a\x9d')])
- pytest.raises(UnicodeEncodeError, encodeBase.str.encode, 'cp1252')
+ msg = (r"'charmap' codec can't encode character '\\x9d' in position 1:"
+ " character maps to <undefined>")
+ with pytest.raises(UnicodeEncodeError, match=msg):
+ encodeBase.str.encode('cp1252')
f = lambda x: x.encode('cp1252', 'ignore')
result = encodeBase.str.encode('cp1252', 'ignore')
@@ -3327,7 +3339,10 @@ def test_encode_decode_errors(self):
decodeBase = Series([b'a', b'b', b'a\x9d'])
- pytest.raises(UnicodeDecodeError, decodeBase.str.decode, 'cp1252')
+ msg = ("'charmap' codec can't decode byte 0x9d in position 1:"
+ " character maps to <undefined>")
+ with pytest.raises(UnicodeDecodeError, match=msg):
+ decodeBase.str.decode('cp1252')
f = lambda x: x.decode('cp1252', 'ignore')
result = decodeBase.str.decode('cp1252', 'ignore')
@@ -3418,7 +3433,8 @@ def test_method_on_bytes(self):
lhs = Series(np.array(list('abc'), 'S1').astype(object))
rhs = Series(np.array(list('def'), 'S1').astype(object))
if compat.PY3:
- pytest.raises(TypeError, lhs.str.cat, rhs)
+ with pytest.raises(TypeError, match="can't concat str to bytes"):
+ lhs.str.cat(rhs)
else:
result = lhs.str.cat(rhs)
expected = Series(np.array(
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index e816d4c04344a..ce9d1888b8e96 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -89,9 +89,8 @@ def test_getitem(self):
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.rolling(window=5)
- pytest.raises(KeyError, g.__getitem__, ['C']) # g[['C']]
-
- pytest.raises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]
+ with pytest.raises(KeyError, match="Columns not found: 'C'"):
+ g[['C']]
with pytest.raises(KeyError, match='^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
@@ -102,7 +101,9 @@ def test_attribute_access(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
r = df.rolling(window=5)
tm.assert_series_equal(r.A.sum(), r['A'].sum())
- pytest.raises(AttributeError, lambda: r.F)
+ msg = "'Rolling' object has no attribute 'F'"
+ with pytest.raises(AttributeError, match=msg):
+ r.F
def tests_skip_nuisance(self):
@@ -217,12 +218,11 @@ def test_agg_nested_dicts(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
- def f():
+ msg = r"cannot perform renaming for (r1|r2) with a nested dictionary"
+ with pytest.raises(SpecificationError, match=msg):
r.aggregate({'r1': {'A': ['mean', 'sum']},
'r2': {'B': ['mean', 'sum']}})
- pytest.raises(SpecificationError, f)
-
expected = concat([r['A'].mean(), r['A'].std(),
r['B'].mean(), r['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
@@ -1806,26 +1806,38 @@ def test_ewm_alpha_arg(self):
def test_ewm_domain_checks(self):
# GH 12492
s = Series(self.arr)
- # com must satisfy: com >= 0
- pytest.raises(ValueError, s.ewm, com=-0.1)
+ msg = "comass must satisfy: comass >= 0"
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
- # span must satisfy: span >= 1
- pytest.raises(ValueError, s.ewm, span=-0.1)
- pytest.raises(ValueError, s.ewm, span=0.0)
- pytest.raises(ValueError, s.ewm, span=0.9)
+
+ msg = "span must satisfy: span >= 1"
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(span=-0.1)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(span=0.0)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
- # halflife must satisfy: halflife > 0
- pytest.raises(ValueError, s.ewm, halflife=-0.1)
- pytest.raises(ValueError, s.ewm, halflife=0.0)
+
+ msg = "halflife must satisfy: halflife > 0"
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(halflife=-0.1)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
- # alpha must satisfy: 0 < alpha <= 1
- pytest.raises(ValueError, s.ewm, alpha=-0.1)
- pytest.raises(ValueError, s.ewm, alpha=0.0)
+
+ msg = "alpha must satisfy: 0 < alpha <= 1"
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(alpha=-0.1)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
- pytest.raises(ValueError, s.ewm, alpha=1.1)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(alpha=1.1)
@pytest.mark.parametrize('method', ['mean', 'vol', 'var'])
def test_ew_empty_series(self, method):
@@ -2598,7 +2610,10 @@ def get_result(obj, obj2=None):
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
- pytest.raises(TypeError, rwindow._flex_binary_moment, 5, 6, None)
+ msg = ("arguments to moment function must be of type"
+ " np.ndarray/Series/DataFrame")
+ with pytest.raises(TypeError, match=msg):
+ rwindow._flex_binary_moment(5, 6, None)
def test_corr_sanity(self):
# GH 3155
@@ -2682,7 +2697,10 @@ def func(A, B, com, **kwargs):
Series([1.]), Series([1.]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([np.NaN]))
- pytest.raises(Exception, func, A, randn(50), 20, min_periods=5)
+ msg = "Input arrays must be of the same type!"
+ # exception raised is Exception
+ with pytest.raises(Exception, match=msg):
+ func(A, randn(50), 20, min_periods=5)
def test_expanding_apply_args_kwargs(self, raw):
@@ -3266,9 +3284,9 @@ def setup_method(self, method):
def test_mutated(self):
- def f():
+ msg = r"group\(\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg):
self.frame.groupby('A', foo=1)
- pytest.raises(TypeError, f)
g = self.frame.groupby('A')
assert not g.mutated
| xref #24332 | https://api.github.com/repos/pandas-dev/pandas/pulls/25452 | 2019-02-26T17:57:03Z | 2019-02-28T13:34:22Z | 2019-02-28T13:34:22Z | 2019-03-01T12:48:56Z |
STY: use pytest.raises context manager (indexes) | diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index e4f25ff143273..ba451da10573a 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -403,13 +403,16 @@ def test_get_item(self, closed):
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_value(self):
- pytest.raises(KeyError, self.index.get_loc, 0)
+ with pytest.raises(KeyError, match="^0$"):
+ self.index.get_loc(0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
- pytest.raises(KeyError, self.index.get_loc, -1)
- pytest.raises(KeyError, self.index.get_loc, 3)
+ with pytest.raises(KeyError, match="^-1$"):
+ self.index.get_loc(-1)
+ with pytest.raises(KeyError, match="^3$"):
+ self.index.get_loc(3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
@@ -419,10 +422,12 @@ def test_get_loc_value(self):
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='intp'))
assert idx.get_loc(3) == 1
- pytest.raises(KeyError, idx.get_loc, 3.5)
+ with pytest.raises(KeyError, match=r"^3\.5$"):
+ idx.get_loc(3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
- pytest.raises(KeyError, idx.get_loc, 1.5)
+ with pytest.raises(KeyError, match=r"^1\.5$"):
+ idx.get_loc(1.5)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_cases(self, breaks):
@@ -486,7 +491,9 @@ def test_slice_locs_decreasing_float64(self):
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
- with pytest.raises(KeyError):
+ msg = ("'can only get slices from an IntervalIndex if bounds are"
+ " non-overlapping and all monotonic increasing or decreasing'")
+ with pytest.raises(KeyError, match=msg):
index.slice_locs(1, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
@@ -494,9 +501,12 @@ def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
- pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
- pytest.raises(KeyError, self.index.get_loc,
- Interval(-1, 0, 'left'))
+ msg = r"Interval\(2, 3, closed='right'\)"
+ with pytest.raises(KeyError, match=msg):
+ self.index.get_loc(Interval(2, 3))
+ msg = r"Interval\(-1, 0, closed='left'\)"
+ with pytest.raises(KeyError, match=msg):
+ self.index.get_loc(Interval(-1, 0, 'left'))
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [3, Interval(1, 4)])
@@ -981,9 +991,11 @@ def test_comparison(self):
self.index > 0
with pytest.raises(TypeError, match='unorderable types'):
self.index <= 0
- with pytest.raises(TypeError):
+ msg = r"unorderable types: Interval\(\) > int\(\)"
+ with pytest.raises(TypeError, match=msg):
self.index > np.arange(2)
- with pytest.raises(ValueError):
+ msg = "Lengths must match to compare"
+ with pytest.raises(ValueError, match=msg):
self.index > np.arange(3)
def test_missing_values(self, closed):
@@ -993,7 +1005,9 @@ def test_missing_values(self, closed):
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
- with pytest.raises(ValueError):
+ msg = ("missing values must be missing in the same location both left"
+ " and right sides")
+ with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 8415bab802239..26dcf7d6bc234 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -4,6 +4,7 @@
from datetime import datetime, timedelta
import math
import operator
+import re
import sys
import numpy as np
@@ -107,7 +108,10 @@ def test_constructor_copy(self):
def test_constructor_corner(self):
# corner case
- pytest.raises(TypeError, Index, 0)
+ msg = (r"Index\(\.\.\.\) must be called with a collection of some"
+ " kind, 0 was passed")
+ with pytest.raises(TypeError, match=msg):
+ Index(0)
@pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
@@ -488,21 +492,22 @@ def test_constructor_cast(self):
Index(["a", "b", "c"], dtype=float)
def test_view_with_args(self):
-
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
-
- for i in restricted:
- ind = self.indices[i]
-
- # with arguments
- pytest.raises(TypeError, lambda: ind.view('i8'))
-
- # these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
+ ind.view('i8')
- # with arguments
+ @pytest.mark.parametrize('index_type', [
+ 'unicodeIndex',
+ 'strIndex',
+ pytest.param('catIndex', marks=pytest.mark.xfail(reason="gh-25464")),
+ 'boolIndex',
+ 'empty'])
+ def test_view_with_args_object_array_raises(self, index_type):
+ ind = self.indices[index_type]
+ msg = "Cannot change data-type for object array"
+ with pytest.raises(TypeError, match=msg):
ind.view('i8')
def test_astype(self):
@@ -565,8 +570,8 @@ def test_delete(self, pos, expected):
def test_delete_raises(self):
index = Index(['a', 'b', 'c', 'd'], name='index')
- with pytest.raises((IndexError, ValueError)):
- # either depending on numpy version
+ msg = "index 5 is out of bounds for axis 0 with size 4"
+ with pytest.raises(IndexError, match=msg):
index.delete(5)
def test_identical(self):
@@ -683,7 +688,9 @@ def test_empty_fancy_raises(self, attr):
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
- pytest.raises(IndexError, index.__getitem__, empty_farr)
+ msg = r"arrays used as indices must be of integer \(or boolean\) type"
+ with pytest.raises(IndexError, match=msg):
+ index[empty_farr]
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, sort):
@@ -1426,13 +1433,14 @@ def test_get_indexer_strings(self, method, expected):
def test_get_indexer_strings_raises(self):
index = pd.Index(['b', 'c'])
- with pytest.raises(TypeError):
+ msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
+ with pytest.raises(TypeError, match=msg):
index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
tolerance=[2, 2, 2, 2])
@@ -1685,8 +1693,11 @@ def test_drop_tuple(self, values, to_drop):
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
+ msg = r"\"\[{}\] not found in axis\"".format(
+ re.escape(to_drop[1].__repr__()))
for drop_me in to_drop[1], [to_drop[1]]:
- pytest.raises(KeyError, removed.drop, drop_me)
+ with pytest.raises(KeyError, match=msg):
+ removed.drop(drop_me)
@pytest.mark.parametrize("method,expected,sort", [
('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index d889135160ae2..95fac2f6ae05b 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -181,18 +181,21 @@ def test_create_categorical(self):
expected = Categorical(['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
- def test_disallow_set_ops(self):
-
+ @pytest.mark.parametrize('func,op_name', [
+ (lambda idx: idx - idx, '__sub__'),
+ (lambda idx: idx + idx, '__add__'),
+ (lambda idx: idx - ['a', 'b'], '__sub__'),
+ (lambda idx: idx + ['a', 'b'], '__add__'),
+ (lambda idx: ['a', 'b'] - idx, '__rsub__'),
+ (lambda idx: ['a', 'b'] + idx, '__radd__'),
+ ])
+ def test_disallow_set_ops(self, func, op_name):
# GH 10039
# set ops (+/-) raise TypeError
idx = pd.Index(pd.Categorical(['a', 'b']))
-
- pytest.raises(TypeError, lambda: idx - idx)
- pytest.raises(TypeError, lambda: idx + idx)
- pytest.raises(TypeError, lambda: idx - ['a', 'b'])
- pytest.raises(TypeError, lambda: idx + ['a', 'b'])
- pytest.raises(TypeError, lambda: ['a', 'b'] - idx)
- pytest.raises(TypeError, lambda: ['a', 'b'] + idx)
+ msg = "cannot perform {} with this index type: CategoricalIndex"
+ with pytest.raises(TypeError, match=msg.format(op_name)):
+ func(idx)
def test_method_delegation(self):
@@ -231,8 +234,9 @@ def test_method_delegation(self):
list('aabbca'), categories=list('cabdef'), ordered=True))
# invalid
- pytest.raises(ValueError, lambda: ci.set_categories(
- list('cab'), inplace=True))
+ msg = "cannot use inplace with CategoricalIndex"
+ with pytest.raises(ValueError, match=msg):
+ ci.set_categories(list('cab'), inplace=True)
def test_contains(self):
@@ -357,12 +361,11 @@ def test_append(self):
tm.assert_index_equal(result, ci, exact=True)
# appending with different categories or reordered is not ok
- pytest.raises(
- TypeError,
- lambda: ci.append(ci.values.set_categories(list('abcd'))))
- pytest.raises(
- TypeError,
- lambda: ci.append(ci.values.reorder_categories(list('abc'))))
+ msg = "all inputs must be Index"
+ with pytest.raises(TypeError, match=msg):
+ ci.append(ci.values.set_categories(list('abcd')))
+ with pytest.raises(TypeError, match=msg):
+ ci.append(ci.values.reorder_categories(list('abc')))
# with objects
result = ci.append(Index(['c', 'a']))
@@ -370,7 +373,9 @@ def test_append(self):
tm.assert_index_equal(result, expected, exact=True)
# invalid objects
- pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))
+ msg = "cannot append a non-category item to a CategoricalIndex"
+ with pytest.raises(TypeError, match=msg):
+ ci.append(Index(['a', 'd']))
# GH14298 - if base object is not categorical -> coerce to object
result = Index(['c', 'a']).append(ci)
@@ -406,7 +411,10 @@ def test_insert(self):
tm.assert_index_equal(result, expected, exact=True)
# invalid
- pytest.raises(TypeError, lambda: ci.insert(0, 'd'))
+ msg = ("cannot insert an item into a CategoricalIndex that is not"
+ " already an existing category")
+ with pytest.raises(TypeError, match=msg):
+ ci.insert(0, 'd')
# GH 18295 (test missing)
expected = CategoricalIndex(['a', np.nan, 'a', 'b', 'c', 'b'])
@@ -633,12 +641,16 @@ def test_get_indexer(self):
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
- pytest.raises(NotImplementedError,
- lambda: idx2.get_indexer(idx1, method='pad'))
- pytest.raises(NotImplementedError,
- lambda: idx2.get_indexer(idx1, method='backfill'))
- pytest.raises(NotImplementedError,
- lambda: idx2.get_indexer(idx1, method='nearest'))
+ msg = ("method='pad' and method='backfill' not implemented yet for"
+ " CategoricalIndex")
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method='pad')
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method='backfill')
+
+ msg = "method='nearest' not implemented yet for CategoricalIndex"
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method='nearest')
def test_get_loc(self):
# GH 12531
@@ -776,12 +788,15 @@ def test_equals_categorical(self):
# invalid comparisons
with pytest.raises(ValueError, match="Lengths must match"):
ci1 == Index(['a', 'b', 'c'])
- pytest.raises(TypeError, lambda: ci1 == ci2)
- pytest.raises(
- TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
- pytest.raises(
- TypeError,
- lambda: ci1 == Categorical(ci1.values, categories=list('abc')))
+
+ msg = ("categorical index comparisons must have the same categories"
+ " and ordered attributes")
+ with pytest.raises(TypeError, match=msg):
+ ci1 == ci2
+ with pytest.raises(TypeError, match=msg):
+ ci1 == Categorical(ci1.values, ordered=False)
+ with pytest.raises(TypeError, match=msg):
+ ci1 == Categorical(ci1.values, categories=list('abc'))
# tests
# make sure that we are testing for category inclusion properly
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index fd356202a8ce5..03448129a48fc 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -3,6 +3,8 @@
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
+import re
+
import numpy as np
import pytest
@@ -189,8 +191,14 @@ def test_unique(self, indices):
result = indices.unique(level=level)
tm.assert_index_equal(result, expected)
- for level in 3, 'wrong':
- pytest.raises((IndexError, KeyError), indices.unique, level=level)
+ msg = "Too many levels: Index has only 1 level, not 4"
+ with pytest.raises(IndexError, match=msg):
+ indices.unique(level=3)
+
+ msg = r"Level wrong must be same as name \({}\)".format(
+ re.escape(indices.name.__repr__()))
+ with pytest.raises(KeyError, match=msg):
+ indices.unique(level='wrong')
def test_get_unique_index(self, indices):
# MultiIndex tested separately
@@ -239,12 +247,16 @@ def test_get_unique_index(self, indices):
tm.assert_index_equal(result, expected)
def test_sort(self, indices):
- pytest.raises(TypeError, indices.sort)
+ msg = "cannot sort an Index object in-place, use sort_values instead"
+ with pytest.raises(TypeError, match=msg):
+ indices.sort()
def test_mutability(self, indices):
if not len(indices):
pytest.skip('Skip check for empty Index')
- pytest.raises(TypeError, indices.__setitem__, 0, indices[0])
+ msg = "Index does not support mutable operations"
+ with pytest.raises(TypeError, match=msg):
+ indices[0] = indices[0]
def test_view(self, indices):
assert indices.view().name == indices.name
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index a64340c02cd22..26413f4519eff 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -1,15 +1,17 @@
# -*- coding: utf-8 -*-
from datetime import datetime
+import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
-from pandas.compat import range
+from pandas.compat import PY2, range
import pandas as pd
from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
+from pandas.api.types import pandas_dtype
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@@ -153,12 +155,22 @@ def test_constructor(self):
result = Index(np.array([np.nan]))
assert pd.isna(result.values).all()
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_constructor_invalid(self):
# invalid
- pytest.raises(TypeError, Float64Index, 0.)
- pytest.raises(TypeError, Float64Index, ['a', 'b', 0.])
- pytest.raises(TypeError, Float64Index, [Timestamp('20130101')])
+ msg = (r"Float64Index\(\.\.\.\) must be called with a collection of"
+ r" some kind, 0\.0 was passed")
+ with pytest.raises(TypeError, match=msg):
+ Float64Index(0.)
+ msg = ("String dtype not supported, you may need to explicitly cast to"
+ " a numeric type")
+ with pytest.raises(TypeError, match=msg):
+ Float64Index(['a', 'b', 0.])
+ msg = (r"float\(\) argument must be a string or a number, not"
+ " 'Timestamp'")
+ with pytest.raises(TypeError, match=msg):
+ Float64Index([Timestamp('20130101')])
def test_constructor_coerce(self):
@@ -216,12 +228,17 @@ def test_astype(self):
# invalid
for dtype in ['M8[ns]', 'm8[ns]']:
- pytest.raises(TypeError, lambda: i.astype(dtype))
+ msg = ("Cannot convert Float64Index to dtype {}; integer values"
+ " are required for conversion").format(pandas_dtype(dtype))
+ with pytest.raises(TypeError, match=re.escape(msg)):
+ i.astype(dtype)
# GH 13149
for dtype in ['int16', 'int32', 'int64']:
i = Float64Index([0, 1.1, np.NAN])
- pytest.raises(ValueError, lambda: i.astype(dtype))
+ msg = "Cannot convert NA to integer"
+ with pytest.raises(ValueError, match=msg):
+ i.astype(dtype)
def test_type_coercion_fail(self, any_int_dtype):
# see gh-15832
@@ -275,12 +292,16 @@ def test_get_loc(self):
assert idx.get_loc(1.1, method) == loc
assert idx.get_loc(1.1, method, tolerance=0.9) == loc
- pytest.raises(KeyError, idx.get_loc, 'foo')
- pytest.raises(KeyError, idx.get_loc, 1.5)
- pytest.raises(KeyError, idx.get_loc, 1.5, method='pad',
- tolerance=0.1)
- pytest.raises(KeyError, idx.get_loc, True)
- pytest.raises(KeyError, idx.get_loc, False)
+ with pytest.raises(KeyError, match="^'foo'$"):
+ idx.get_loc('foo')
+ with pytest.raises(KeyError, match=r"^1\.5$"):
+ idx.get_loc(1.5)
+ with pytest.raises(KeyError, match=r"^1\.5$"):
+ idx.get_loc(1.5, method='pad', tolerance=0.1)
+ with pytest.raises(KeyError, match="^True$"):
+ idx.get_loc(True)
+ with pytest.raises(KeyError, match="^False$"):
+ idx.get_loc(False)
with pytest.raises(ValueError, match='must be numeric'):
idx.get_loc(1.4, method='nearest', tolerance='foo')
@@ -310,15 +331,20 @@ def test_get_loc_na(self):
# not representable by slice
idx = Float64Index([np.nan, 1, np.nan, np.nan])
assert idx.get_loc(1) == 1
- pytest.raises(KeyError, idx.slice_locs, np.nan)
+ msg = "'Cannot get left slice bound for non-unique label: nan"
+ with pytest.raises(KeyError, match=msg):
+ idx.slice_locs(np.nan)
def test_get_loc_missing_nan(self):
# GH 8569
idx = Float64Index([1, 2])
assert idx.get_loc(1) == 0
- pytest.raises(KeyError, idx.get_loc, 3)
- pytest.raises(KeyError, idx.get_loc, np.nan)
- pytest.raises(KeyError, idx.get_loc, [np.nan])
+ with pytest.raises(KeyError, match=r"^3\.0$"):
+ idx.get_loc(3)
+ with pytest.raises(KeyError, match="^nan$"):
+ idx.get_loc(np.nan)
+ with pytest.raises(KeyError, match=r"^\[nan\]$"):
+ idx.get_loc([np.nan])
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
@@ -499,13 +525,17 @@ def test_union_noncomparable(self):
tm.assert_index_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
+ msg = ("String dtype not supported, you may need to explicitly cast to"
+ " a numeric type")
# can't
data = ['foo', 'bar', 'baz']
- pytest.raises(TypeError, self._holder, data)
+ with pytest.raises(TypeError, match=msg):
+ self._holder(data)
# shouldn't
data = ['0', '1', '2']
- pytest.raises(TypeError, self._holder, data)
+ with pytest.raises(TypeError, match=msg):
+ self._holder(data)
def test_view_index(self):
self.index.view(Index)
@@ -576,7 +606,10 @@ def test_constructor(self):
tm.assert_index_equal(index, expected)
# scalar raise Exception
- pytest.raises(TypeError, Int64Index, 5)
+ msg = (r"Int64Index\(\.\.\.\) must be called with a collection of some"
+ " kind, 5 was passed")
+ with pytest.raises(TypeError, match=msg):
+ Int64Index(5)
# copy
arr = self.index.values
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 04977023d7c62..3173252e174ab 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -198,20 +198,34 @@ def test_ops_ndarray(self):
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
- pytest.raises(TypeError, lambda: td + np.array([1]))
- pytest.raises(TypeError, lambda: np.array([1]) + td)
+ msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
+ with pytest.raises(TypeError, match=msg):
+ td + np.array([1])
+ msg = (r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and"
+ " 'Timedelta'")
+ with pytest.raises(TypeError, match=msg):
+ np.array([1]) + td
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
- pytest.raises(TypeError, lambda: td - np.array([1]))
- pytest.raises(TypeError, lambda: np.array([1]) - td)
+ msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
+ with pytest.raises(TypeError, match=msg):
+ td - np.array([1])
+ msg = (r"unsupported operand type\(s\) for -: 'numpy.ndarray' and"
+ " 'Timedelta'")
+ with pytest.raises(TypeError, match=msg):
+ np.array([1]) - td
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
- pytest.raises(TypeError, lambda: td * other)
- pytest.raises(TypeError, lambda: other * td)
+ msg = ("ufunc multiply cannot use operands with types"
+ r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)")
+ with pytest.raises(TypeError, match=msg):
+ td * other
+ with pytest.raises(TypeError, match=msg):
+ other * td
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py
index 3938d6acad2f0..0028f1e2edad5 100644
--- a/pandas/tests/indexes/timedeltas/test_construction.py
+++ b/pandas/tests/indexes/timedeltas/test_construction.py
@@ -168,10 +168,15 @@ def test_constructor_coverage(self):
tm.assert_index_equal(from_ints, expected)
# non-conforming freq
- pytest.raises(ValueError, TimedeltaIndex,
- ['1 days', '2 days', '4 days'], freq='D')
+ msg = ("Inferred frequency None from passed values does not conform to"
+ " passed frequency D")
+ with pytest.raises(ValueError, match=msg):
+ TimedeltaIndex(['1 days', '2 days', '4 days'], freq='D')
- pytest.raises(ValueError, timedelta_range, periods=10, freq='D')
+ msg = ("Of the four parameters: start, end, periods, and freq, exactly"
+ " three must be specified")
+ with pytest.raises(ValueError, match=msg):
+ timedelta_range(periods=10, freq='D')
def test_constructor_name(self):
idx = timedelta_range(start='1 days', periods=1, freq='D', name='TEST')
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 40377e4362b75..63210f67c2dbd 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -71,7 +71,9 @@ def test_unknown_attribute(self):
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
assert 'foo' not in ts.__dict__.keys()
- pytest.raises(AttributeError, lambda: ts.foo)
+ msg = "'Series' object has no attribute 'foo'"
+ with pytest.raises(AttributeError, match=msg):
+ ts.foo
def test_order(self):
# GH 10295
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index 62bf2a0b4a1cf..9fce1c9acd488 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -31,7 +31,9 @@ def test_partial_slice(self):
result = s['6 days, 23:11:12']
assert result == s.iloc[133]
- pytest.raises(KeyError, s.__getitem__, '50 days')
+ msg = r"^Timedelta\('50 days 00:00:00'\)$"
+ with pytest.raises(KeyError, match=msg):
+ s['50 days']
def test_partial_slice_high_reso(self):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 3cbd9942f9d84..062e1c1e9f46d 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -264,9 +264,13 @@ def test_fields(self):
tm.assert_index_equal(rng.nanoseconds,
Index([456, 456], dtype='int64'))
- pytest.raises(AttributeError, lambda: rng.hours)
- pytest.raises(AttributeError, lambda: rng.minutes)
- pytest.raises(AttributeError, lambda: rng.milliseconds)
+ msg = "'TimedeltaIndex' object has no attribute '{}'"
+ with pytest.raises(AttributeError, match=msg.format('hours')):
+ rng.hours
+ with pytest.raises(AttributeError, match=msg.format('minutes')):
+ rng.minutes
+ with pytest.raises(AttributeError, match=msg.format('milliseconds')):
+ rng.milliseconds
# with nat
s = Series(rng)
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py
index d211219159233..58482a174dfd1 100644
--- a/pandas/tests/indexes/timedeltas/test_tools.py
+++ b/pandas/tests/indexes/timedeltas/test_tools.py
@@ -115,14 +115,22 @@ def test_to_timedelta_invalid(self):
to_timedelta(['foo'], errors='never')
# these will error
- pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
- pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo'))
+ msg = "invalid unit abbreviation: foo"
+ with pytest.raises(ValueError, match=msg):
+ to_timedelta([1, 2], unit='foo')
+ with pytest.raises(ValueError, match=msg):
+ to_timedelta(1, unit='foo')
# time not supported ATM
- pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
+ msg = ("Value must be Timedelta, string, integer, float, timedelta or"
+ " convertible")
+ with pytest.raises(ValueError, match=msg):
+ to_timedelta(time(second=1))
assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
- pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
+ msg = "unit abbreviation w/o a number"
+ with pytest.raises(ValueError, match=msg):
+ to_timedelta(['foo', 'bar'])
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
| xref #24332 | https://api.github.com/repos/pandas-dev/pandas/pulls/25447 | 2019-02-26T12:58:33Z | 2019-02-28T13:33:32Z | 2019-02-28T13:33:32Z | 2019-02-28T14:29:25Z |
BUG: repr of np.datetime64('NaT') in Series/DataFrame with dtype object | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 99b57e2427509..89e14074ae75b 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -395,7 +395,7 @@ Sparse
Other
^^^^^
--
+- Bug in :class:`Series` and :class:`DataFrame` repr where ``np.datetime64('NaT')`` and ``np.timedelta64('NaT')`` with ``dtype=object`` would be represented as ``NaN`` (:issue:`25445`)
-
-
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index dbe6b282ce9c0..1d08d559cb33f 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -941,10 +941,16 @@ def _format_strings(self):
def _format(x):
if self.na_rep is not None and is_scalar(x) and isna(x):
- if x is None:
- return 'None'
- elif x is NaT:
- return 'NaT'
+ try:
+ # try block for np.isnat specifically
+ # determine na_rep if x is None or NaT-like
+ if x is None:
+ return 'None'
+ elif x is NaT or np.isnat(x):
+ return 'NaT'
+ except (TypeError, ValueError):
+ # np.isnat only handles datetime or timedelta objects
+ pass
return self.na_rep
elif isinstance(x, PandasObject):
return '{x}'.format(x=x)
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index c77c9d9143e38..e4f0b4c6459ae 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -512,3 +512,12 @@ def test_repr_categorical_dates_periods(self):
df = DataFrame({'dt': Categorical(dt), 'p': Categorical(p)})
assert repr(df) == exp
+
+ @pytest.mark.parametrize('arg', [np.datetime64, np.timedelta64])
+ @pytest.mark.parametrize('box, expected', [
+ [Series, '0 NaT\ndtype: object'],
+ [DataFrame, ' 0\n0 NaT']])
+ def test_repr_np_nat_with_object(self, arg, box, expected):
+ # GH 25445
+ result = repr(box([arg('NaT')], dtype=object))
+ assert result == expected
| - [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Discovered while investigating #12499
Before:
```
In [1]: pd.Series([np.datetime64('NaT')], dtype=object)
Out[1]:
0 NaN
dtype: object
```
After:
```
In [1]: pd.Series([np.datetime64('NaT')], dtype=object)
Out[1]:
0 NaT
dtype: object
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/25445 | 2019-02-26T07:17:47Z | 2019-03-30T17:37:25Z | 2019-03-30T17:37:25Z | 2019-03-30T19:35:33Z |
ENH: added optional caption and label arguments to DataFrame.to_latex() | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index cd0714838a3f1..4090a07fee5a5 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -20,8 +20,7 @@ including other versions of pandas.
Enhancements
~~~~~~~~~~~~
-
--
+- :meth:`DataFrame.to_latex` now accepts ``caption`` and ``label`` arguments (:issue:`25436`)
-
.. _whatsnew_1000.enhancements.other:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1a5b36b07e93c..b427b1f0ac858 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2925,15 +2925,21 @@ def to_latex(
multicolumn=None,
multicolumn_format=None,
multirow=None,
+ caption=None,
+ label=None,
):
r"""
- Render an object to a LaTeX tabular environment table.
+ Render object to a LaTeX tabular, longtable, or nested table/tabular.
- Render an object to a tabular environment table. You can splice
- this into a LaTeX document. Requires \usepackage{booktabs}.
+ Requires ``\usepackage{booktabs}``. The output can be copy/pasted
+ into a main LaTeX document or read from an external file
+ with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
- Added to Series
+ Added to Series.
+
+ .. versionchanged:: 1.0.0
+ Added caption and label arguments.
Parameters
----------
@@ -3002,6 +3008,17 @@ def to_latex(
from the pandas config module.
.. versionadded:: 0.20.0
+
+ caption : str, optional
+ The LaTeX caption to be placed inside ``\caption{}`` in the output.
+
+ .. versionadded:: 1.0.0
+
+ label : str, optional
+ The LaTeX label to be placed inside ``\label{}`` in the output.
+ This is used with ``\ref{}`` in the main ``.tex`` file.
+
+ .. versionadded:: 1.0.0
%(returns)s
See Also
--------
@@ -3014,7 +3031,7 @@ def to_latex(
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
- >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
+ >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
@@ -3061,6 +3078,8 @@ def to_latex(
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
+ caption=caption,
+ label=label,
)
def to_csv(
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 8ff4b9bda0430..f8db1b19dadfa 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -888,6 +888,8 @@ def to_latex(
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
+ caption: Optional[str] = None,
+ label: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
@@ -902,6 +904,8 @@ def to_latex(
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
+ caption=caption,
+ label=label,
).get_result(buf=buf, encoding=encoding)
def _format_col(self, i: int) -> List[str]:
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 4c4d5ec73269a..ca9db88ae7be4 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -36,6 +36,8 @@ def __init__(
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
+ caption: Optional[str] = None,
+ label: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
@@ -45,11 +47,14 @@ def __init__(
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
+ self.caption = caption
+ self.label = label
self.escape = self.fmt.escape
def write_result(self, buf: IO[str]) -> None:
"""
- Render a DataFrame to a LaTeX tabular/longtable environment output.
+ Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
+ environment output.
"""
# string representation of the columns
@@ -114,12 +119,12 @@ def pad_empties(x):
"not {typ}".format(typ=type(column_format))
)
- if not self.longtable:
- buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
- buf.write("\\toprule\n")
+ if self.longtable:
+ self._write_longtable_begin(buf, column_format)
else:
- buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
- buf.write("\\toprule\n")
+ self._write_tabular_begin(buf, column_format)
+
+ buf.write("\\toprule\n")
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
@@ -183,11 +188,10 @@ def pad_empties(x):
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
- if not self.longtable:
- buf.write("\\bottomrule\n")
- buf.write("\\end{tabular}\n")
+ if self.longtable:
+ self._write_longtable_end(buf)
else:
- buf.write("\\end{longtable}\n")
+ self._write_tabular_end(buf)
def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]:
r"""
@@ -268,3 +272,107 @@ def _print_cline(self, buf: IO[str], i: int, icol: int) -> None:
buf.write("\\cline{{{cl:d}-{icol:d}}}\n".format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
+
+ def _write_tabular_begin(self, buf, column_format):
+ """
+ Write the beginning of a tabular environment or
+ nested table/tabular environments including caption and label.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+ column_format : str, default None
+ The columns format as specified in `LaTeX table format
+ <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl'
+ for 3 columns
+
+ """
+ if self.caption is not None or self.label is not None:
+ # then write output in a nested table/tabular environment
+ if self.caption is None:
+ caption_ = ""
+ else:
+ caption_ = "\n\\caption{{{}}}".format(self.caption)
+
+ if self.label is None:
+ label_ = ""
+ else:
+ label_ = "\n\\label{{{}}}".format(self.label)
+
+ buf.write("\\begin{{table}}\n\\centering{}{}\n".format(caption_, label_))
+ else:
+ # then write output only in a tabular environment
+ pass
+
+ buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
+
+ def _write_tabular_end(self, buf):
+ """
+ Write the end of a tabular environment or nested table/tabular
+ environment.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+
+ """
+ buf.write("\\bottomrule\n")
+ buf.write("\\end{tabular}\n")
+ if self.caption is not None or self.label is not None:
+ buf.write("\\end{table}\n")
+ else:
+ pass
+
+ def _write_longtable_begin(self, buf, column_format):
+ """
+ Write the beginning of a longtable environment including caption and
+ label if provided by user.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+ column_format : str, default None
+ The columns format as specified in `LaTeX table format
+ <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl'
+ for 3 columns
+
+ """
+ buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
+
+ if self.caption is not None or self.label is not None:
+ if self.caption is None:
+ pass
+ else:
+ buf.write("\\caption{{{}}}".format(self.caption))
+
+ if self.label is None:
+ pass
+ else:
+ buf.write("\\label{{{}}}".format(self.label))
+
+ # a double-backslash is required at the end of the line
+ # as discussed here:
+ # https://tex.stackexchange.com/questions/219138
+ buf.write("\\\\\n")
+ else:
+ pass
+
+ @staticmethod
+ def _write_longtable_end(buf):
+ """
+ Write the end of a longtable environment.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+
+ """
+ buf.write("\\end{longtable}\n")
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 924b2a19e8504..9ffb54d23e37e 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -388,8 +388,7 @@ def test_to_latex_special_escape(self):
"""
assert escaped_result == escaped_expected
- def test_to_latex_longtable(self, float_frame):
- float_frame.to_latex(longtable=True)
+ def test_to_latex_longtable(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
withindex_result = df.to_latex(longtable=True)
@@ -439,6 +438,141 @@ def test_to_latex_longtable(self, float_frame):
with3columns_result = df.to_latex(index=False, longtable=True)
assert r"\multicolumn{3}" in with3columns_result
+ def test_to_latex_caption_label(self):
+ # GH 25436
+ the_caption = "a table in a \\texttt{table/tabular} environment"
+ the_label = "tab:table_tabular"
+
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+ # test when only the caption is provided
+ result_c = df.to_latex(caption=the_caption)
+
+ expected_c = r"""\begin{table}
+\centering
+\caption{a table in a \texttt{table/tabular} environment}
+\begin{tabular}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\bottomrule
+\end{tabular}
+\end{table}
+"""
+ assert result_c == expected_c
+
+ # test when only the label is provided
+ result_l = df.to_latex(label=the_label)
+
+ expected_l = r"""\begin{table}
+\centering
+\label{tab:table_tabular}
+\begin{tabular}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\bottomrule
+\end{tabular}
+\end{table}
+"""
+ assert result_l == expected_l
+
+ # test when the caption and the label are provided
+ result_cl = df.to_latex(caption=the_caption, label=the_label)
+
+ expected_cl = r"""\begin{table}
+\centering
+\caption{a table in a \texttt{table/tabular} environment}
+\label{tab:table_tabular}
+\begin{tabular}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\bottomrule
+\end{tabular}
+\end{table}
+"""
+ assert result_cl == expected_cl
+
+ def test_to_latex_longtable_caption_label(self):
+ # GH 25436
+ the_caption = "a table in a \\texttt{longtable} environment"
+ the_label = "tab:longtable"
+
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+ # test when only the caption is provided
+ result_c = df.to_latex(longtable=True, caption=the_caption)
+
+ expected_c = r"""\begin{longtable}{lrl}
+\caption{a table in a \texttt{longtable} environment}\\
+\toprule
+{} & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{{Continued on next page}} \\
+\midrule
+\endfoot
+
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ assert result_c == expected_c
+
+ # test when only the label is provided
+ result_l = df.to_latex(longtable=True, label=the_label)
+
+ expected_l = r"""\begin{longtable}{lrl}
+\label{tab:longtable}\\
+\toprule
+{} & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{{Continued on next page}} \\
+\midrule
+\endfoot
+
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ assert result_l == expected_l
+
+ # test when the caption and the label are provided
+ result_cl = df.to_latex(longtable=True, caption=the_caption, label=the_label)
+
+ expected_cl = r"""\begin{longtable}{lrl}
+\caption{a table in a \texttt{longtable} environment}\label{tab:longtable}\\
+\toprule
+{} & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{{Continued on next page}} \\
+\midrule
+\endfoot
+
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ assert result_cl == expected_cl
+
def test_to_latex_escape_special_chars(self):
special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
df = DataFrame(data=special_characters)
| - [x] closes #25436
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25437 | 2019-02-25T06:20:07Z | 2019-09-03T19:26:02Z | 2019-09-03T19:26:02Z | 2019-09-03T19:26:17Z |
BUG: Fix index type casting in read_json with orient='table' and float index (#25433) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..642093db3ded6 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -194,6 +194,8 @@ I/O
- Fixed bug in missing text when using :meth:`to_clipboard` if copying utf-16 characters in Python 3 on Windows (:issue:`25040`)
- Bug in :func:`read_json` for ``orient='table'`` when it tries to infer dtypes by default, which is not applicable as dtypes are already defined in the JSON schema (:issue:`21345`)
+- Bug in :func:`read_json` for ``orient='table'`` and float index, as it infers index dtype by default, which is not applicable because index dtype is already defined in the JSON schema (:issue:`25433`)
+- Bug in :func:`read_json` for ``orient='table'`` and string of float column names, as it makes a column name type conversion to Timestamp, which is not applicable because column names are already defined in the JSON schema (:issue:`25435`)
-
-
-
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 725e2d28ffd67..4bae067ee5196 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -227,7 +227,7 @@ def _write(self, obj, orient, double_precision, ensure_ascii,
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
- convert_axes=True, convert_dates=True, keep_default_dates=True,
+ convert_axes=None, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False, chunksize=None, compression='infer'):
"""
@@ -277,18 +277,25 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
'table' as an allowed value for the ``orient`` argument
typ : type of object to recover (series or frame), default 'frame'
- dtype : boolean or dict, default True
+ dtype : boolean or dict, default None
If True, infer dtypes; if a dict of column to dtype, then use those;
if False, then don't infer dtypes at all, applies only to the data.
- Not applicable with ``orient='table'``.
+ For all ``orient`` values except ``'table'``, default is True.
- .. versionchanged:: 0.25
+ .. versionchanged:: 0.25.0
- Not applicable with ``orient='table'``.
+ Not applicable for ``orient='table'``.
- convert_axes : boolean, default True
+ convert_axes : boolean, default None
Try to convert the axes to the proper dtypes.
+
+ For all ``orient`` values except ``'table'``, default is True.
+
+ .. versionchanged:: 0.25.0
+
+ Not applicable for ``orient='table'``.
+
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
@@ -417,8 +424,13 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
if orient == 'table' and dtype:
raise ValueError("cannot pass both dtype and orient='table'")
+ if orient == 'table' and convert_axes:
+ raise ValueError("cannot pass both convert_axes and orient='table'")
- dtype = orient != 'table' if dtype is None else dtype
+ if dtype is None and orient != 'table':
+ dtype = True
+ if convert_axes is None and orient != 'table':
+ convert_axes = True
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
@@ -692,7 +704,7 @@ def _try_convert_data(self, name, data, use_dtypes=True,
# don't try to coerce, unless a force conversion
if use_dtypes:
- if self.dtype is False:
+ if not self.dtype:
return data, False
elif self.dtype is True:
pass
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 3002d1dfb5f8a..351b495e5d8fc 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -564,17 +564,10 @@ def test_multiindex(self, index_names):
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
- @pytest.mark.parametrize("strict_check", [
- pytest.param(True, marks=pytest.mark.xfail),
- False
- ])
- def test_empty_frame_roundtrip(self, strict_check):
+ def test_empty_frame_roundtrip(self):
# GH 21287
df = pd.DataFrame([], columns=['a', 'b', 'c'])
expected = df.copy()
out = df.to_json(orient='table')
result = pd.read_json(out, orient='table')
- # TODO: When DF coercion issue (#21345) is resolved tighten type checks
- tm.assert_frame_equal(expected, result,
- check_dtype=strict_check,
- check_index_type=strict_check)
+ tm.assert_frame_equal(expected, result)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index fecd0f0572757..ed598b730d960 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -194,7 +194,7 @@ def _check_orient(df, orient, dtype=None, numpy=False,
else:
unser = unser.sort_index()
- if dtype is False:
+ if not dtype:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
@@ -1202,6 +1202,16 @@ def test_data_frame_size_after_to_json(self):
assert size_before == size_after
+ @pytest.mark.parametrize('index', [None, [1, 2], [1., 2.], ['a', 'b'],
+ ['1', '2'], ['1.', '2.']])
+ @pytest.mark.parametrize('columns', [['a', 'b'], ['1', '2'], ['1.', '2.']])
+ def test_from_json_to_json_table_index_and_columns(self, index, columns):
+ # GH25433 GH25435
+ expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
+ dfjson = expected.to_json(orient='table')
+ result = pd.read_json(dfjson, orient='table')
+ assert_frame_equal(result, expected)
+
def test_from_json_to_json_table_dtypes(self):
# GH21345
expected = pd.DataFrame({'a': [1, 2], 'b': [3., 4.], 'c': ['5', '6']})
@@ -1214,9 +1224,18 @@ def test_read_json_table_dtype_raises(self, dtype):
# GH21345
df = pd.DataFrame({'a': [1, 2], 'b': [3., 4.], 'c': ['5', '6']})
dfjson = df.to_json(orient='table')
- with pytest.raises(ValueError):
+ msg = "cannot pass both dtype and orient='table'"
+ with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient='table', dtype=dtype)
+ def test_read_json_table_convert_axes_raises(self):
+ # GH25433 GH25435
+ df = DataFrame([[1, 2], [3, 4]], index=[1., 2.], columns=['1.', '2.'])
+ dfjson = df.to_json(orient='table')
+ msg = "cannot pass both convert_axes and orient='table'"
+ with pytest.raises(ValueError, match=msg):
+ pd.read_json(dfjson, orient='table', convert_axes=True)
+
@pytest.mark.parametrize('data, expected', [
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
| closes #25433
closes #25435
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25434 | 2019-02-24T20:47:52Z | 2019-02-28T15:53:31Z | 2019-02-28T15:53:30Z | 2019-02-28T15:53:36Z |
TST: numpy RuntimeWarning with Series.round() | diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 2e690ebbfa121..43a45bb915819 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -8,7 +8,7 @@
import numpy as np
import pytest
-from pandas.compat import PY35, lrange
+from pandas.compat import PY2, PY35, is_platform_windows, lrange
import pandas.util._test_decorators as td
import pandas as pd
@@ -1842,6 +1842,17 @@ def test_numpy_round(self):
with pytest.raises(ValueError, match=msg):
np.round(df, decimals=0, out=df)
+ @pytest.mark.xfail(
+ PY2 and is_platform_windows(), reason="numpy/numpy#7882",
+ raises=AssertionError, strict=True)
+ def test_numpy_round_nan(self):
+ # See gh-14197
+ df = Series([1.53, np.nan, 0.06]).to_frame()
+ with tm.assert_produces_warning(None):
+ result = df.round()
+ expected = Series([2., np.nan, 0.]).to_frame()
+ tm.assert_frame_equal(result, expected)
+
def test_round_mixed_type(self):
# GH 11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 6811e370726b2..1f265d574da15 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -9,7 +9,7 @@
from numpy import nan
import pytest
-from pandas.compat import PY35, lrange, range
+from pandas.compat import PY2, PY35, is_platform_windows, lrange, range
import pandas.util._test_decorators as td
import pandas as pd
@@ -285,6 +285,17 @@ def test_numpy_round(self):
with pytest.raises(ValueError, match=msg):
np.round(s, decimals=0, out=s)
+ @pytest.mark.xfail(
+ PY2 and is_platform_windows(), reason="numpy/numpy#7882",
+ raises=AssertionError, strict=True)
+ def test_numpy_round_nan(self):
+ # See gh-14197
+ s = Series([1.53, np.nan, 0.06])
+ with tm.assert_produces_warning(None):
+ result = s.round()
+ expected = Series([2., np.nan, 0.])
+ assert_series_equal(result, expected)
+
def test_built_in_round(self):
if not compat.PY3:
pytest.skip(
| - [x] closes #14197
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ n/a] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25432 | 2019-02-24T15:57:21Z | 2019-02-27T14:40:33Z | 2019-02-27T14:40:33Z | 2019-02-28T09:19:05Z |
[BUG] maybe_upcast_putmast also handle ndarray | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 1823a8e8654fd..6ac7fdd2434c7 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -169,7 +169,10 @@ def trans(x): # noqa
def maybe_upcast_putmask(result, mask, other):
"""
- A safe version of putmask that potentially upcasts the result
+ A safe version of putmask that potentially upcasts the result.
+ The result is replaced with the first N elements of other,
+ where N is the number of True values in mask.
+ If the length of other is shorter than N, other will be repeated.
Parameters
----------
@@ -185,8 +188,18 @@ def maybe_upcast_putmask(result, mask, other):
result : ndarray
changed : boolean
Set to true if the result array was upcasted
+
+ Examples
+ --------
+ >>> result, _ = maybe_upcast_putmask(np.arange(1,6),
+ np.array([False, True, False, True, True]), np.arange(21,23))
+ >>> result
+ array([1, 21, 3, 22, 21])
"""
+ if not isinstance(result, np.ndarray):
+ raise ValueError("The result input must be a ndarray.")
+
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
@@ -241,7 +254,7 @@ def changeit():
# we have an ndarray and the masking has nans in it
else:
- if isna(other[mask]).any():
+ if isna(other).any():
return changeit()
try:
diff --git a/pandas/tests/dtypes/cast/test_upcast.py b/pandas/tests/dtypes/cast/test_upcast.py
new file mode 100644
index 0000000000000..074e89274cc88
--- /dev/null
+++ b/pandas/tests/dtypes/cast/test_upcast.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.cast import maybe_upcast_putmask
+
+from pandas import Series
+from pandas.util import testing as tm
+
+
+@pytest.mark.parametrize("result", [
+ Series([10, 11, 12]),
+ [10, 11, 12],
+ (10, 11, 12)
+])
+def test_upcast_error(result):
+ # GH23823
+ mask = np.array([False, True, False])
+ other = np.array([61, 62, 63])
+ with pytest.raises(ValueError):
+ result, _ = maybe_upcast_putmask(result, mask, other)
+
+
+@pytest.mark.parametrize("arr, other, exp_changed, expected", [
+ (np.arange(1, 6), np.array([61, 62, 63]),
+ False, np.array([1, 61, 3, 62, 63])),
+ (np.arange(1, 6), np.array([61.1, 62.2, 63.3]),
+ True, np.array([1, 61.1, 3, 62.2, 63.3])),
+ (np.arange(1, 6), np.nan,
+ True, np.array([1, np.nan, 3, np.nan, np.nan])),
+ (np.arange(10, 15), np.array([61, 62]),
+ False, np.array([10, 61, 12, 62, 61])),
+ (np.arange(10, 15), np.array([61, np.nan]),
+ True, np.array([10, 61, 12, np.nan, 61]))
+])
+def test_upcast(arr, other, exp_changed, expected):
+ # GH23823
+ mask = np.array([False, True, False, True, True])
+ result, changed = maybe_upcast_putmask(arr, mask, other)
+
+ assert changed == exp_changed
+ tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("arr, other, exp_changed, expected", [
+ (np.arange('2019-01-01', '2019-01-06', dtype='datetime64[D]'),
+ np.arange('2018-01-01', '2018-01-04', dtype='datetime64[D]'),
+ False, np.array(['2019-01-01', '2018-01-01', '2019-01-03',
+ '2018-01-02', '2018-01-03'], dtype='datetime64[D]')),
+ (np.arange('2019-01-01', '2019-01-06', dtype='datetime64[D]'), np.nan,
+ False, np.array(['2019-01-01', np.datetime64('NaT'),
+ '2019-01-03', np.datetime64('NaT'),
+ np.datetime64('NaT')], dtype='datetime64[D]')),
+ (np.arange('2019-01-01', '2019-01-06', dtype='datetime64[D]'),
+ np.arange('2018-01-01', '2018-01-03', dtype='datetime64[D]'),
+ False, np.array(['2019-01-01', '2018-01-01', '2019-01-03',
+ '2018-01-02', '2018-01-01'], dtype='datetime64[D]'))
+])
+def test_upcast_datetime(arr, other, exp_changed, expected):
+ # GH23823
+ mask = np.array([False, True, False, True, True])
+ result, changed = maybe_upcast_putmask(arr, mask, other)
+
+ assert changed == exp_changed
+ tm.assert_numpy_array_equal(result, expected)
| - closes #23823
- 2 tests added
- passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- whatsnew entry
Follow h-vetinari's footstep. `maybe_upcast_putmask` was left untouched in #25425. Try to fix the bug so that `maybe_upcast_putmask` can also handle `ndarray` as well as `Series`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25431 | 2019-02-24T15:04:24Z | 2019-03-22T11:51:09Z | 2019-03-22T11:51:09Z | 2019-03-22T11:51:15Z |
TST/REF: Add pytest idiom to test_frequencies.py | diff --git a/pandas/tests/tseries/frequencies/__init__.py b/pandas/tests/tseries/frequencies/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py
new file mode 100644
index 0000000000000..0aa29e451b1ba
--- /dev/null
+++ b/pandas/tests/tseries/frequencies/test_freq_code.py
@@ -0,0 +1,149 @@
+import pytest
+
+from pandas._libs.tslibs import frequencies as libfrequencies, resolution
+from pandas._libs.tslibs.frequencies import (
+ FreqGroup, _period_code_map, get_freq, get_freq_code)
+import pandas.compat as compat
+
+import pandas.tseries.offsets as offsets
+
+
+@pytest.fixture(params=list(compat.iteritems(_period_code_map)))
+def period_code_item(request):
+ return request.param
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("A", 1000), ("3A", 1000), ("-1A", 1000),
+ ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
+ ("W", 4000), ("W-MON", 4001), ("W-FRI", 4005)
+])
+def test_freq_code(freqstr, expected):
+ assert get_freq(freqstr) == expected
+
+
+def test_freq_code_match(period_code_item):
+ freqstr, code = period_code_item
+ assert get_freq(freqstr) == code
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("A", 1000), ("3A", 1000), ("-1A", 1000), ("A-JAN", 1000),
+ ("A-MAY", 1000), ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
+ ("Y-JAN", 1000), ("Y-MAY", 1000), (offsets.YearEnd(), 1000),
+ (offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000),
+ ("W", 4000), ("W-MON", 4000), ("W-FRI", 4000), (offsets.Week(), 4000),
+ (offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000),
+ ("T", FreqGroup.FR_MIN),
+])
+def test_freq_group(freqstr, expected):
+ assert resolution.get_freq_group(freqstr) == expected
+
+
+def test_freq_group_match(period_code_item):
+ freqstr, code = period_code_item
+
+ str_group = resolution.get_freq_group(freqstr)
+ code_group = resolution.get_freq_group(code)
+
+ assert str_group == code_group == code // 1000 * 1000
+
+
+@pytest.mark.parametrize("freqstr,exp_freqstr", [
+ ("D", "D"), ("W", "D"), ("M", "D"),
+ ("S", "S"), ("T", "S"), ("H", "S")
+])
+def test_get_to_timestamp_base(freqstr, exp_freqstr):
+ tsb = libfrequencies.get_to_timestamp_base
+
+ assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]
+
+
+_reso = resolution.Resolution
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("A", "year"), ("Q", "quarter"), ("M", "month"),
+ ("D", "day"), ("H", "hour"), ("T", "minute"),
+ ("S", "second"), ("L", "millisecond"),
+ ("U", "microsecond"), ("N", "nanosecond")
+])
+def test_get_str_from_freq(freqstr, expected):
+ assert _reso.get_str_from_freq(freqstr) == expected
+
+
+@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H",
+ "T", "S", "L", "U", "N"])
+def test_get_freq_roundtrip(freq):
+ result = _reso.get_freq(_reso.get_str_from_freq(freq))
+ assert freq == result
+
+
+@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"])
+def test_get_freq_roundtrip2(freq):
+ result = _reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq)))
+ assert freq == result
+
+
+@pytest.mark.parametrize("args,expected", [
+ ((1.5, "T"), (90, "S")), ((62.4, "T"), (3744, "S")),
+ ((1.04, "H"), (3744, "S")), ((1, "D"), (1, "D")),
+ ((0.342931, "H"), (1234551600, "U")), ((1.2345, "D"), (106660800, "L"))
+])
+def test_resolution_bumping(args, expected):
+ # see gh-14378
+ assert _reso.get_stride_from_decimal(*args) == expected
+
+
+@pytest.mark.parametrize("args", [
+ (0.5, "N"),
+
+ # Too much precision in the input can prevent.
+ (0.3429324798798269273987982, "H")
+])
+def test_cat(args):
+ msg = "Could not convert to integer offset at any resolution"
+
+ with pytest.raises(ValueError, match=msg):
+ _reso.get_stride_from_decimal(*args)
+
+
+@pytest.mark.parametrize("freq_input,expected", [
+ # Frequency string.
+ ("A", (get_freq("A"), 1)),
+ ("3D", (get_freq("D"), 3)),
+ ("-2M", (get_freq("M"), -2)),
+
+ # Tuple.
+ (("D", 1), (get_freq("D"), 1)),
+ (("A", 3), (get_freq("A"), 3)),
+ (("M", -2), (get_freq("M"), -2)),
+ ((5, "T"), (FreqGroup.FR_MIN, 5)),
+
+ # Numeric Tuple.
+ ((1000, 1), (1000, 1)),
+
+ # Offsets.
+ (offsets.Day(), (get_freq("D"), 1)),
+ (offsets.Day(3), (get_freq("D"), 3)),
+ (offsets.Day(-2), (get_freq("D"), -2)),
+ (offsets.MonthEnd(), (get_freq("M"), 1)),
+ (offsets.MonthEnd(3), (get_freq("M"), 3)),
+ (offsets.MonthEnd(-2), (get_freq("M"), -2)),
+ (offsets.Week(), (get_freq("W"), 1)),
+ (offsets.Week(3), (get_freq("W"), 3)),
+ (offsets.Week(-2), (get_freq("W"), -2)),
+ (offsets.Hour(), (FreqGroup.FR_HR, 1)),
+
+ # Monday is weekday=0.
+ (offsets.Week(weekday=1), (get_freq("W-TUE"), 1)),
+ (offsets.Week(3, weekday=0), (get_freq("W-MON"), 3)),
+ (offsets.Week(-2, weekday=4), (get_freq("W-FRI"), -2)),
+])
+def test_get_freq_code(freq_input, expected):
+ assert get_freq_code(freq_input) == expected
+
+
+def test_get_code_invalid():
+ with pytest.raises(ValueError, match="Invalid frequency"):
+ get_freq_code((5, "baz"))
diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
new file mode 100644
index 0000000000000..9e7ddbc45bba8
--- /dev/null
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -0,0 +1,406 @@
+from datetime import datetime, timedelta
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
+from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
+import pandas.compat as compat
+from pandas.compat import is_platform_windows, range
+
+from pandas import (
+ DatetimeIndex, Index, Series, Timestamp, date_range, period_range)
+from pandas.core.tools.datetimes import to_datetime
+import pandas.util.testing as tm
+
+import pandas.tseries.frequencies as frequencies
+import pandas.tseries.offsets as offsets
+
+
+def _check_generated_range(start, periods, freq):
+ """
+ Check the range generated from a given start, frequency, and period count.
+
+ Parameters
+ ----------
+ start : str
+ The start date.
+ periods : int
+ The number of periods.
+ freq : str
+ The frequency of the range.
+ """
+ freq = freq.upper()
+
+ gen = date_range(start, periods=periods, freq=freq)
+ index = DatetimeIndex(gen.values)
+
+ if not freq.startswith("Q-"):
+ assert frequencies.infer_freq(index) == gen.freqstr
+ else:
+ inf_freq = frequencies.infer_freq(index)
+ is_dec_range = inf_freq == "Q-DEC" and gen.freqstr in (
+ "Q", "Q-DEC", "Q-SEP", "Q-JUN", "Q-MAR")
+ is_nov_range = inf_freq == "Q-NOV" and gen.freqstr in (
+ "Q-NOV", "Q-AUG", "Q-MAY", "Q-FEB")
+ is_oct_range = inf_freq == "Q-OCT" and gen.freqstr in (
+ "Q-OCT", "Q-JUL", "Q-APR", "Q-JAN")
+ assert is_dec_range or is_nov_range or is_oct_range
+
+
+@pytest.fixture(params=[(timedelta(1), "D"),
+ (timedelta(hours=1), "H"),
+ (timedelta(minutes=1), "T"),
+ (timedelta(seconds=1), "S"),
+ (np.timedelta64(1, "ns"), "N"),
+ (timedelta(microseconds=1), "U"),
+ (timedelta(microseconds=1000), "L")])
+def base_delta_code_pair(request):
+ return request.param
+
+
+@pytest.fixture(params=[1, 2, 3, 4])
+def count(request):
+ return request.param
+
+
+@pytest.fixture(params=DAYS)
+def day(request):
+ return request.param
+
+
+@pytest.fixture(params=MONTHS)
+def month(request):
+ return request.param
+
+
+@pytest.fixture(params=[5, 7])
+def periods(request):
+ return request.param
+
+
+def test_raise_if_period_index():
+ index = period_range(start="1/1/1990", periods=20, freq="M")
+ msg = "Check the `freq` attribute instead of using infer_freq"
+
+ with pytest.raises(TypeError, match=msg):
+ frequencies.infer_freq(index)
+
+
+def test_raise_if_too_few():
+ index = DatetimeIndex(["12/31/1998", "1/3/1999"])
+ msg = "Need at least 3 dates to infer frequency"
+
+ with pytest.raises(ValueError, match=msg):
+ frequencies.infer_freq(index)
+
+
+def test_business_daily():
+ index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"])
+ assert frequencies.infer_freq(index) == "B"
+
+
+def test_business_daily_look_alike():
+ # see gh-16624
+ #
+ # Do not infer "B when "weekend" (2-day gap) in wrong place.
+ index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"])
+ assert frequencies.infer_freq(index) is None
+
+
+def test_day_corner():
+ index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"])
+ assert frequencies.infer_freq(index) == "D"
+
+
+def test_non_datetime_index():
+ dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"])
+ assert frequencies.infer_freq(dates) == "D"
+
+
+def test_fifth_week_of_month_infer():
+ # see gh-9425
+ #
+ # Only attempt to infer up to WOM-4.
+ index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
+ assert frequencies.infer_freq(index) is None
+
+
+def test_week_of_month_fake():
+ # All of these dates are on same day
+ # of week and are 4 or 5 weeks apart.
+ index = DatetimeIndex(["2013-08-27", "2013-10-01",
+ "2013-10-29", "2013-11-26"])
+ assert frequencies.infer_freq(index) != "WOM-4TUE"
+
+
+def test_fifth_week_of_month():
+ # see gh-9425
+ #
+ # Only supports freq up to WOM-4.
+ msg = ("Of the four parameters: start, end, periods, "
+ "and freq, exactly three must be specified")
+
+ with pytest.raises(ValueError, match=msg):
+ date_range("2014-01-01", freq="WOM-5MON")
+
+
+def test_monthly_ambiguous():
+ rng = DatetimeIndex(["1/31/2000", "2/29/2000", "3/31/2000"])
+ assert rng.inferred_freq == "M"
+
+
+def test_annual_ambiguous():
+ rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
+ assert rng.inferred_freq == "A-JAN"
+
+
+def test_infer_freq_delta(base_delta_code_pair, count):
+ b = Timestamp(datetime.now())
+ base_delta, code = base_delta_code_pair
+
+ inc = base_delta * count
+ index = DatetimeIndex([b + inc * j for j in range(3)])
+
+ exp_freq = "%d%s" % (count, code) if count > 1 else code
+ assert frequencies.infer_freq(index) == exp_freq
+
+
+@pytest.mark.parametrize("constructor", [
+ lambda now, delta: DatetimeIndex([now + delta * 7] +
+ [now + delta * j for j in range(3)]),
+ lambda now, delta: DatetimeIndex([now + delta * j for j in range(3)] +
+ [now + delta * 7])
+])
+def test_infer_freq_custom(base_delta_code_pair, constructor):
+ b = Timestamp(datetime.now())
+ base_delta, _ = base_delta_code_pair
+
+ index = constructor(b, base_delta)
+ assert frequencies.infer_freq(index) is None
+
+
+def test_weekly_infer(periods, day):
+ _check_generated_range("1/1/2000", periods, "W-{day}".format(day=day))
+
+
+def test_week_of_month_infer(periods, day, count):
+ _check_generated_range("1/1/2000", periods,
+ "WOM-{count}{day}".format(count=count, day=day))
+
+
+@pytest.mark.parametrize("freq", ["M", "BM", "BMS"])
+def test_monthly_infer(periods, freq):
+ _check_generated_range("1/1/2000", periods, "M")
+
+
+def test_quarterly_infer(month, periods):
+ _check_generated_range("1/1/2000", periods,
+ "Q-{month}".format(month=month))
+
+
+@pytest.mark.parametrize("annual", ["A", "BA"])
+def test_annually_infer(month, periods, annual):
+ _check_generated_range("1/1/2000", periods,
+ "{annual}-{month}".format(annual=annual,
+ month=month))
+
+
+@pytest.mark.parametrize("freq,expected", [
+ ("Q", "Q-DEC"), ("Q-NOV", "Q-NOV"), ("Q-OCT", "Q-OCT")
+])
+def test_infer_freq_index(freq, expected):
+ rng = period_range("1959Q2", "2009Q3", freq=freq)
+ rng = Index(rng.to_timestamp("D", how="e").astype(object))
+
+ assert rng.inferred_freq == expected
+
+
+@pytest.mark.parametrize(
+ "expected,dates",
+ list(compat.iteritems(
+ {"AS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"],
+ "Q-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"],
+ "M": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"],
+ "W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"],
+ "D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"],
+ "H": ["2011-12-31 22:00", "2011-12-31 23:00",
+ "2012-01-01 00:00", "2012-01-01 01:00"]}))
+)
+def test_infer_freq_tz(tz_naive_fixture, expected, dates):
+ # see gh-7310
+ tz = tz_naive_fixture
+ idx = DatetimeIndex(dates, tz=tz)
+ assert idx.inferred_freq == expected
+
+
+@pytest.mark.parametrize("date_pair", [
+ ["2013-11-02", "2013-11-5"], # Fall DST
+ ["2014-03-08", "2014-03-11"], # Spring DST
+ ["2014-01-01", "2014-01-03"] # Regular Time
+])
+@pytest.mark.parametrize("freq", [
+ "3H", "10T", "3601S", "3600001L", "3600000001U", "3600000000001N"
+])
+def test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq):
+ # see gh-8772
+ tz = tz_naive_fixture
+ idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
+ assert idx.inferred_freq == freq
+
+
+def test_infer_freq_tz_transition_custom():
+ index = date_range("2013-11-03", periods=5,
+ freq="3H").tz_localize("America/Chicago")
+ assert index.inferred_freq is None
+
+
+@pytest.mark.parametrize("data,expected", [
+ # Hourly freq in a day must result in "H"
+ (["2014-07-01 09:00", "2014-07-01 10:00", "2014-07-01 11:00",
+ "2014-07-01 12:00", "2014-07-01 13:00", "2014-07-01 14:00"], "H"),
+
+ (["2014-07-01 09:00", "2014-07-01 10:00", "2014-07-01 11:00",
+ "2014-07-01 12:00", "2014-07-01 13:00", "2014-07-01 14:00",
+ "2014-07-01 15:00", "2014-07-01 16:00", "2014-07-02 09:00",
+ "2014-07-02 10:00", "2014-07-02 11:00"], "BH"),
+ (["2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00",
+ "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00",
+ "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00",
+ "2014-07-07 10:00", "2014-07-07 11:00"], "BH"),
+ (["2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00",
+ "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00",
+ "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00",
+ "2014-07-07 10:00", "2014-07-07 11:00", "2014-07-07 12:00",
+ "2014-07-07 13:00", "2014-07-07 14:00", "2014-07-07 15:00",
+ "2014-07-07 16:00", "2014-07-08 09:00", "2014-07-08 10:00",
+ "2014-07-08 11:00", "2014-07-08 12:00", "2014-07-08 13:00",
+ "2014-07-08 14:00", "2014-07-08 15:00", "2014-07-08 16:00"], "BH"),
+])
+def test_infer_freq_business_hour(data, expected):
+ # see gh-7905
+ idx = DatetimeIndex(data)
+ assert idx.inferred_freq == expected
+
+
+def test_not_monotonic():
+ rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
+ rng = rng[::-1]
+
+ assert rng.inferred_freq == "-1A-JAN"
+
+
+def test_non_datetime_index2():
+ rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
+ vals = rng.to_pydatetime()
+
+ result = frequencies.infer_freq(vals)
+ assert result == rng.inferred_freq
+
+
+@pytest.mark.parametrize("idx", [
+ tm.makeIntIndex(10), tm.makeFloatIndex(10), tm.makePeriodIndex(10)
+])
+def test_invalid_index_types(idx):
+ msg = ("(cannot infer freq from a non-convertible)|"
+ "(Check the `freq` attribute instead of using infer_freq)")
+
+ with pytest.raises(TypeError, match=msg):
+ frequencies.infer_freq(idx)
+
+
+@pytest.mark.skipif(is_platform_windows(),
+ reason="see gh-10822: Windows issue")
+@pytest.mark.parametrize("idx", [tm.makeStringIndex(10),
+ tm.makeUnicodeIndex(10)])
+def test_invalid_index_types_unicode(idx):
+ # see gh-10822
+ #
+ # Odd error message on conversions to datetime for unicode.
+ msg = "Unknown string format"
+
+ with pytest.raises(ValueError, match=msg):
+ frequencies.infer_freq(idx)
+
+
+def test_string_datetime_like_compat():
+ # see gh-6463
+ data = ["2004-01", "2004-02", "2004-03", "2004-04"]
+
+ expected = frequencies.infer_freq(data)
+ result = frequencies.infer_freq(Index(data))
+
+ assert result == expected
+
+
+def test_series():
+ # see gh-6407
+ s = Series(date_range("20130101", "20130110"))
+ inferred = frequencies.infer_freq(s)
+ assert inferred == "D"
+
+
+@pytest.mark.parametrize("end", [10, 10.])
+def test_series_invalid_type(end):
+ # see gh-6407
+ msg = "cannot infer freq from a non-convertible dtype on a Series"
+ s = Series(np.arange(end))
+
+ with pytest.raises(TypeError, match=msg):
+ frequencies.infer_freq(s)
+
+
+def test_series_inconvertible_string():
+ # see gh-6407
+ msg = "Unknown string format"
+
+ with pytest.raises(ValueError, match=msg):
+ frequencies.infer_freq(Series(["foo", "bar"]))
+
+
+@pytest.mark.parametrize("freq", [None, "L"])
+def test_series_period_index(freq):
+ # see gh-6407
+ #
+ # Cannot infer on PeriodIndex
+ msg = "cannot infer freq from a non-convertible dtype on a Series"
+ s = Series(period_range("2013", periods=10, freq=freq))
+
+ with pytest.raises(TypeError, match=msg):
+ frequencies.infer_freq(s)
+
+
+@pytest.mark.parametrize("freq", ["M", "L", "S"])
+def test_series_datetime_index(freq):
+ s = Series(date_range("20130101", periods=10, freq=freq))
+ inferred = frequencies.infer_freq(s)
+ assert inferred == freq
+
+
+@pytest.mark.parametrize("offset_func", [
+ frequencies.get_offset,
+ lambda freq: date_range("2011-01-01", periods=5, freq=freq)
+])
+@pytest.mark.parametrize("freq", [
+ "WEEKDAY", "EOM", "W@MON", "W@TUE", "W@WED", "W@THU",
+ "W@FRI", "W@SAT", "W@SUN", "Q@JAN", "Q@FEB", "Q@MAR",
+ "A@JAN", "A@FEB", "A@MAR", "A@APR", "A@MAY", "A@JUN",
+ "A@JUL", "A@AUG", "A@SEP", "A@OCT", "A@NOV", "A@DEC",
+ "Y@JAN", "WOM@1MON", "WOM@2MON", "WOM@3MON",
+ "WOM@4MON", "WOM@1TUE", "WOM@2TUE", "WOM@3TUE",
+ "WOM@4TUE", "WOM@1WED", "WOM@2WED", "WOM@3WED",
+ "WOM@4WED", "WOM@1THU", "WOM@2THU", "WOM@3THU",
+ "WOM@4THU", "WOM@1FRI", "WOM@2FRI", "WOM@3FRI",
+ "WOM@4FRI"
+])
+def test_legacy_offset_warnings(offset_func, freq):
+ with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
+ offset_func(freq)
+
+
+def test_ms_vs_capital_ms():
+ left = frequencies.get_offset("ms")
+ right = frequencies.get_offset("MS")
+
+ assert left == offsets.Milli()
+ assert right == offsets.MonthBegin()
diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tseries/frequencies/test_to_offset.py
new file mode 100644
index 0000000000000..c9c35b47f3475
--- /dev/null
+++ b/pandas/tests/tseries/frequencies/test_to_offset.py
@@ -0,0 +1,146 @@
+import re
+
+import pytest
+
+from pandas import Timedelta
+
+import pandas.tseries.frequencies as frequencies
+import pandas.tseries.offsets as offsets
+
+
+@pytest.mark.parametrize("freq_input,expected", [
+ (frequencies.to_offset("10us"), offsets.Micro(10)),
+ (offsets.Hour(), offsets.Hour()),
+ ((5, "T"), offsets.Minute(5)),
+ ("2h30min", offsets.Minute(150)),
+ ("2h 30min", offsets.Minute(150)),
+ ("2h30min15s", offsets.Second(150 * 60 + 15)),
+ ("2h 60min", offsets.Hour(3)),
+ ("2h 20.5min", offsets.Second(8430)),
+ ("1.5min", offsets.Second(90)),
+ ("0.5S", offsets.Milli(500)),
+ ("15l500u", offsets.Micro(15500)),
+ ("10s75L", offsets.Milli(10075)),
+ ("1s0.25ms", offsets.Micro(1000250)),
+ ("1s0.25L", offsets.Micro(1000250)),
+ ("2800N", offsets.Nano(2800)),
+ ("2SM", offsets.SemiMonthEnd(2)),
+ ("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)),
+ ("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),
+ ("2SMS-15", offsets.SemiMonthBegin(2)),
+])
+def test_to_offset(freq_input, expected):
+ result = frequencies.to_offset(freq_input)
+ assert result == expected
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("-1S", -1),
+ ("-2SM", -2),
+ ("-1SMS", -1),
+ ("-5min10s", -310),
+])
+def test_to_offset_negative(freqstr, expected):
+ result = frequencies.to_offset(freqstr)
+ assert result.n == expected
+
+
+@pytest.mark.parametrize("freqstr", [
+ "2h20m", "U1", "-U", "3U1", "-2-3U", "-2D:3H",
+ "1.5.0S", "2SMS-15-15", "2SMS-15D", "100foo",
+
+ # Invalid leading +/- signs.
+ "+-1d", "-+1h", "+1", "-7", "+d", "-m",
+
+ # Invalid shortcut anchors.
+ "SM-0", "SM-28", "SM-29", "SM-FOO", "BSM", "SM--1", "SMS-1",
+ "SMS-28", "SMS-30", "SMS-BAR", "SMS-BYR", "BSMS", "SMS--2"
+])
+def test_to_offset_invalid(freqstr):
+ # see gh-13930
+
+ # We escape string because some of our
+ # inputs contain regex special characters.
+ msg = re.escape("Invalid frequency: {freqstr}".format(freqstr=freqstr))
+ with pytest.raises(ValueError, match=msg):
+ frequencies.to_offset(freqstr)
+
+
+def test_to_offset_no_evaluate():
+ with pytest.raises(ValueError, match="Could not evaluate"):
+ frequencies.to_offset(("", ""))
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("2D 3H", offsets.Hour(51)),
+ ("2 D3 H", offsets.Hour(51)),
+ ("2 D 3 H", offsets.Hour(51)),
+ (" 2 D 3 H ", offsets.Hour(51)),
+ (" H ", offsets.Hour()),
+ (" 3 H ", offsets.Hour(3)),
+])
+def test_to_offset_whitespace(freqstr, expected):
+ result = frequencies.to_offset(freqstr)
+ assert result == expected
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("00H 00T 01S", 1),
+ ("-00H 03T 14S", -194),
+])
+def test_to_offset_leading_zero(freqstr, expected):
+ result = frequencies.to_offset(freqstr)
+ assert result.n == expected
+
+
+@pytest.mark.parametrize("freqstr,expected", [
+ ("+1d", 1),
+ ("+2h30min", 150),
+])
+def test_to_offset_leading_plus(freqstr, expected):
+ result = frequencies.to_offset(freqstr)
+ assert result.n == expected
+
+
+@pytest.mark.parametrize("kwargs,expected", [
+ (dict(days=1, seconds=1), offsets.Second(86401)),
+ (dict(days=-1, seconds=1), offsets.Second(-86399)),
+ (dict(hours=1, minutes=10), offsets.Minute(70)),
+ (dict(hours=1, minutes=-10), offsets.Minute(50)),
+ (dict(weeks=1), offsets.Day(7)),
+ (dict(hours=1), offsets.Hour(1)),
+ (dict(hours=1), frequencies.to_offset("60min")),
+ (dict(microseconds=1), offsets.Micro(1))
+])
+def test_to_offset_pd_timedelta(kwargs, expected):
+ # see gh-9064
+ td = Timedelta(**kwargs)
+ result = frequencies.to_offset(td)
+ assert result == expected
+
+
+def test_to_offset_pd_timedelta_invalid():
+ # see gh-9064
+ msg = "Invalid frequency: 0 days 00:00:00"
+ td = Timedelta(microseconds=0)
+
+ with pytest.raises(ValueError, match=msg):
+ frequencies.to_offset(td)
+
+
+@pytest.mark.parametrize("shortcut,expected", [
+ ("W", offsets.Week(weekday=6)),
+ ("W-SUN", offsets.Week(weekday=6)),
+ ("Q", offsets.QuarterEnd(startingMonth=12)),
+ ("Q-DEC", offsets.QuarterEnd(startingMonth=12)),
+ ("Q-MAY", offsets.QuarterEnd(startingMonth=5)),
+ ("SM", offsets.SemiMonthEnd(day_of_month=15)),
+ ("SM-15", offsets.SemiMonthEnd(day_of_month=15)),
+ ("SM-1", offsets.SemiMonthEnd(day_of_month=1)),
+ ("SM-27", offsets.SemiMonthEnd(day_of_month=27)),
+ ("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),
+ ("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),
+])
+def test_anchored_shortcuts(shortcut, expected):
+ result = frequencies.to_offset(shortcut)
+ assert result == expected
diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py
deleted file mode 100644
index eb4e63654b47b..0000000000000
--- a/pandas/tests/tseries/test_frequencies.py
+++ /dev/null
@@ -1,793 +0,0 @@
-from datetime import datetime, timedelta
-
-import numpy as np
-import pytest
-
-from pandas._libs.tslibs import frequencies as libfrequencies, resolution
-from pandas._libs.tslibs.ccalendar import MONTHS
-from pandas._libs.tslibs.frequencies import (
- INVALID_FREQ_ERR_MSG, FreqGroup, _period_code_map, get_freq, get_freq_code)
-import pandas.compat as compat
-from pandas.compat import is_platform_windows, range
-
-from pandas import (
- DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range,
- period_range)
-from pandas.core.tools.datetimes import to_datetime
-import pandas.util.testing as tm
-
-import pandas.tseries.frequencies as frequencies
-import pandas.tseries.offsets as offsets
-
-
-class TestToOffset(object):
-
- def test_to_offset_multiple(self):
- freqstr = '2h30min'
- freqstr2 = '2h 30min'
-
- result = frequencies.to_offset(freqstr)
- assert (result == frequencies.to_offset(freqstr2))
- expected = offsets.Minute(150)
- assert (result == expected)
-
- freqstr = '2h30min15s'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Second(150 * 60 + 15)
- assert (result == expected)
-
- freqstr = '2h 60min'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Hour(3)
- assert (result == expected)
-
- freqstr = '2h 20.5min'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Second(8430)
- assert (result == expected)
-
- freqstr = '1.5min'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Second(90)
- assert (result == expected)
-
- freqstr = '0.5S'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Milli(500)
- assert (result == expected)
-
- freqstr = '15l500u'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Micro(15500)
- assert (result == expected)
-
- freqstr = '10s75L'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Milli(10075)
- assert (result == expected)
-
- freqstr = '1s0.25ms'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Micro(1000250)
- assert (result == expected)
-
- freqstr = '1s0.25L'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Micro(1000250)
- assert (result == expected)
-
- freqstr = '2800N'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Nano(2800)
- assert (result == expected)
-
- freqstr = '2SM'
- result = frequencies.to_offset(freqstr)
- expected = offsets.SemiMonthEnd(2)
- assert (result == expected)
-
- freqstr = '2SM-16'
- result = frequencies.to_offset(freqstr)
- expected = offsets.SemiMonthEnd(2, day_of_month=16)
- assert (result == expected)
-
- freqstr = '2SMS-14'
- result = frequencies.to_offset(freqstr)
- expected = offsets.SemiMonthBegin(2, day_of_month=14)
- assert (result == expected)
-
- freqstr = '2SMS-15'
- result = frequencies.to_offset(freqstr)
- expected = offsets.SemiMonthBegin(2)
- assert (result == expected)
-
- # malformed
- with pytest.raises(ValueError, match='Invalid frequency: 2h20m'):
- frequencies.to_offset('2h20m')
-
- def test_to_offset_negative(self):
- freqstr = '-1S'
- result = frequencies.to_offset(freqstr)
- assert (result.n == -1)
-
- freqstr = '-5min10s'
- result = frequencies.to_offset(freqstr)
- assert (result.n == -310)
-
- freqstr = '-2SM'
- result = frequencies.to_offset(freqstr)
- assert (result.n == -2)
-
- freqstr = '-1SMS'
- result = frequencies.to_offset(freqstr)
- assert (result.n == -1)
-
- def test_to_offset_invalid(self):
- # GH 13930
- with pytest.raises(ValueError, match='Invalid frequency: U1'):
- frequencies.to_offset('U1')
- with pytest.raises(ValueError, match='Invalid frequency: -U'):
- frequencies.to_offset('-U')
- with pytest.raises(ValueError, match='Invalid frequency: 3U1'):
- frequencies.to_offset('3U1')
- with pytest.raises(ValueError, match='Invalid frequency: -2-3U'):
- frequencies.to_offset('-2-3U')
- with pytest.raises(ValueError, match='Invalid frequency: -2D:3H'):
- frequencies.to_offset('-2D:3H')
- with pytest.raises(ValueError, match='Invalid frequency: 1.5.0S'):
- frequencies.to_offset('1.5.0S')
-
- # split offsets with spaces are valid
- assert frequencies.to_offset('2D 3H') == offsets.Hour(51)
- assert frequencies.to_offset('2 D3 H') == offsets.Hour(51)
- assert frequencies.to_offset('2 D 3 H') == offsets.Hour(51)
- assert frequencies.to_offset(' 2 D 3 H ') == offsets.Hour(51)
- assert frequencies.to_offset(' H ') == offsets.Hour()
- assert frequencies.to_offset(' 3 H ') == offsets.Hour(3)
-
- # special cases
- assert frequencies.to_offset('2SMS-15') == offsets.SemiMonthBegin(2)
- with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15-15'):
- frequencies.to_offset('2SMS-15-15')
- with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15D'):
- frequencies.to_offset('2SMS-15D')
-
- def test_to_offset_leading_zero(self):
- freqstr = '00H 00T 01S'
- result = frequencies.to_offset(freqstr)
- assert (result.n == 1)
-
- freqstr = '-00H 03T 14S'
- result = frequencies.to_offset(freqstr)
- assert (result.n == -194)
-
- def test_to_offset_leading_plus(self):
- freqstr = '+1d'
- result = frequencies.to_offset(freqstr)
- assert (result.n == 1)
-
- freqstr = '+2h30min'
- result = frequencies.to_offset(freqstr)
- assert (result.n == 150)
-
- for bad_freq in ['+-1d', '-+1h', '+1', '-7', '+d', '-m']:
- with pytest.raises(ValueError, match='Invalid frequency:'):
- frequencies.to_offset(bad_freq)
-
- def test_to_offset_pd_timedelta(self):
- # Tests for #9064
- td = Timedelta(days=1, seconds=1)
- result = frequencies.to_offset(td)
- expected = offsets.Second(86401)
- assert (expected == result)
-
- td = Timedelta(days=-1, seconds=1)
- result = frequencies.to_offset(td)
- expected = offsets.Second(-86399)
- assert (expected == result)
-
- td = Timedelta(hours=1, minutes=10)
- result = frequencies.to_offset(td)
- expected = offsets.Minute(70)
- assert (expected == result)
-
- td = Timedelta(hours=1, minutes=-10)
- result = frequencies.to_offset(td)
- expected = offsets.Minute(50)
- assert (expected == result)
-
- td = Timedelta(weeks=1)
- result = frequencies.to_offset(td)
- expected = offsets.Day(7)
- assert (expected == result)
-
- td1 = Timedelta(hours=1)
- result1 = frequencies.to_offset(td1)
- result2 = frequencies.to_offset('60min')
- assert (result1 == result2)
-
- td = Timedelta(microseconds=1)
- result = frequencies.to_offset(td)
- expected = offsets.Micro(1)
- assert (expected == result)
-
- td = Timedelta(microseconds=0)
- pytest.raises(ValueError, lambda: frequencies.to_offset(td))
-
- def test_anchored_shortcuts(self):
- result = frequencies.to_offset('W')
- expected = frequencies.to_offset('W-SUN')
- assert (result == expected)
-
- result1 = frequencies.to_offset('Q')
- result2 = frequencies.to_offset('Q-DEC')
- expected = offsets.QuarterEnd(startingMonth=12)
- assert (result1 == expected)
- assert (result2 == expected)
-
- result1 = frequencies.to_offset('Q-MAY')
- expected = offsets.QuarterEnd(startingMonth=5)
- assert (result1 == expected)
-
- result1 = frequencies.to_offset('SM')
- result2 = frequencies.to_offset('SM-15')
- expected = offsets.SemiMonthEnd(day_of_month=15)
- assert (result1 == expected)
- assert (result2 == expected)
-
- result = frequencies.to_offset('SM-1')
- expected = offsets.SemiMonthEnd(day_of_month=1)
- assert (result == expected)
-
- result = frequencies.to_offset('SM-27')
- expected = offsets.SemiMonthEnd(day_of_month=27)
- assert (result == expected)
-
- result = frequencies.to_offset('SMS-2')
- expected = offsets.SemiMonthBegin(day_of_month=2)
- assert (result == expected)
-
- result = frequencies.to_offset('SMS-27')
- expected = offsets.SemiMonthBegin(day_of_month=27)
- assert (result == expected)
-
- # ensure invalid cases fail as expected
- invalid_anchors = ['SM-0', 'SM-28', 'SM-29',
- 'SM-FOO', 'BSM', 'SM--1',
- 'SMS-1', 'SMS-28', 'SMS-30',
- 'SMS-BAR', 'SMS-BYR' 'BSMS',
- 'SMS--2']
- for invalid_anchor in invalid_anchors:
- with pytest.raises(ValueError, match='Invalid frequency: '):
- frequencies.to_offset(invalid_anchor)
-
-
-def test_ms_vs_MS():
- left = frequencies.get_offset('ms')
- right = frequencies.get_offset('MS')
- assert left == offsets.Milli()
- assert right == offsets.MonthBegin()
-
-
-def test_rule_aliases():
- rule = frequencies.to_offset('10us')
- assert rule == offsets.Micro(10)
-
-
-class TestFrequencyCode(object):
-
- def test_freq_code(self):
- assert get_freq('A') == 1000
- assert get_freq('3A') == 1000
- assert get_freq('-1A') == 1000
-
- assert get_freq('Y') == 1000
- assert get_freq('3Y') == 1000
- assert get_freq('-1Y') == 1000
-
- assert get_freq('W') == 4000
- assert get_freq('W-MON') == 4001
- assert get_freq('W-FRI') == 4005
-
- for freqstr, code in compat.iteritems(_period_code_map):
- result = get_freq(freqstr)
- assert result == code
-
- result = resolution.get_freq_group(freqstr)
- assert result == code // 1000 * 1000
-
- result = resolution.get_freq_group(code)
- assert result == code // 1000 * 1000
-
- def test_freq_group(self):
- assert resolution.get_freq_group('A') == 1000
- assert resolution.get_freq_group('3A') == 1000
- assert resolution.get_freq_group('-1A') == 1000
- assert resolution.get_freq_group('A-JAN') == 1000
- assert resolution.get_freq_group('A-MAY') == 1000
-
- assert resolution.get_freq_group('Y') == 1000
- assert resolution.get_freq_group('3Y') == 1000
- assert resolution.get_freq_group('-1Y') == 1000
- assert resolution.get_freq_group('Y-JAN') == 1000
- assert resolution.get_freq_group('Y-MAY') == 1000
-
- assert resolution.get_freq_group(offsets.YearEnd()) == 1000
- assert resolution.get_freq_group(offsets.YearEnd(month=1)) == 1000
- assert resolution.get_freq_group(offsets.YearEnd(month=5)) == 1000
-
- assert resolution.get_freq_group('W') == 4000
- assert resolution.get_freq_group('W-MON') == 4000
- assert resolution.get_freq_group('W-FRI') == 4000
- assert resolution.get_freq_group(offsets.Week()) == 4000
- assert resolution.get_freq_group(offsets.Week(weekday=1)) == 4000
- assert resolution.get_freq_group(offsets.Week(weekday=5)) == 4000
-
- def test_get_to_timestamp_base(self):
- tsb = libfrequencies.get_to_timestamp_base
-
- assert (tsb(get_freq_code('D')[0]) ==
- get_freq_code('D')[0])
- assert (tsb(get_freq_code('W')[0]) ==
- get_freq_code('D')[0])
- assert (tsb(get_freq_code('M')[0]) ==
- get_freq_code('D')[0])
-
- assert (tsb(get_freq_code('S')[0]) ==
- get_freq_code('S')[0])
- assert (tsb(get_freq_code('T')[0]) ==
- get_freq_code('S')[0])
- assert (tsb(get_freq_code('H')[0]) ==
- get_freq_code('S')[0])
-
- def test_freq_to_reso(self):
- Reso = resolution.Resolution
-
- assert Reso.get_str_from_freq('A') == 'year'
- assert Reso.get_str_from_freq('Q') == 'quarter'
- assert Reso.get_str_from_freq('M') == 'month'
- assert Reso.get_str_from_freq('D') == 'day'
- assert Reso.get_str_from_freq('H') == 'hour'
- assert Reso.get_str_from_freq('T') == 'minute'
- assert Reso.get_str_from_freq('S') == 'second'
- assert Reso.get_str_from_freq('L') == 'millisecond'
- assert Reso.get_str_from_freq('U') == 'microsecond'
- assert Reso.get_str_from_freq('N') == 'nanosecond'
-
- for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
- # check roundtrip
- result = Reso.get_freq(Reso.get_str_from_freq(freq))
- assert freq == result
-
- for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
- result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
- assert freq == result
-
- def test_resolution_bumping(self):
- # see gh-14378
- Reso = resolution.Resolution
-
- assert Reso.get_stride_from_decimal(1.5, 'T') == (90, 'S')
- assert Reso.get_stride_from_decimal(62.4, 'T') == (3744, 'S')
- assert Reso.get_stride_from_decimal(1.04, 'H') == (3744, 'S')
- assert Reso.get_stride_from_decimal(1, 'D') == (1, 'D')
- assert (Reso.get_stride_from_decimal(0.342931, 'H') ==
- (1234551600, 'U'))
- assert Reso.get_stride_from_decimal(1.2345, 'D') == (106660800, 'L')
-
- with pytest.raises(ValueError):
- Reso.get_stride_from_decimal(0.5, 'N')
-
- # too much precision in the input can prevent
- with pytest.raises(ValueError):
- Reso.get_stride_from_decimal(0.3429324798798269273987982, 'H')
-
- def test_get_freq_code(self):
- # frequency str
- assert (get_freq_code('A') ==
- (get_freq('A'), 1))
- assert (get_freq_code('3D') ==
- (get_freq('D'), 3))
- assert (get_freq_code('-2M') ==
- (get_freq('M'), -2))
-
- # tuple
- assert (get_freq_code(('D', 1)) ==
- (get_freq('D'), 1))
- assert (get_freq_code(('A', 3)) ==
- (get_freq('A'), 3))
- assert (get_freq_code(('M', -2)) ==
- (get_freq('M'), -2))
-
- # numeric tuple
- assert get_freq_code((1000, 1)) == (1000, 1)
-
- # offsets
- assert (get_freq_code(offsets.Day()) ==
- (get_freq('D'), 1))
- assert (get_freq_code(offsets.Day(3)) ==
- (get_freq('D'), 3))
- assert (get_freq_code(offsets.Day(-2)) ==
- (get_freq('D'), -2))
-
- assert (get_freq_code(offsets.MonthEnd()) ==
- (get_freq('M'), 1))
- assert (get_freq_code(offsets.MonthEnd(3)) ==
- (get_freq('M'), 3))
- assert (get_freq_code(offsets.MonthEnd(-2)) ==
- (get_freq('M'), -2))
-
- assert (get_freq_code(offsets.Week()) ==
- (get_freq('W'), 1))
- assert (get_freq_code(offsets.Week(3)) ==
- (get_freq('W'), 3))
- assert (get_freq_code(offsets.Week(-2)) ==
- (get_freq('W'), -2))
-
- # Monday is weekday=0
- assert (get_freq_code(offsets.Week(weekday=1)) ==
- (get_freq('W-TUE'), 1))
- assert (get_freq_code(offsets.Week(3, weekday=0)) ==
- (get_freq('W-MON'), 3))
- assert (get_freq_code(offsets.Week(-2, weekday=4)) ==
- (get_freq('W-FRI'), -2))
-
- def test_frequency_misc(self):
- assert (resolution.get_freq_group('T') ==
- FreqGroup.FR_MIN)
-
- code, stride = get_freq_code(offsets.Hour())
- assert code == FreqGroup.FR_HR
-
- code, stride = get_freq_code((5, 'T'))
- assert code == FreqGroup.FR_MIN
- assert stride == 5
-
- offset = offsets.Hour()
- result = frequencies.to_offset(offset)
- assert result == offset
-
- result = frequencies.to_offset((5, 'T'))
- expected = offsets.Minute(5)
- assert result == expected
-
- with pytest.raises(ValueError, match='Invalid frequency'):
- get_freq_code((5, 'baz'))
-
- with pytest.raises(ValueError, match='Invalid frequency'):
- frequencies.to_offset('100foo')
-
- with pytest.raises(ValueError, match='Could not evaluate'):
- frequencies.to_offset(('', ''))
-
-
-_dti = DatetimeIndex
-
-
-class TestFrequencyInference(object):
-
- def test_raise_if_period_index(self):
- index = period_range(start="1/1/1990", periods=20, freq="M")
- pytest.raises(TypeError, frequencies.infer_freq, index)
-
- def test_raise_if_too_few(self):
- index = _dti(['12/31/1998', '1/3/1999'])
- pytest.raises(ValueError, frequencies.infer_freq, index)
-
- def test_business_daily(self):
- index = _dti(['01/01/1999', '1/4/1999', '1/5/1999'])
- assert frequencies.infer_freq(index) == 'B'
-
- def test_business_daily_look_alike(self):
- # GH 16624, do not infer 'B' when 'weekend' (2-day gap) in wrong place
- index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
- assert frequencies.infer_freq(index) is None
-
- def test_day(self):
- self._check_tick(timedelta(1), 'D')
-
- def test_day_corner(self):
- index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
- assert frequencies.infer_freq(index) == 'D'
-
- def test_non_datetimeindex(self):
- dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
- assert frequencies.infer_freq(dates) == 'D'
-
- def test_hour(self):
- self._check_tick(timedelta(hours=1), 'H')
-
- def test_minute(self):
- self._check_tick(timedelta(minutes=1), 'T')
-
- def test_second(self):
- self._check_tick(timedelta(seconds=1), 'S')
-
- def test_millisecond(self):
- self._check_tick(timedelta(microseconds=1000), 'L')
-
- def test_microsecond(self):
- self._check_tick(timedelta(microseconds=1), 'U')
-
- def test_nanosecond(self):
- self._check_tick(np.timedelta64(1, 'ns'), 'N')
-
- def _check_tick(self, base_delta, code):
- b = Timestamp(datetime.now())
- for i in range(1, 5):
- inc = base_delta * i
- index = _dti([b + inc * j for j in range(3)])
- if i > 1:
- exp_freq = '%d%s' % (i, code)
- else:
- exp_freq = code
- assert frequencies.infer_freq(index) == exp_freq
-
- index = _dti([b + base_delta * 7] + [b + base_delta * j for j in range(
- 3)])
- assert frequencies.infer_freq(index) is None
-
- index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta *
- 7])
-
- assert frequencies.infer_freq(index) is None
-
- def test_weekly(self):
- days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
-
- for day in days:
- self._check_generated_range('1/1/2000', 'W-%s' % day)
-
- def test_week_of_month(self):
- days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
-
- for day in days:
- for i in range(1, 5):
- self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
-
- def test_fifth_week_of_month(self):
- # Only supports freq up to WOM-4. See #9425
- func = lambda: date_range('2014-01-01', freq='WOM-5MON')
- pytest.raises(ValueError, func)
-
- def test_fifth_week_of_month_infer(self):
- # Only attempts to infer up to WOM-4. See #9425
- index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
- assert frequencies.infer_freq(index) is None
-
- def test_week_of_month_fake(self):
- # All of these dates are on same day of week and are 4 or 5 weeks apart
- index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29",
- "2013-11-26"])
- assert frequencies.infer_freq(index) != 'WOM-4TUE'
-
- def test_monthly(self):
- self._check_generated_range('1/1/2000', 'M')
-
- def test_monthly_ambiguous(self):
- rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
- assert rng.inferred_freq == 'M'
-
- def test_business_monthly(self):
- self._check_generated_range('1/1/2000', 'BM')
-
- def test_business_start_monthly(self):
- self._check_generated_range('1/1/2000', 'BMS')
-
- def test_quarterly(self):
- for month in ['JAN', 'FEB', 'MAR']:
- self._check_generated_range('1/1/2000', 'Q-%s' % month)
-
- def test_annual(self):
- for month in MONTHS:
- self._check_generated_range('1/1/2000', 'A-%s' % month)
-
- def test_business_annual(self):
- for month in MONTHS:
- self._check_generated_range('1/1/2000', 'BA-%s' % month)
-
- def test_annual_ambiguous(self):
- rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
- assert rng.inferred_freq == 'A-JAN'
-
- def _check_generated_range(self, start, freq):
- freq = freq.upper()
-
- gen = date_range(start, periods=7, freq=freq)
- index = _dti(gen.values)
- if not freq.startswith('Q-'):
- assert frequencies.infer_freq(index) == gen.freqstr
- else:
- inf_freq = frequencies.infer_freq(index)
- is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
- 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
- is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
- 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
- is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
- 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
- assert is_dec_range or is_nov_range or is_oct_range
-
- gen = date_range(start, periods=5, freq=freq)
- index = _dti(gen.values)
-
- if not freq.startswith('Q-'):
- assert frequencies.infer_freq(index) == gen.freqstr
- else:
- inf_freq = frequencies.infer_freq(index)
- is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
- 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
- is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
- 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
- is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
- 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
-
- assert is_dec_range or is_nov_range or is_oct_range
-
- def test_infer_freq(self):
- rng = period_range('1959Q2', '2009Q3', freq='Q')
- rng = Index(rng.to_timestamp('D', how='e').astype(object))
- assert rng.inferred_freq == 'Q-DEC'
-
- rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
- rng = Index(rng.to_timestamp('D', how='e').astype(object))
- assert rng.inferred_freq == 'Q-NOV'
-
- rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
- rng = Index(rng.to_timestamp('D', how='e').astype(object))
- assert rng.inferred_freq == 'Q-OCT'
-
- def test_infer_freq_tz(self):
-
- freqs = {'AS-JAN':
- ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
- 'Q-OCT':
- ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
- 'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
- 'W-SAT':
- ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
- 'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
- 'H': ['2011-12-31 22:00', '2011-12-31 23:00',
- '2012-01-01 00:00', '2012-01-01 01:00']}
-
- # GH 7310
- for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
- 'US/Pacific', 'US/Eastern']:
- for expected, dates in compat.iteritems(freqs):
- idx = DatetimeIndex(dates, tz=tz)
- assert idx.inferred_freq == expected
-
- def test_infer_freq_tz_transition(self):
- # Tests for #8772
- date_pairs = [['2013-11-02', '2013-11-5'], # Fall DST
- ['2014-03-08', '2014-03-11'], # Spring DST
- ['2014-01-01', '2014-01-03']] # Regular Time
- freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U',
- '3600000000001N']
-
- for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
- 'US/Pacific', 'US/Eastern']:
- for date_pair in date_pairs:
- for freq in freqs:
- idx = date_range(date_pair[0], date_pair[
- 1], freq=freq, tz=tz)
- assert idx.inferred_freq == freq
-
- index = date_range("2013-11-03", periods=5,
- freq="3H").tz_localize("America/Chicago")
- assert index.inferred_freq is None
-
- def test_infer_freq_businesshour(self):
- # GH 7905
- idx = DatetimeIndex(
- ['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
- '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
- # hourly freq in a day must result in 'H'
- assert idx.inferred_freq == 'H'
-
- idx = DatetimeIndex(
- ['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
- '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
- '2014-07-01 15:00', '2014-07-01 16:00', '2014-07-02 09:00',
- '2014-07-02 10:00', '2014-07-02 11:00'])
- assert idx.inferred_freq == 'BH'
-
- idx = DatetimeIndex(
- ['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
- '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
- '2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
- '2014-07-07 10:00', '2014-07-07 11:00'])
- assert idx.inferred_freq == 'BH'
-
- idx = DatetimeIndex(
- ['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
- '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
- '2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
- '2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00',
- '2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00',
- '2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00',
- '2014-07-08 11:00', '2014-07-08 12:00', '2014-07-08 13:00',
- '2014-07-08 14:00', '2014-07-08 15:00', '2014-07-08 16:00'])
- assert idx.inferred_freq == 'BH'
-
- def test_not_monotonic(self):
- rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
- rng = rng[::-1]
- assert rng.inferred_freq == '-1A-JAN'
-
- def test_non_datetimeindex2(self):
- rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
-
- vals = rng.to_pydatetime()
-
- result = frequencies.infer_freq(vals)
- assert result == rng.inferred_freq
-
- def test_invalid_index_types(self):
-
- # test all index types
- for i in [tm.makeIntIndex(10), tm.makeFloatIndex(10),
- tm.makePeriodIndex(10)]:
- pytest.raises(TypeError, lambda: frequencies.infer_freq(i))
-
- # GH 10822
- # odd error message on conversions to datetime for unicode
- if not is_platform_windows():
- for i in [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]:
- pytest.raises(ValueError, lambda: frequencies.infer_freq(i))
-
- def test_string_datetimelike_compat(self):
-
- # GH 6463
- expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03',
- '2004-04'])
- result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03',
- '2004-04']))
- assert result == expected
-
- def test_series(self):
-
- # GH6407
- # inferring series
-
- # invalid type of Series
- for s in [Series(np.arange(10)), Series(np.arange(10.))]:
- pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
-
- # a non-convertible string
- pytest.raises(ValueError, lambda: frequencies.infer_freq(
- Series(['foo', 'bar'])))
-
- # cannot infer on PeriodIndex
- for freq in [None, 'L']:
- s = Series(period_range('2013', periods=10, freq=freq))
- pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
-
- # DateTimeIndex
- for freq in ['M', 'L', 'S']:
- s = Series(date_range('20130101', periods=10, freq=freq))
- inferred = frequencies.infer_freq(s)
- assert inferred == freq
-
- s = Series(date_range('20130101', '20130110'))
- inferred = frequencies.infer_freq(s)
- assert inferred == 'D'
-
- def test_legacy_offset_warnings(self):
- freqs = ['WEEKDAY', 'EOM', 'W@MON', 'W@TUE', 'W@WED', 'W@THU',
- 'W@FRI', 'W@SAT', 'W@SUN', 'Q@JAN', 'Q@FEB', 'Q@MAR',
- 'A@JAN', 'A@FEB', 'A@MAR', 'A@APR', 'A@MAY', 'A@JUN',
- 'A@JUL', 'A@AUG', 'A@SEP', 'A@OCT', 'A@NOV', 'A@DEC',
- 'Y@JAN', 'WOM@1MON', 'WOM@2MON', 'WOM@3MON',
- 'WOM@4MON', 'WOM@1TUE', 'WOM@2TUE', 'WOM@3TUE',
- 'WOM@4TUE', 'WOM@1WED', 'WOM@2WED', 'WOM@3WED',
- 'WOM@4WED', 'WOM@1THU', 'WOM@2THU', 'WOM@3THU',
- 'WOM@4THU', 'WOM@1FRI', 'WOM@2FRI', 'WOM@3FRI',
- 'WOM@4FRI']
-
- msg = INVALID_FREQ_ERR_MSG
- for freq in freqs:
- with pytest.raises(ValueError, match=msg):
- frequencies.get_offset(freq)
-
- with pytest.raises(ValueError, match=msg):
- date_range('2011-01-01', periods=5, freq=freq)
| https://api.github.com/repos/pandas-dev/pandas/pulls/25430 | 2019-02-24T11:01:00Z | 2019-02-28T15:20:12Z | 2019-02-28T15:20:12Z | 2019-02-28T18:17:19Z | |
Class to read OpenDocument Tables | diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index fead806fc8e1c..6f85c32b9a915 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -16,6 +16,7 @@ dependencies:
- nomkl
- numexpr
- numpy=1.15.*
+ - odfpy
- openpyxl
- pandas-gbq
# https://github.com/pydata/pandas-gbq/issues/271
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 9af6c36cc4e4d..bf7ec561b4a7e 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -32,6 +32,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>`
text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>`
binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
+ binary;`OpenDocument <http://www.opendocumentformat.org>`__;:ref:`read_excel<io.ods>`;
binary;`HDF5 Format <https://support.hdfgroup.org/HDF5/whatishdf5.html>`__;:ref:`read_hdf<io.hdf5>`;:ref:`to_hdf<io.hdf5>`
binary;`Feather Format <https://github.com/wesm/feather>`__;:ref:`read_feather<io.feather>`;:ref:`to_feather<io.feather>`
binary;`Parquet Format <https://parquet.apache.org/>`__;:ref:`read_parquet<io.parquet>`;:ref:`to_parquet<io.parquet>`
@@ -2779,9 +2780,10 @@ parse HTML tables in the top-level pandas io function ``read_html``.
Excel files
-----------
-The :func:`~pandas.read_excel` method can read Excel 2003 (``.xls``) and
-Excel 2007+ (``.xlsx``) files using the ``xlrd`` Python
-module. The :meth:`~DataFrame.to_excel` instance method is used for
+The :func:`~pandas.read_excel` method can read Excel 2003 (``.xls``)
+files using the ``xlrd`` Python module. Excel 2007+ (``.xlsx``) files
+can be read using either ``xlrd`` or ``openpyxl``.
+The :meth:`~DataFrame.to_excel` instance method is used for
saving a ``DataFrame`` to Excel. Generally the semantics are
similar to working with :ref:`csv<io.read_csv_table>` data.
See the :ref:`cookbook<cookbook.excel>` for some advanced strategies.
@@ -3217,7 +3219,27 @@ The look and feel of Excel worksheets created from pandas can be modified using
* ``float_format`` : Format string for floating point numbers (default ``None``).
* ``freeze_panes`` : A tuple of two integers representing the bottommost row and rightmost column to freeze. Each of these parameters is one-based, so (1, 1) will freeze the first row and first column (default ``None``).
+.. _io.ods:
+OpenDocument Spreadsheets
+-------------------------
+
+.. versionadded:: 0.25
+
+The :func:`~pandas.read_excel` method can also read OpenDocument spreadsheets
+using the ``odfpy`` module. The semantics and features for reading
+OpenDocument spreadsheets match what can be done for `Excel files`_ using
+``engine='odf'``.
+
+.. code-block:: python
+
+ # Returns a DataFrame
+ pd.read_excel('path_to_file.ods', engine='odf')
+
+.. note::
+
+ Currently pandas only supports *reading* OpenDocument spreadsheets. Writing
+ is not implemented.
.. _io.clipboard:
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 2030bb4d974c3..35e9fe5706b31 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -164,6 +164,7 @@ Other enhancements
- Added new option ``plotting.backend`` to be able to select a plotting backend different than the existing ``matplotlib`` one. Use ``pandas.set_option('plotting.backend', '<backend-module>')`` where ``<backend-module`` is a library implementing the pandas plotting API (:issue:`14130`)
- :class:`pandas.offsets.BusinessHour` supports multiple opening hours intervals (:issue:`15481`)
- :func:`read_excel` can now use ``openpyxl`` to read Excel files via the ``engine='openpyxl'`` argument. This will become the default in a future release (:issue:`11499`)
+- :func:`pandas.io.excel.read_excel` supports reading OpenDocument tables. Specify ``engine='odf'`` to enable. Consult the :ref:`IO User Guide <io.ods>` for more details (:issue:`9070`)
.. _whatsnew_0250.api_breaking:
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 31746dc3d6c16..620884d66821c 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -13,6 +13,7 @@
"lxml.etree": "3.8.0",
"matplotlib": "2.2.2",
"numexpr": "2.6.2",
+ "odfpy": "1.3.0",
"openpyxl": "2.4.8",
"pandas_gbq": "0.8.0",
"pyarrow": "0.9.0",
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 84ca154d045fe..7fe9f8438ac74 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -422,6 +422,7 @@ def use_inf_as_na_cb(key):
_xls_options = ['xlrd']
_xlsm_options = ['xlrd', 'openpyxl']
_xlsx_options = ['xlrd', 'openpyxl']
+_ods_options = ['odf']
with cf.config_prefix("io.excel.xls"):
@@ -447,6 +448,14 @@ def use_inf_as_na_cb(key):
validator=str)
+with cf.config_prefix("io.excel.ods"):
+ cf.register_option("reader", "auto",
+ reader_engine_doc.format(
+ ext='ods',
+ others=', '.join(_ods_options)),
+ validator=str)
+
+
# Set up the io.excel specific writer configuration.
writer_engine_doc = """
: string
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 8055b6609b1c4..d10a40541bb6c 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -768,12 +768,14 @@ class ExcelFile:
Acceptable values are None or ``xlrd``.
"""
- from pandas.io.excel._xlrd import _XlrdReader
+ from pandas.io.excel._odfreader import _ODFReader
from pandas.io.excel._openpyxl import _OpenpyxlReader
+ from pandas.io.excel._xlrd import _XlrdReader
_engines = {
'xlrd': _XlrdReader,
'openpyxl': _OpenpyxlReader,
+ 'odf': _ODFReader,
}
def __init__(self, io, engine=None):
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
new file mode 100644
index 0000000000000..c820c1497c3c9
--- /dev/null
+++ b/pandas/io/excel/_odfreader.py
@@ -0,0 +1,176 @@
+from typing import List
+
+from pandas.compat._optional import import_optional_dependency
+
+import pandas as pd
+from pandas._typing import FilePathOrBuffer, Scalar
+
+from pandas.io.excel._base import _BaseExcelReader
+
+
+class _ODFReader(_BaseExcelReader):
+ """Read tables out of OpenDocument formatted files
+
+ Parameters
+ ----------
+ filepath_or_buffer: string, path to be parsed or
+ an open readable stream.
+ """
+ def __init__(self, filepath_or_buffer: FilePathOrBuffer):
+ import_optional_dependency("odf")
+ super().__init__(filepath_or_buffer)
+
+ @property
+ def _workbook_class(self):
+ from odf.opendocument import OpenDocument
+ return OpenDocument
+
+ def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
+ from odf.opendocument import load
+ return load(filepath_or_buffer)
+
+ @property
+ def empty_value(self) -> str:
+ """Property for compat with other readers."""
+ return ''
+
+ @property
+ def sheet_names(self) -> List[str]:
+ """Return a list of sheet names present in the document"""
+ from odf.table import Table
+
+ tables = self.book.getElementsByType(Table)
+ return [t.getAttribute("name") for t in tables]
+
+ def get_sheet_by_index(self, index: int):
+ from odf.table import Table
+ tables = self.book.getElementsByType(Table)
+ return tables[index]
+
+ def get_sheet_by_name(self, name: str):
+ from odf.table import Table
+
+ tables = self.book.getElementsByType(Table)
+
+ for table in tables:
+ if table.getAttribute("name") == name:
+ return table
+
+ raise ValueError("sheet {name} not found".format(name))
+
+ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
+ """Parse an ODF Table into a list of lists
+ """
+ from odf.table import CoveredTableCell, TableCell, TableRow
+
+ covered_cell_name = CoveredTableCell().qname
+ table_cell_name = TableCell().qname
+ cell_names = {covered_cell_name, table_cell_name}
+
+ sheet_rows = sheet.getElementsByType(TableRow)
+ empty_rows = 0
+ max_row_len = 0
+
+ table = [] # type: List[List[Scalar]]
+
+ for i, sheet_row in enumerate(sheet_rows):
+ sheet_cells = [x for x in sheet_row.childNodes
+ if x.qname in cell_names]
+ empty_cells = 0
+ table_row = [] # type: List[Scalar]
+
+ for j, sheet_cell in enumerate(sheet_cells):
+ if sheet_cell.qname == table_cell_name:
+ value = self._get_cell_value(sheet_cell, convert_float)
+ else:
+ value = self.empty_value
+
+ column_repeat = self._get_column_repeat(sheet_cell)
+
+ # Queue up empty values, writing only if content succeeds them
+ if value == self.empty_value:
+ empty_cells += column_repeat
+ else:
+ table_row.extend([self.empty_value] * empty_cells)
+ empty_cells = 0
+ table_row.extend([value] * column_repeat)
+
+ if max_row_len < len(table_row):
+ max_row_len = len(table_row)
+
+ row_repeat = self._get_row_repeat(sheet_row)
+ if self._is_empty_row(sheet_row):
+ empty_rows += row_repeat
+ else:
+ # add blank rows to our table
+ table.extend([[self.empty_value]] * empty_rows)
+ empty_rows = 0
+ for _ in range(row_repeat):
+ table.append(table_row)
+
+ # Make our table square
+ for row in table:
+ if len(row) < max_row_len:
+ row.extend([self.empty_value] * (max_row_len - len(row)))
+
+ return table
+
+ def _get_row_repeat(self, row) -> int:
+ """Return number of times this row was repeated
+ Repeating an empty row appeared to be a common way
+ of representing sparse rows in the table.
+ """
+ from odf.namespaces import TABLENS
+
+ return int(row.attributes.get((TABLENS, 'number-rows-repeated'), 1))
+
+ def _get_column_repeat(self, cell) -> int:
+ from odf.namespaces import TABLENS
+ return int(cell.attributes.get(
+ (TABLENS, 'number-columns-repeated'), 1))
+
+ def _is_empty_row(self, row) -> bool:
+ """Helper function to find empty rows
+ """
+ for column in row.childNodes:
+ if len(column.childNodes) > 0:
+ return False
+
+ return True
+
+ def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
+ from odf.namespaces import OFFICENS
+ cell_type = cell.attributes.get((OFFICENS, 'value-type'))
+ if cell_type == 'boolean':
+ if str(cell) == "TRUE":
+ return True
+ return False
+ if cell_type is None:
+ return self.empty_value
+ elif cell_type == 'float':
+ # GH5394
+ cell_value = float(cell.attributes.get((OFFICENS, 'value')))
+
+ if cell_value == 0. and str(cell) != cell_value: # NA handling
+ return str(cell)
+
+ if convert_float:
+ val = int(cell_value)
+ if val == cell_value:
+ return val
+ return cell_value
+ elif cell_type == 'percentage':
+ cell_value = cell.attributes.get((OFFICENS, 'value'))
+ return float(cell_value)
+ elif cell_type == 'string':
+ return str(cell)
+ elif cell_type == 'currency':
+ cell_value = cell.attributes.get((OFFICENS, 'value'))
+ return float(cell_value)
+ elif cell_type == 'date':
+ cell_value = cell.attributes.get((OFFICENS, 'date-value'))
+ return pd.to_datetime(cell_value)
+ elif cell_type == 'time':
+ return pd.to_datetime(str(cell)).time()
+ else:
+ raise ValueError('Unrecognized type {}'.format(cell_type))
diff --git a/pandas/tests/io/data/blank.ods b/pandas/tests/io/data/blank.ods
new file mode 100644
index 0000000000000..7ded3c3c1d688
Binary files /dev/null and b/pandas/tests/io/data/blank.ods differ
diff --git a/pandas/tests/io/data/blank_with_header.ods b/pandas/tests/io/data/blank_with_header.ods
new file mode 100644
index 0000000000000..0a2e696267fda
Binary files /dev/null and b/pandas/tests/io/data/blank_with_header.ods differ
diff --git a/pandas/tests/io/data/invalid_value_type.ods b/pandas/tests/io/data/invalid_value_type.ods
new file mode 100644
index 0000000000000..75a7a40b25d79
Binary files /dev/null and b/pandas/tests/io/data/invalid_value_type.ods differ
diff --git a/pandas/tests/io/data/test1.ods b/pandas/tests/io/data/test1.ods
new file mode 100644
index 0000000000000..5dc0e83456264
Binary files /dev/null and b/pandas/tests/io/data/test1.ods differ
diff --git a/pandas/tests/io/data/test2.ods b/pandas/tests/io/data/test2.ods
new file mode 100644
index 0000000000000..2a90db839026b
Binary files /dev/null and b/pandas/tests/io/data/test2.ods differ
diff --git a/pandas/tests/io/data/test3.ods b/pandas/tests/io/data/test3.ods
new file mode 100644
index 0000000000000..dc78781caa6e9
Binary files /dev/null and b/pandas/tests/io/data/test3.ods differ
diff --git a/pandas/tests/io/data/test4.ods b/pandas/tests/io/data/test4.ods
new file mode 100644
index 0000000000000..c73a20d8b0562
Binary files /dev/null and b/pandas/tests/io/data/test4.ods differ
diff --git a/pandas/tests/io/data/test5.ods b/pandas/tests/io/data/test5.ods
new file mode 100644
index 0000000000000..5872e2624d033
Binary files /dev/null and b/pandas/tests/io/data/test5.ods differ
diff --git a/pandas/tests/io/data/test_converters.ods b/pandas/tests/io/data/test_converters.ods
new file mode 100644
index 0000000000000..0216fb16311d8
Binary files /dev/null and b/pandas/tests/io/data/test_converters.ods differ
diff --git a/pandas/tests/io/data/test_index_name_pre17.ods b/pandas/tests/io/data/test_index_name_pre17.ods
new file mode 100644
index 0000000000000..56638c983d944
Binary files /dev/null and b/pandas/tests/io/data/test_index_name_pre17.ods differ
diff --git a/pandas/tests/io/data/test_multisheet.ods b/pandas/tests/io/data/test_multisheet.ods
new file mode 100644
index 0000000000000..39058e67b4d5b
Binary files /dev/null and b/pandas/tests/io/data/test_multisheet.ods differ
diff --git a/pandas/tests/io/data/test_squeeze.ods b/pandas/tests/io/data/test_squeeze.ods
new file mode 100644
index 0000000000000..10ccf0da2693e
Binary files /dev/null and b/pandas/tests/io/data/test_squeeze.ods differ
diff --git a/pandas/tests/io/data/test_types.ods b/pandas/tests/io/data/test_types.ods
new file mode 100644
index 0000000000000..c9a82bfff810b
Binary files /dev/null and b/pandas/tests/io/data/test_types.ods differ
diff --git a/pandas/tests/io/data/testdateoverflow.ods b/pandas/tests/io/data/testdateoverflow.ods
new file mode 100644
index 0000000000000..bb05267865303
Binary files /dev/null and b/pandas/tests/io/data/testdateoverflow.ods differ
diff --git a/pandas/tests/io/data/testdtype.ods b/pandas/tests/io/data/testdtype.ods
new file mode 100644
index 0000000000000..91145f807c9d9
Binary files /dev/null and b/pandas/tests/io/data/testdtype.ods differ
diff --git a/pandas/tests/io/data/testmultiindex.ods b/pandas/tests/io/data/testmultiindex.ods
new file mode 100644
index 0000000000000..b7f03900e6617
Binary files /dev/null and b/pandas/tests/io/data/testmultiindex.ods differ
diff --git a/pandas/tests/io/data/testskiprows.ods b/pandas/tests/io/data/testskiprows.ods
new file mode 100644
index 0000000000000..443602a2c3f98
Binary files /dev/null and b/pandas/tests/io/data/testskiprows.ods differ
diff --git a/pandas/tests/io/data/times_1900.ods b/pandas/tests/io/data/times_1900.ods
new file mode 100644
index 0000000000000..79e031c721ea3
Binary files /dev/null and b/pandas/tests/io/data/times_1900.ods differ
diff --git a/pandas/tests/io/data/times_1904.ods b/pandas/tests/io/data/times_1904.ods
new file mode 100644
index 0000000000000..b47a949d3b715
Binary files /dev/null and b/pandas/tests/io/data/times_1904.ods differ
diff --git a/pandas/tests/io/data/writertable.odt b/pandas/tests/io/data/writertable.odt
new file mode 100644
index 0000000000000..113bd651e8cd0
Binary files /dev/null and b/pandas/tests/io/data/writertable.odt differ
diff --git a/pandas/tests/io/excel/conftest.py b/pandas/tests/io/excel/conftest.py
index 935db254bd2e5..dd96fb2366152 100644
--- a/pandas/tests/io/excel/conftest.py
+++ b/pandas/tests/io/excel/conftest.py
@@ -30,7 +30,7 @@ def df_ref():
return df_ref
-@pytest.fixture(params=['.xls', '.xlsx', '.xlsm'])
+@pytest.fixture(params=['.xls', '.xlsx', '.xlsm', '.ods'])
def read_ext(request):
"""
Valid extensions for reading Excel files.
diff --git a/pandas/tests/io/excel/test_odf.py b/pandas/tests/io/excel/test_odf.py
new file mode 100644
index 0000000000000..76b3fe19a0771
--- /dev/null
+++ b/pandas/tests/io/excel/test_odf.py
@@ -0,0 +1,39 @@
+import functools
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas.util.testing as tm
+
+pytest.importorskip("odf")
+
+
+@pytest.fixture(autouse=True)
+def cd_and_set_engine(monkeypatch, datapath):
+ func = functools.partial(pd.read_excel, engine="odf")
+ monkeypatch.setattr(pd, 'read_excel', func)
+ monkeypatch.chdir(datapath("io", "data"))
+
+
+def test_read_invalid_types_raises():
+ # the invalid_value_type.ods required manually editing
+ # of the included content.xml file
+ with pytest.raises(ValueError,
+ match="Unrecognized type awesome_new_type"):
+ pd.read_excel("invalid_value_type.ods")
+
+
+def test_read_writer_table():
+ # Also test reading tables from an text OpenDocument file
+ # (.odt)
+ index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header")
+ expected = pd.DataFrame([
+ [1, np.nan, 7],
+ [2, np.nan, 8],
+ [3, np.nan, 9],
+ ], index=index, columns=["Column 1", "Unnamed: 2", "Column 3"])
+
+ result = pd.read_excel("writertable.odt", 'Table1', index_col=0)
+
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index be5951fe12b46..ae69c2302e60a 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -33,9 +33,21 @@ def ignore_xlrd_time_clock_warning():
@pytest.fixture(params=[
# Add any engines to test here
- pytest.param('xlrd', marks=td.skip_if_no('xlrd')),
- pytest.param('openpyxl', marks=td.skip_if_no('openpyxl')),
- pytest.param(None, marks=td.skip_if_no('xlrd')),
+ # When defusedxml is installed it triggers deprecation warnings for
+ # xlrd and openpyxl, so catch those here
+ pytest.param('xlrd', marks=[
+ td.skip_if_no('xlrd'),
+ pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
+ ]),
+ pytest.param('openpyxl', marks=[
+ td.skip_if_no('openpyxl'),
+ pytest.mark.filterwarnings("ignore:.*html argument"),
+ ]),
+ pytest.param(None, marks=[
+ td.skip_if_no('xlrd'),
+ pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
+ ]),
+ pytest.param("odf", marks=td.skip_if_no("odf")),
])
def engine(request):
"""
@@ -53,6 +65,11 @@ def cd_and_set_engine(self, engine, datapath, monkeypatch, read_ext):
"""
if engine == 'openpyxl' and read_ext == '.xls':
pytest.skip()
+ if engine == 'odf' and read_ext != '.ods':
+ pytest.skip()
+ if read_ext == ".ods" and engine != "odf":
+ pytest.skip()
+
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data"))
monkeypatch.setattr(pd, 'read_excel', func)
@@ -62,14 +79,16 @@ def test_usecols_int(self, read_ext, df_ref):
# usecols as int
with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ check_stacklevel=False,
+ raise_on_extra_warnings=False):
with ignore_xlrd_time_clock_warning():
df1 = pd.read_excel("test1" + read_ext, "Sheet1",
index_col=0, usecols=3)
# usecols as int
with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ check_stacklevel=False,
+ raise_on_extra_warnings=False):
with ignore_xlrd_time_clock_warning():
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1],
index_col=0, usecols=3)
@@ -439,6 +458,9 @@ def test_bad_engine_raises(self, read_ext):
@tm.network
def test_read_from_http_url(self, read_ext):
+ if read_ext == '.ods': # TODO: remove once on master
+ pytest.skip()
+
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/data/test1' + read_ext)
url_table = pd.read_excel(url)
@@ -736,6 +758,10 @@ def cd_and_set_engine(self, engine, datapath, monkeypatch, read_ext):
"""
Change directory and set engine for ExcelFile objects.
"""
+ if engine == 'odf' and read_ext != '.ods':
+ pytest.skip()
+ if read_ext == ".ods" and engine != "odf":
+ pytest.skip()
if engine == 'openpyxl' and read_ext == '.xls':
pytest.skip()
@@ -802,7 +828,8 @@ def test_excel_table_sheet_by_index(self, read_ext, df_ref):
df3 = pd.read_excel(excel, 0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False,
+ raise_on_extra_warnings=False):
with pd.ExcelFile('test1' + read_ext) as excel:
df4 = pd.read_excel(excel, 0, index_col=0, skip_footer=1)
diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py
index 94e1435d4dfab..d749f0ec3e252 100644
--- a/pandas/tests/io/excel/test_xlrd.py
+++ b/pandas/tests/io/excel/test_xlrd.py
@@ -10,6 +10,12 @@
xlwt = pytest.importorskip("xlwt")
+@pytest.fixture(autouse=True)
+def skip_ods_files(read_ext):
+ if read_ext == ".ods":
+ pytest.skip("Not valid for xlrd")
+
+
def test_read_xlrd_book(read_ext, frame):
df = frame
| closes #2311
This is primarily intended for LibreOffice calc spreadsheets but will
also work with LO Writer and probably with LO Impress documents.
This is an alternate solution to https://github.com/pandas-dev/pandas/pull/9070
There are test cases with several different problematic LibreOffice spread sheets.
git diff upstream/master -u | flake8 appeared to pass.
... I didn't do the whats new entry. Though I expect there's some more work to do before submitting this. I just wanted to get the core code in for comments.
The open issues is, the workaround for https://github.com/pandas-dev/pandas/issues/25422 is embedded in the current code (so all my tests pass right now) but that, or a better solution should move closer to the iso 8601 parser.
Also I don't have the parser class hooked up to a read_excel or read_ods function.
Using read_excel bothers me some... because its not an excel file. I think it would be more clear if there was either a separate read_ods function or a generic read_spreadsheet function than just pressing read_excel into being a generic spreadsheet reader.
Also this just a reader. I had never gotten around to implementing a writer.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25427 | 2019-02-24T06:59:48Z | 2019-07-03T21:45:44Z | 2019-07-03T21:45:43Z | 2019-11-09T15:04:12Z |
Backport PR #25289 on branch 0.24.x (BUG: fixed merging with empty frame containing an Int64 column (#25183)) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index a7e522d27f8e2..8f4beb3f484a4 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -96,7 +96,7 @@ Bug Fixes
**Other**
- Bug in :meth:`Series.is_unique` where single occurrences of ``NaN`` were not considered unique (:issue:`25180`)
--
+- Bug in :func:`merge` when merging an empty ``DataFrame`` with an ``Int64`` column or a non-empty ``DataFrame`` with an ``Int64`` column that is all ``NaN`` (:issue:`25183`)
-
.. _whatsnew_0.242.contributors:
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 640587b7f9f31..cb98274962656 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -190,6 +190,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
pass
elif getattr(self.block, 'is_sparse', False):
pass
+ elif getattr(self.block, 'is_extension', False):
+ pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 1e60fdbebfeb3..9fe4049dd698b 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -39,6 +39,54 @@ def get_test_data(ngroups=NGROUPS, n=N):
return arr
+def get_series():
+ return [
+ pd.Series([1], dtype='int64'),
+ pd.Series([1], dtype='Int64'),
+ pd.Series([1.23]),
+ pd.Series(['foo']),
+ pd.Series([True]),
+ pd.Series([pd.Timestamp('2018-01-01')]),
+ pd.Series([pd.Timestamp('2018-01-01', tz='US/Eastern')]),
+ ]
+
+
+def get_series_na():
+ return [
+ pd.Series([np.nan], dtype='Int64'),
+ pd.Series([np.nan], dtype='float'),
+ pd.Series([np.nan], dtype='object'),
+ pd.Series([pd.NaT]),
+ ]
+
+
+@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
+def series_of_dtype(request):
+ """
+ A parametrized fixture returning a variety of Series of different
+ dtypes
+ """
+ return request.param
+
+
+@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
+def series_of_dtype2(request):
+ """
+ A duplicate of the series_of_dtype fixture, so that it can be used
+ twice by a single function
+ """
+ return request.param
+
+
+@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name)
+def series_of_dtype_all_na(request):
+ """
+ A parametrized fixture returning a variety of Series with all NA
+ values
+ """
+ return request.param
+
+
class TestMerge(object):
def setup_method(self, method):
@@ -428,6 +476,36 @@ def check2(exp, kwarg):
check1(exp_in, kwarg)
check2(exp_out, kwarg)
+ def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
+ # GH 25183
+ df = pd.DataFrame({'key': series_of_dtype, 'value': series_of_dtype2},
+ columns=['key', 'value'])
+ df_empty = df[:0]
+ expected = pd.DataFrame({
+ 'value_x': pd.Series(dtype=df.dtypes['value']),
+ 'key': pd.Series(dtype=df.dtypes['key']),
+ 'value_y': pd.Series(dtype=df.dtypes['value']),
+ }, columns=['value_x', 'key', 'value_y'])
+ actual = df_empty.merge(df, on='key')
+ assert_frame_equal(actual, expected)
+
+ def test_merge_all_na_column(self, series_of_dtype,
+ series_of_dtype_all_na):
+ # GH 25183
+ df_left = pd.DataFrame(
+ {'key': series_of_dtype, 'value': series_of_dtype_all_na},
+ columns=['key', 'value'])
+ df_right = pd.DataFrame(
+ {'key': series_of_dtype, 'value': series_of_dtype_all_na},
+ columns=['key', 'value'])
+ expected = pd.DataFrame({
+ 'key': series_of_dtype,
+ 'value_x': series_of_dtype_all_na,
+ 'value_y': series_of_dtype_all_na,
+ }, columns=['key', 'value_x', 'value_y'])
+ actual = df_left.merge(df_right, on='key')
+ assert_frame_equal(actual, expected)
+
def test_merge_nosort(self):
# #2098, anything to do?
| Backport PR #25289: BUG: fixed merging with empty frame containing an Int64 column (#25183) | https://api.github.com/repos/pandas-dev/pandas/pulls/25426 | 2019-02-24T03:47:48Z | 2019-02-24T07:25:54Z | 2019-02-24T07:25:54Z | 2019-02-24T07:25:55Z |
REF: Fix maybe_promote | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 3e92906be706c..7720dfa713c03 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1,6 +1,7 @@
""" routings for casting """
from datetime import datetime, timedelta
+import warnings
import numpy as np
@@ -48,6 +49,7 @@
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
+ ABCIndexClass,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
@@ -59,6 +61,9 @@
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
+_int64_min = np.iinfo(np.int64).min
+_uint64_max = np.iinfo(np.uint64).max
+_float32_max = np.finfo(np.float32).max
def maybe_convert_platform(values):
@@ -335,6 +340,40 @@ def changeit():
def maybe_promote(dtype, fill_value=np.nan):
+ """
+ Determine minimal dtype to hold fill_value, when starting from dtype
+
+ Parameters
+ ----------
+ dtype : DType
+ The dtype to start from.
+ fill_value : scalar or np.ndarray / Series / Index
+ The value that the output dtype needs to be able to hold.
+
+ NOTE: using arrays is discouraged and will likely be removed from this
+ method in the foreseeable future. Use maybe_promote_with_array instead.
+
+ Returns
+ -------
+ dtype : DType
+ The updated dtype.
+ fill_value : scalar
+ The type of this value depends on the type of the passed fill_value
+
+ * If fill_value is a scalar, the method returns that scalar, but
+ modified to fit the updated dtype. For example, a datetime fill_value
+ will be returned as an integer (representing ns) for M8[ns], and
+ values considered missing (see pd.isna) will be returned as the
+ corresponding missing value marker for the updated dtype.
+ * If fill_value is an ndarray/Series/Index, this method will always
+ return the missing value marker for the updated dtype. This value
+ will be None for dtypes that cannot hold missing values (integers,
+ booleans, bytes).
+
+ See Also
+ --------
+ maybe_promote_with_array : underlying method for array case
+ """
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
@@ -462,6 +501,281 @@ def maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
+def maybe_promote_with_array(dtype, fill_value=np.nan):
+ """
+ Determine minimal dtype to hold fill_value, when starting from dtype
+
+ This will also return the default missing value for the resulting dtype, if
+ necessary (e.g. for datetime / timedelta, the missing value will be `NaT`)
+
+ Parameters
+ ----------
+ dtype : DType
+ The dtype to start from.
+ fill_value : np.ndarray / Series / Index
+ Array-like of values that the output dtype needs to be able to hold.
+
+ Returns
+ -------
+ dtype : DType
+ The updated dtype.
+ na_value : scalar
+ The missing value for the new dtype. Returns None or dtypes that
+ cannot hold missing values (integers, booleans, bytes).
+
+ See Also
+ --------
+ maybe_promote : similar method for scalar case
+
+ Examples
+ --------
+ >>> maybe_promote_with_array(np.dtype('int'), fill_value=np.array([None]))
+ (dtype('float64'), nan)
+ >>> maybe_promote_with_array(np.dtype('float'),
+ ... fill_value=np.array(['abcd']))
+ (dtype('O'), nan)
+
+ For datetimes without timezones, the missing value marker is
+ numpy.datetime64('NaT'), and similarly for timedelta values.
+
+ >>> maybe_promote_with_array(np.dtype('datetime64[ns]'),
+ ... fill_value=np.array([None]))
+ (dtype('<M8[ns]'), numpy.datetime64('NaT'))
+
+ String values do not get cast to datetime/timedelta automatically, but
+ force an upcast to object (with corresponding missing value marker nan).
+
+ >>> maybe_promote_with_array(np.dtype('datetime64[ns]'),
+ ... fill_value=np.array(['2018-01-01']))
+ (dtype('O'), nan)
+
+ The method will infer as conservatively as possible for integer types:
+
+ >>> maybe_promote_with_array(
+ ... np.dtype('uint8'), fill_value=np.array([np.iinfo('uint8').max + 1])
+ ... )
+ (dtype('uint16'), None)
+ >>> maybe_promote_with_array(np.dtype('uint8'), fill_value=np.array([-1]))
+ (dtype('int16'), None)
+ """
+
+ if isinstance(fill_value, np.ndarray):
+ if fill_value.ndim == 0:
+ # zero-dimensional arrays cannot be iterated over
+ fill_value = np.expand_dims(fill_value, 0)
+ elif fill_value.ndim > 1:
+ # ndarray, but too high-dimensional
+ fill_value = fill_value.ravel()
+ elif not isinstance(fill_value, (ABCSeries, ABCIndexClass)):
+ fill_type = type(fill_value).__name__
+ raise ValueError(
+ "fill_value must either be a Series / Index / "
+ "np.ndarray, received {}".format(fill_type)
+ )
+
+ if all(isna(x) for x in fill_value):
+ # only missing values (or no values at all)
+
+ if is_datetime64_dtype(dtype):
+ return dtype, np.datetime64("NaT", "ns")
+ elif is_timedelta64_dtype(dtype):
+ return dtype, np.timedelta64("NaT", "ns")
+ elif is_datetime64tz_dtype(dtype):
+ return dtype, NaT
+
+ na_value = np.nan
+ if len(fill_value) == 0:
+ # empty array; no values to force change
+ if is_integer_dtype(dtype) or dtype in (bool, bytes):
+ # these types do not have a missing value marker
+ na_value = None
+ # otherwise nothing changes
+ elif any(x is NaT for x in fill_value):
+ # presence of pd.NaT upcasts everything that's not
+ # datetime/timedelta (see above) to object
+ dtype = np.dtype(object)
+ elif is_integer_dtype(dtype):
+ # integer + other missing value (np.nan / None) casts to float
+ dtype = np.dtype("float64")
+ elif is_extension_array_dtype(dtype):
+ na_value = dtype.na_value
+ elif is_string_dtype(dtype) or dtype in (bool, bytes):
+ # original dtype cannot hold nans
+ dtype = np.dtype(object)
+
+ return dtype, na_value
+
+ fill_dtype = fill_value.dtype
+ if fill_dtype == object:
+ # for object dtype, we determine if we actually need to upcast
+ # by inferring the dtype of fill_value
+ inferred_dtype = lib.infer_dtype(fill_value, skipna=True)
+
+ # cases that would yield 'empty' have been treated in branch above
+ if inferred_dtype in ["period", "interval", "datetime64tz"]:
+ # TODO: handle & test pandas-dtypes
+ # TODO: lib.infer_dtype does not support datetime64tz yet
+ pass
+ else:
+ # rest can be mapped to numpy dtypes
+ map_inferred_to_numpy = {
+ "floating": float,
+ "mixed-integer-float": float,
+ "decimal": float,
+ "integer": int,
+ "boolean": bool,
+ "complex": complex,
+ "bytes": bytes,
+ "datetime64": "datetime64[ns]",
+ "datetime": "datetime64[ns]",
+ "date": "datetime64[ns]",
+ "timedelta64": "timedelta64[ns]",
+ "timedelta": "timedelta64[ns]",
+ "time": object, # time cannot be cast to datetime/timedelta
+ "string": object,
+ "mixed-integer": object,
+ "mixed": object,
+ }
+ fill_dtype = np.dtype(map_inferred_to_numpy[inferred_dtype])
+
+ # now that we have the correct dtype; check how we must upcast
+ # * extension arrays
+ # * int vs int
+ # * int vs float / complex
+ # * float vs float
+ # * float vs complex (and vice versa)
+ # * bool
+ # * datetimetz
+ # * datetime
+ # * timedelta
+ # * string/object
+
+ # if (is_extension_array_dtype(dtype)
+ # or is_extension_array_dtype(fill_dtype)):
+ # # TODO: dispatch to ExtensionDType.maybe_promote? GH 24246
+ if is_integer_dtype(dtype) and is_integer_dtype(fill_dtype):
+ if is_unsigned_integer_dtype(dtype) and all(fill_value >= 0):
+ # can stay unsigned
+ fill_max = fill_value.max()
+ if fill_max > _uint64_max:
+ return np.dtype(object), np.nan
+
+ while fill_max > np.iinfo(dtype).max:
+ # itemsize is the number of bytes; times eight is number of
+ # bits, which is used in the string identifier of the dtype;
+ # if fill_max is above the max for that dtype,
+ # we double the number of bytes/bits.
+ dtype = np.dtype("uint{}".format(dtype.itemsize * 8 * 2))
+ return dtype, None
+ else:
+ # cannot stay unsigned
+ if dtype == "uint64":
+ # need to hold negative values, but int64 cannot hold
+ # maximum of uint64 -> needs object
+ return np.dtype(object), np.nan
+ elif is_unsigned_integer_dtype(dtype):
+ # need to turn into signed integers to hold negative values
+ # int8 cannot hold maximum of uint8; similar for 16/32
+ # therefore, upcast at least to next higher int-type
+ dtype = np.dtype("int{}".format(dtype.itemsize * 8 * 2))
+
+ fill_max = fill_value.max()
+ fill_min = fill_value.min()
+ if isinstance(fill_max, np.uint64):
+ # numpy comparator is broken for uint64;
+ # see https://github.com/numpy/numpy/issues/12525
+ # use .item to get int object
+ fill_max = fill_max.item()
+
+ # comparison mechanics are broken above _int64_max;
+ # use greater equal instead of equal
+ if fill_max >= _int64_max + 1 or fill_min <= _int64_min - 1:
+ return np.dtype(object), np.nan
+
+ while fill_max > np.iinfo(dtype).max or fill_min < np.iinfo(dtype).min:
+ # same mechanism as above, but for int instead of uint
+ dtype = np.dtype("int{}".format(dtype.itemsize * 8 * 2))
+ return dtype, None
+ elif is_integer_dtype(dtype) and is_float_dtype(fill_dtype):
+ # int with float: always upcasts to float64
+ return np.dtype("float64"), np.nan
+ elif is_integer_dtype(dtype) and is_complex_dtype(fill_dtype):
+ # int with complex: always upcasts to complex128
+ return np.dtype("complex128"), np.nan
+ elif (is_float_dtype(dtype) or is_complex_dtype(dtype)) and is_integer_dtype(
+ fill_dtype
+ ):
+ # float/complex with int: always stays original float/complex dtype
+ return dtype, np.nan
+ elif is_float_dtype(dtype) and is_float_dtype(fill_dtype):
+ # float with float; upcasts depending on absolute max of fill_value
+ if dtype == "float32" and np.abs(fill_value).max() <= _float32_max:
+ return dtype, np.nan
+ # all other cases return float64
+ return np.dtype("float64"), np.nan
+ elif (is_float_dtype(dtype) or is_complex_dtype(dtype)) and (
+ is_float_dtype(fill_dtype) or is_complex_dtype(fill_dtype)
+ ):
+ # at least one is complex; otherwise we'd have hit float/float above
+ with warnings.catch_warnings():
+ # work around GH 27610
+ warnings.filterwarnings("ignore", category=FutureWarning)
+ if (
+ dtype in ["float32", "complex64"]
+ and max(
+ np.abs(np.real(fill_value)).max(), # also works for float
+ np.abs(np.imag(fill_value)).max(),
+ )
+ <= _float32_max
+ ):
+ return np.complex64, np.nan
+ # all other cases return complex128
+ return np.dtype("complex128"), np.nan
+ elif is_bool_dtype(dtype) and is_bool_dtype(fill_dtype):
+ # bool with bool is the only combination that stays bool; any other
+ # combination involving bool upcasts to object, see else-clause below
+ return dtype, None
+ elif (
+ is_datetime64tz_dtype(dtype)
+ and is_datetime64tz_dtype(fill_dtype)
+ and (dtype.tz == fill_dtype.tz)
+ ):
+ # datetimetz with datetimetz with the same timezone is the only
+ # combination that stays datetimetz (in particular, mixing timezones or
+ # tz-aware and tz-naive datetimes will cast to object); any other
+ # combination involving datetimetz upcasts to object, see below
+ return dtype, NaT
+ elif (is_timedelta64_dtype(dtype) and is_timedelta64_dtype(fill_dtype)) or (
+ is_datetime64_dtype(dtype) and is_datetime64_dtype(fill_dtype)
+ ):
+ # datetime and timedelta try to cast; if successful, keep dtype,
+ # otherwise upcast to object
+ try:
+ with warnings.catch_warnings():
+ msg = (
+ "parsing timezone aware datetimes is deprecated; "
+ "this will raise an error in the future"
+ )
+ warnings.filterwarnings(
+ "ignore", message=msg, category=DeprecationWarning
+ )
+ fill_value.astype(dtype)
+
+ # can simplify if-cond. compared to cond. for entering this branch
+ if is_datetime64_dtype(dtype):
+ na_value = np.datetime64("NaT", "ns")
+ else:
+ na_value = np.timedelta64("NaT", "ns")
+ except (ValueError, TypeError):
+ dtype = np.dtype(object)
+ na_value = np.nan
+ return dtype, na_value
+ else:
+ # anything else (e.g. strings, objects, bytes, or unmatched
+ # bool / datetime / datetimetz / timedelta)
+ return np.dtype(object), np.nan
+
+
def _ensure_dtype_type(value, dtype):
"""
Ensure that the given value is an instance of the given dtype.
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index 5c61574eddb50..34aaa0605b15f 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -3,6 +3,7 @@
"""
import datetime
+import warnings
import numpy as np
import pytest
@@ -10,7 +11,7 @@
from pandas._libs.tslibs import NaT
from pandas.compat import is_platform_windows
-from pandas.core.dtypes.cast import maybe_promote
+from pandas.core.dtypes.cast import maybe_promote, maybe_promote_with_array
from pandas.core.dtypes.common import (
is_complex_dtype,
is_datetime64_dtype,
@@ -19,7 +20,6 @@
is_integer_dtype,
is_object_dtype,
is_scalar,
- is_string_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
@@ -134,7 +134,7 @@ def _check_promote(
# box_dtype; the expected value returned from maybe_promote is the
# missing value marker for the returned dtype.
fill_array = np.array([fill_value], dtype=box_dtype)
- result_dtype, result_fill_value = maybe_promote(dtype, fill_array)
+ result_dtype, result_fill_value = maybe_promote_with_array(dtype, fill_array)
expected_fill_value = exp_val_for_array
else:
# here, we pass on fill_value as a scalar directly; the expected value
@@ -163,7 +163,11 @@ def _assert_match(result_fill_value, expected_fill_value):
# On some builds, type comparison fails, e.g. np.int32 != np.int32
assert res_type == ex_type or res_type.__name__ == ex_type.__name__
- match_value = result_fill_value == expected_fill_value
+ with warnings.catch_warnings():
+ # we do not care about this warning, NaT is handled below anyway
+ msg = "In the future, 'NAT == x' and 'x == NAT' will always be False"
+ warnings.filterwarnings("ignore", message=msg, category=FutureWarning)
+ match_value = result_fill_value == expected_fill_value
# Note: type check above ensures that we have the _same_ NA value
# for missing values, None == None (which is checked
@@ -285,15 +289,7 @@ def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype, box):
expected_dtype = np.dtype(expected_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if boxed:
- if expected_dtype != object:
- pytest.xfail("falsely casts to object")
- if box_dtype is None and (
- fill_value > np.iinfo("int64").max or np.iinfo("int64").min < fill_value < 0
- ):
- pytest.xfail("falsely casts to float instead of object")
-
- # output is not a generic int, but corresponds to expected_dtype
+ # output is not a python int, but a numpy int of expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
# no missing value marker for integers
exp_val_for_array = None if expected_dtype != "object" else np.nan
@@ -309,8 +305,6 @@ def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype, box):
)
-# override parametrization due to to many xfails; see GH 23982 / 25425
-@pytest.mark.parametrize("box", [(True, None), (False, None)])
def test_maybe_promote_int_with_float(any_int_dtype, float_dtype, box):
dtype = np.dtype(any_int_dtype)
fill_dtype = np.dtype(float_dtype)
@@ -336,8 +330,6 @@ def test_maybe_promote_int_with_float(any_int_dtype, float_dtype, box):
)
-# override parametrization due to to many xfails; see GH 23982 / 25425
-@pytest.mark.parametrize("box", [(True, None), (False, None)])
def test_maybe_promote_float_with_int(float_dtype, any_int_dtype, box):
dtype = np.dtype(float_dtype)
@@ -396,17 +388,6 @@ def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype, box):
expected_dtype = np.dtype(expected_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if box_dtype == object:
- pytest.xfail("falsely upcasts to object")
- elif boxed and is_float_dtype(dtype) and is_complex_dtype(expected_dtype):
- pytest.xfail("does not upcast to complex")
- elif boxed and (dtype, expected_dtype) in [
- ("float32", "float64"),
- ("float32", "complex64"),
- ("complex64", "complex128"),
- ]:
- pytest.xfail("does not upcast correctly depending on value")
-
# output is not a generic float, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
exp_val_for_array = np.nan
@@ -427,13 +408,6 @@ def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced, box):
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if boxed and fill_dtype == bool:
- pytest.xfail("falsely upcasts to object")
- if boxed and box_dtype is None and fill_dtype.kind == "M":
- pytest.xfail("wrongly casts fill_value")
- if boxed and box_dtype is None and fill_dtype.kind == "m":
- pytest.xfail("wrongly casts fill_value")
-
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
@@ -458,11 +432,6 @@ def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced, box):
fill_value = True
boxed, box_dtype = box # read from parametrized fixture
- if boxed and dtype == bool:
- pytest.xfail("falsely upcasts to object")
- if boxed and dtype not in (str, object) and box_dtype is None:
- pytest.xfail("falsely upcasts to object")
-
# filling anything but bool with bool casts to object
expected_dtype = np.dtype(object) if dtype != bool else dtype
# output is not a generic bool, but corresponds to expected_dtype
@@ -508,25 +477,18 @@ def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced, box)
@pytest.mark.parametrize(
"box",
[
- (True, None), # fill_value wrapped in array with auto-dtype (fixed len)
- (True, "bytes"), # fill_value wrapped in array with generic bytes-dtype
+ (True, None), # fill_value wrapped in array with default dtype
+ (True, "bytes"), # fill_value in array with generic bytes dtype
(True, object), # fill_value wrapped in array with object dtype
- (False, None), # fill_value directly
+ (False, None), # fill_value passed on as scalar
],
+ ids=["True-None", "True-bytes", "True-object", "False-None"],
)
def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_dtype = np.dtype(bytes_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if not issubclass(dtype.type, np.bytes_):
- if (
- boxed
- and (box_dtype == "bytes" or box_dtype is None)
- and not (is_string_dtype(dtype) or dtype == bool)
- ):
- pytest.xfail("does not upcast to object")
-
# create array of given dtype
fill_value = b"abc"
@@ -557,13 +519,6 @@ def test_maybe_promote_datetime64_with_any(
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if is_datetime64_dtype(fill_dtype):
- if box_dtype == object:
- pytest.xfail("falsely upcasts to object")
- else:
- if boxed and box_dtype is None:
- pytest.xfail("does not upcast to object")
-
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
@@ -594,11 +549,11 @@ def test_maybe_promote_datetime64_with_any(
"box",
[
(True, None), # fill_value wrapped in array with default dtype
- # disabled due to too many xfails; see GH 23982 / 25425
- # (True, 'dt_dtype'), # fill_value in array with explicit datetime dtype
- # (True, object), # fill_value wrapped in array with object dtype
+ (True, "dt_dtype"), # fill_value in array with explicit datetime dtype
+ (True, object), # fill_value wrapped in array with object dtype
(False, None), # fill_value passed on as scalar
],
+ ids=["True-None", "True-dt_dtype", "True-object", "False-None"],
)
@pytest.mark.parametrize(
"fill_value",
@@ -616,23 +571,10 @@ def test_maybe_promote_any_with_datetime64(
dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if is_datetime64_dtype(dtype):
- if boxed and (
- box_dtype == object
- or (box_dtype is None and not is_datetime64_dtype(type(fill_value)))
- ):
- pytest.xfail("falsely upcasts to object")
- else:
- if boxed and (
- box_dtype == "dt_dtype"
- or (box_dtype is None and is_datetime64_dtype(type(fill_value)))
- ):
- pytest.xfail("mix of lack of upcasting, resp. wrong missing value")
-
# special case for box_dtype
box_dtype = np.dtype(datetime64_dtype) if box_dtype == "dt_dtype" else box_dtype
- # filling datetime with anything but datetime casts to object
+ # filling anything but datetime with datetime casts to object
if is_datetime64_dtype(dtype):
expected_dtype = dtype
# for datetime dtypes, scalar values get cast to pd.Timestamp.value
@@ -654,8 +596,6 @@ def test_maybe_promote_any_with_datetime64(
)
-# override parametrization due to to many xfails; see GH 23982 / 25425
-@pytest.mark.parametrize("box", [(True, object)])
def test_maybe_promote_datetimetz_with_any_numpy_dtype(
tz_aware_fixture, any_numpy_dtype_reduced, box
):
@@ -663,6 +603,9 @@ def test_maybe_promote_datetimetz_with_any_numpy_dtype(
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
+ if not boxed:
+ pytest.xfail("unfixed error: does not upcast correctly")
+
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
@@ -682,8 +625,6 @@ def test_maybe_promote_datetimetz_with_any_numpy_dtype(
)
-# override parametrization due to to many xfails; see GH 23982 / 25425
-@pytest.mark.parametrize("box", [(True, None), (True, object)])
def test_maybe_promote_datetimetz_with_datetimetz(
tz_aware_fixture, tz_aware_fixture2, box
):
@@ -693,12 +634,17 @@ def test_maybe_promote_datetimetz_with_datetimetz(
from dateutil.tz import tzlocal
+ if not boxed:
+ pytest.xfail("unfixed error: does not upcast for unmatched timezones")
if is_platform_windows() and tz_aware_fixture2 == tzlocal():
pytest.xfail("Cannot process fill_value with this dtype, see GH 24310")
- if dtype.tz == fill_dtype.tz and boxed:
- pytest.xfail("falsely upcasts")
+ if dtype.tz == fill_dtype.tz:
+ # here we should keep the datetime64tz dtype, but since that cannot be
+ # inferred correctly for fill_value, the calling dtype ends up being
+ # compared to a tz-naive datetime64-dtype, and must therefore upcast
+ pytest.xfail("cannot infer datetime64tz dtype, see GH 23554")
- # create array of given dtype; casts "1" to correct dtype
+ # create array of given dtype; casts "10 ** 9" to correct dtype
fill_value = pd.Series([10 ** 9], dtype=fill_dtype)[0]
# filling datetimetz with datetimetz casts to object, unless tz matches
@@ -722,8 +668,6 @@ def test_maybe_promote_datetimetz_with_datetimetz(
@pytest.mark.parametrize("fill_value", [None, np.nan, NaT])
-# override parametrization due to to many xfails; see GH 23982 / 25425
-@pytest.mark.parametrize("box", [(False, None)])
def test_maybe_promote_datetimetz_with_na(tz_aware_fixture, fill_value, box):
dtype = DatetimeTZDtype(tz=tz_aware_fixture)
@@ -761,6 +705,11 @@ def test_maybe_promote_any_numpy_dtype_with_datetimetz(
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
boxed, box_dtype = box # read from parametrized fixture
+ if is_datetime64_dtype(dtype):
+ # fill_dtype does not get inferred correctly to datetime64tz but to
+ # datetime64, which then falsely matches with datetime64 dtypes.
+ pytest.xfail("cannot infer datetime64tz dtype, see GH 23554")
+
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
# filling any numpy dtype with datetimetz casts to object
@@ -786,13 +735,6 @@ def test_maybe_promote_timedelta64_with_any(
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if is_timedelta64_dtype(fill_dtype):
- if box_dtype == object:
- pytest.xfail("falsely upcasts to object")
- else:
- if boxed and box_dtype is None:
- pytest.xfail("does not upcast to object")
-
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
@@ -828,11 +770,11 @@ def test_maybe_promote_timedelta64_with_any(
"box",
[
(True, None), # fill_value wrapped in array with default dtype
- # disabled due to too many xfails; see GH 23982 / 25425
- # (True, 'td_dtype'), # fill_value in array with explicit timedelta dtype
+ (True, "td_dtype"), # fill_value in array with explicit timedelta dtype
(True, object), # fill_value wrapped in array with object dtype
(False, None), # fill_value passed on as scalar
],
+ ids=["True-None", "True-td_dtype", "True-object", "False-None"],
)
def test_maybe_promote_any_with_timedelta64(
any_numpy_dtype_reduced, timedelta64_dtype, fill_value, box
@@ -840,16 +782,6 @@ def test_maybe_promote_any_with_timedelta64(
dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if is_timedelta64_dtype(dtype):
- if boxed and (
- box_dtype == object
- or (box_dtype is None and not is_timedelta64_dtype(type(fill_value)))
- ):
- pytest.xfail("falsely upcasts to object")
- else:
- if boxed and box_dtype is None and is_timedelta64_dtype(type(fill_value)):
- pytest.xfail("does not upcast correctly")
-
# special case for box_dtype
box_dtype = np.dtype(timedelta64_dtype) if box_dtype == "td_dtype" else box_dtype
@@ -904,11 +836,12 @@ def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype_reduced, bo
"box",
[
# disabled due to too many xfails; see GH 23982 / 25425
- # (True, None), # fill_value wrapped in array with default dtype
- # (True, 'str'), # fill_value wrapped in array with generic string-dtype
+ (True, None), # fill_value wrapped in array with default dtype
+ (True, "str"), # fill_value wrapped in array with generic string-dtype
(True, object), # fill_value wrapped in array with object dtype
(False, None), # fill_value passed on as scalar
],
+ ids=["True-None", "True-str", "True-object", "False-None"],
)
def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype, box):
dtype = np.dtype(any_numpy_dtype_reduced)
@@ -986,8 +919,15 @@ def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype, bo
@pytest.mark.parametrize("fill_value", [None, np.nan, NaT])
-# override parametrization due to to many xfails; see GH 23982 / 25425
-@pytest.mark.parametrize("box", [(False, None)])
+# override parametrization of box, because default dtype for na is always float
+@pytest.mark.parametrize(
+ "box",
+ [
+ (True, object), # fill_value wrapped in array with object dtype
+ (False, None), # fill_value passed on as scalar
+ ],
+ ids=["True-object", "False-None"],
+)
def test_maybe_promote_any_numpy_dtype_with_na(
any_numpy_dtype_reduced, fill_value, box
):
@@ -1052,11 +992,18 @@ def test_maybe_promote_dimensions(any_numpy_dtype_reduced, dim):
fill_array = np.expand_dims(fill_array, 0)
# test against 1-dimensional case
- expected_dtype, expected_missing_value = maybe_promote(
+ expected_dtype, expected_missing_value = maybe_promote_with_array(
dtype, np.array([1], dtype=dtype)
)
- result_dtype, result_missing_value = maybe_promote(dtype, fill_array)
+ result_dtype, result_missing_value = maybe_promote_with_array(dtype, fill_array)
assert result_dtype == expected_dtype
_assert_match(result_missing_value, expected_missing_value)
+
+
+def test_maybe_promote_raises(any_numpy_dtype):
+ msg = "fill_value must either be a Series / Index / np.ndarray, received.*"
+ with pytest.raises(ValueError, match=msg):
+ # something that's not a Series / Index / np.ndarray
+ maybe_promote_with_array(any_numpy_dtype, 1)
| - [x] closes #23833
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
This PR is the culmination of ongoing work since the start of November, and is therefore a bit on the bigger side, with several notes to make.
Things started out with me wanting to unify `.update` for Series/DF (#22358), resp. aiming towards a beefed-up `update`/`combine_first`/`coalesce` (#22812). While tackling the former (#23192), I encountered some problems with `df.update` upcasting stuff unnecessarily (#23606), and while trying to fix it, I ran into problems with `maybe_upcast_putmask` (#23823), which were directly caused by the utterly broken (and completely untested) `maybe_promote` (#23833).
I started with writing some tests (#23982), which turned out to be not so trivial, because there's a lot of complexity, and the correct behaviour wasn't alwasy immediate (also encountered some fun numpy bugs in the process: e.g. numpy/numpy#12525, numpy/numpy#12550)
I set out to write out a PR to fix those tests then, with the obvious goal of getting the test suite to pass - already that required a full rewrite of the method. I cracked my own tests after a while, but the test suite eluded me. As it turns out, `maybe_promote` mixes two very different behaviours - scalar values get cast to the new dtype, whereas arrays return their missing value marker. I tried kludging around this for a while, and decided it wasn't possible without creating a franken-solution.
The next step was to *separate* these two different behaviours into different functions, `maybe_promote_with_scalar` and `maybe_promote_with_array`, where `maybe_promote` is then just a thin wrapper that switches between the two. Actually also `maybe_promote_with_scalar` is just a fairly thin wrapper around `maybe_promote_with_array`, so that the actual many-cased promotion logic does not have to be implemented twice.
Often, the call-sites in the code just need the one or the other, and this could later be broken up correspondingly.
I updated the tests in #23982 (taking care to fully capture all the xfails there) and based this PR on that. This should give already an overview of what changed. In many cases, the current behaviour is broken, but I did make a few design decisions worth noting:
* `maybe_promote_with_array` consistently returns the missing value marker for the updated dtype. Since integer dtypes (plus bools and bytes) cannot hold `np.nan`, these cases now return `None`.
* all promotion logic is as conservative as possible, also within subtypes. For arrays, promotion always goes by value, and never by dtype. That means that, for example:
```
>>> maybe_promote(np.dtype('uint8'), fill_value=np.iinfo('uint8').max + 1)
(dtype('uint16'), 256)
>>> maybe_promote(np.dtype('uint8'), fill_value=np.array([-1], dtype='int64'))
(dtype('int16'), None)
```
* all promotion logic is as type-safe as possible, which means that [x] only stays [x] if the `fill_value` is of type [x] as well, where x is one of (datetime, timedelta, bool, bytes). Datetimetz must additionally match the timezone.
* all scalar `fill_values` now truly get cast to the updated dtype (before there were lots of ambiguities around int/float/complex/datetime/timedelta subtypes)
* I have changed the behavior that strings get interpreted for datetimes/timedeltas. Since this is an untested private method, and the test suite still passes just fine, I think this is actually a good thing, because it's too much in one method. String to datetime/timedelta *should* need an explicit cast, IMO.
```
>>> # master
>>> maybe_promote(np.dtype('datetime64[ns]'), '2018-01-01')
(dtype('<M8[ns]'), 1514764800000000000)
>>> # PR
>>> maybe_promote(np.dtype('datetime64[ns]'), '2018-01-01')
(dtype('O'), '2018-01-01')
>>> # master
>>> maybe_promote(np.dtype('timedelta64[ns]'), '1 day')
(dtype('<m8[ns]'), 86400000000000)
>>> # PR
>>> maybe_promote(np.dtype('timedelta64[ns]'), '1 day')
(dtype('O'), '1 day')
```
* `iNaT` is considered a missing value from the POV of `maybe_promote_with_array` in all situations. This takes one single integer out of the usable `int64`-range, but I think this is much cleaner.
There's still a few issues with `lib.infer_dtype` (e.g. #23554, of which I already fixed the complex case #25382), most notably that it cannot infer `datetime64tz` yet. Actually, through this PR, I'm learning how broken that method is *as well*, but fixing that will have to wait for some other time. Among other things, it currently faceplants for `PeriodArray` / `IntervalArray` (#23553). I haven't added tests for these types here, but ~9000 tests is already better than nothing, I hope. ;-)
Another point that could/should be considered is how EAs should deal with this (#24246). | https://api.github.com/repos/pandas-dev/pandas/pulls/25425 | 2019-02-24T02:54:29Z | 2019-10-30T12:02:29Z | null | 2019-10-30T12:03:01Z |
Fixed regression of Multi index with NaN | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index d186fdfe0f322..a9ac687f5e2f5 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -199,6 +199,7 @@ Missing
- Fixed misleading exception message in :meth:`Series.missing` if argument ``order`` is required, but omitted (:issue:`10633`, :issue:`24014`).
- Fixed class type displayed in exception message in :meth:`DataFrame.dropna` if invalid ``axis`` parameter passed (:issue:`25555`)
+- Fixed MultiIndex bug copying values incorrectly when adding values to index, in case `NaN` is included in the index (:issue:`22247`)
-
MultiIndex
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 8cea529fbb07e..5b42697548dff 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -591,7 +591,7 @@ cdef class BaseMultiIndexCodesEngine:
level, then locating (the integer representation of) codes.
"""
def __init__(self, object levels, object labels,
- ndarray[uint64_t, ndim=1] offsets):
+ ndarray[uint64_t, ndim=1] offsets, hasnans):
"""
Parameters
----------
@@ -605,6 +605,7 @@ cdef class BaseMultiIndexCodesEngine:
self.levels = levels
self.offsets = offsets
+ self.hasnans = hasnans
# Transform labels in a single array, and add 1 so that we are working
# with positive integers (-1 for NaN becomes 0):
@@ -657,6 +658,14 @@ cdef class BaseMultiIndexCodesEngine:
indexer = indexer[order]
else:
indexer = self._base.get_indexer(self, lab_ints)
+ # HashTable return same value for 'NaN' and new value
+ # simple fix by take maximum value from array and plus once
+ len = indexer.size - 1
+ if len + 1 > 1 and self.hasnans:
+ check_dup = np.any(self._isin(indexer[0:len],
+ indexer[len:indexer.size]))
+ if check_dup and indexer[len]==-1:
+ indexer[len] = np.max(indexer) + 1
return indexer
@@ -673,8 +682,18 @@ cdef class BaseMultiIndexCodesEngine:
# Transform indices into single integer:
lab_int = self._codes_to_ints(np.array(indices, dtype='uint64'))
-
- return self._base.get_loc(self, lab_int)
+ ret = []
+ try:
+ ret = self._base.get_loc(self, lab_int)
+ except KeyError:
+ if self.hasnans:
+ # as NaN value, we have 0 bit represent for codes
+ # hacking here by add position of NaN in levels.
+ lab_int += len(self.levels[len(self.levels)-1])
+ ret = self._base.get_loc(self, np.uint64(lab_int))
+ else:
+ raise KeyError(lab_int)
+ return ret
def get_indexer_non_unique(self, object target):
# This needs to be overridden just because the default one works on
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 616c17cd16f9a..805b29ab57f53 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -17,7 +17,7 @@
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_categorical_dtype, is_hashable,
is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar,
- pandas_dtype)
+ is_string_dtype, pandas_dtype)
from pandas.core.dtypes.dtypes import ExtensionDtype, PandasExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
@@ -74,8 +74,30 @@ def _codes_to_ints(self, codes):
# Single key
return np.bitwise_or.reduce(codes)
+ codes = np.bitwise_or.reduce(codes, axis=1)
+ if codes.size > 1 and self.hasnans:
+ check_dup = np.any(algos.isin(codes[0:codes.size - 1],
+ codes[codes.size - 1:codes.size]))
+ if check_dup:
+ codes[codes.size - 1] = np.max(codes) + 1
+
# Multiple keys
- return np.bitwise_or.reduce(codes, axis=1)
+ return codes
+
+ def _isin(self, comps, values):
+ """
+ Compute the isin boolean array
+ Note just wraping algorithms.isin function to avoid fail of isort
+ Parameters
+ ----------
+ comps : array-like
+ values : array-like
+
+ Returns
+ -------
+ boolean array same length as comps
+ """
+ return algos.isin(comps, values)
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine,
@@ -116,8 +138,32 @@ def _codes_to_ints(self, codes):
# Single key
return np.bitwise_or.reduce(codes)
+ codes = np.bitwise_or.reduce(codes, axis=1)
+ # Shift return same value for 'NaN' and new value
+ # simple fix by take maximum value from array and plus once
+ if codes.size > 1 and self.hasnans:
+ check_dup = np.any(algos.isin(codes[0:codes.size - 1],
+ codes[codes.size - 1:codes.size]))
+ if check_dup:
+ codes[codes.size - 1] = np.max(codes) + 1
+
# Multiple keys
- return np.bitwise_or.reduce(codes, axis=1)
+ return codes
+
+ def _isin(self, comps, values):
+ """
+ Compute the isin boolean array
+ Note just wraping algorithms.isin function to avoid fail of isort
+ Parameters
+ ----------
+ comps : array-like
+ values : array-like
+
+ Returns
+ -------
+ boolean array same length as comps
+ """
+ return algos.isin(comps, values)
class MultiIndex(Index):
@@ -208,6 +254,7 @@ class MultiIndex(Index):
_levels = FrozenList()
_codes = FrozenList()
_comparables = ['names']
+ _isna = False
rename = Index.set_names
# --------------------------------------------------------------------
@@ -702,6 +749,34 @@ def _set_codes(self, codes, level=None, copy=False, validate=True,
self._codes = new_codes
self._tuples = None
self._reset_cache()
+ self._hasnans()
+
+ def _hasnans(self):
+ """
+ Return if I have any nans
+ """
+ is_not_right_level = False
+ try:
+ self._verify_integrity()
+ except ValueError:
+ is_not_right_level = True
+
+ if is_not_right_level:
+ return
+
+ if (self.values.size > 0 and is_string_dtype(self.values)):
+ flat = []
+ # flatten tuple to 1-D array for searching 'NaN'
+ for row in self.values:
+ flat.extend(row)
+ # algorithms.isin can not pass test_has_duplicates_overflow
+ with warnings.catch_warnings():
+ warnings.simplefilter(action='ignore', category=FutureWarning)
+ try:
+ self._isna = np.array(np.where(
+ np.hstack(flat) == 'nan')).size > 0
+ except UnicodeDecodeError:
+ self._isna = False
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
@@ -1161,8 +1236,10 @@ def _engine(self):
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
- return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
- return MultiIndexUIntEngine(self.levels, self.codes, offsets)
+ return MultiIndexPyIntEngine(self.levels,
+ self.codes, offsets, self._isna)
+ return MultiIndexUIntEngine(self.levels,
+ self.codes, offsets, self._isna)
@property
def values(self):
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index cd4adfa96ef54..338a8b9b03753 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -127,3 +127,73 @@ def test_nan_stays_float():
assert pd.isna(df0.index.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(dfm.index.get_level_values(1)[:-1]).all()
+
+
+def test_nan_multi_index():
+ # GH 22247
+ # When using the MultiIndex features of pandas, when an `np.nan`
+ # is in the index when new values are added to the DF then the
+ # values are not `np.nan`, but copied from the `np.nan` row.
+ df = pd.DataFrame(
+ [
+ ['A', np.nan, 1.23, 4.56],
+ ['A', 'G', 1.23, 4.56],
+ ['A', 'D', 9.87, 10.54],
+ ],
+ columns=['pivot_0', 'pivot_1', 'col_1', 'col_2'],
+ )
+ df.set_index(['pivot_0', 'pivot_1'], inplace=True)
+ pivot_0 = 'A'
+ pivot_1_values = ['D', 'E', 'F']
+ for value in pivot_1_values:
+ if value not in df.index.get_level_values('pivot_1').tolist():
+ df.at[(pivot_0, value), 'col_2'] = 0.0
+
+ assert df.loc[('A', 'F')]['col_2'] == 0.0 # Pass
+ # Fails: value of 1.23 from the first row in the df is copied
+ # This behavior shows for all versions v0.23.x, however is fine for 0.22.0.
+ assert pd.isna(df.loc[('A', 'F')]['col_1'])
+
+
+def test_nan_set_value_multi_index():
+ # GH 22247
+ # When using the MultiIndex features of pandas, when an `np.nan`
+ # is in the index when new values are added to the DF then the
+ # values are not `np.nan`, but copied from the `np.nan` row.
+ df = pd.DataFrame(
+ [
+ ['A', 'G', 1.23, 4.56],
+ ['A', 'D', 9.87, 10.54],
+ ],
+ columns=['pivot_0', 'pivot_1', 'col_1', 'col_2'],
+ )
+ df.set_index(['pivot_0', 'pivot_1'], inplace=True)
+ df.at[('A', 'E'), 'col_2'] = 0.0
+ df.at[('A', 'F'), 'col_2'] = 0.0
+ # Fails: raise exception
+ # This behavior shows for all versions v0.23.x, however is fine for 0.22.0.
+ df.at[('A', np.nan), 'col_2'] = 0.0
+
+ assert df.loc[('A', np.nan)]['col_2'] == 0.0
+ assert pd.isna(df.loc[('A', np.nan)]['col_1'])
+
+
+def test_nan_sigle_index():
+ # GH 22247
+ df = pd.DataFrame(
+ [
+ [np.nan, 1.23, 4.56],
+ ['G', 1.23, 4.56],
+ ['D', 9.87, 10.54],
+ ],
+ columns=['pivot_0', 'col_1', 'col_2'],
+ )
+ df.set_index(['pivot_0'], inplace=True)
+
+ pivot_0_values = ['D', 'E', 'F']
+ for value in pivot_0_values:
+ if value not in df.index.get_level_values('pivot_0').tolist():
+ df.at[(value), 'col_2'] = 0.0
+
+ assert df.loc[('F')]['col_2'] == 0.0
+ assert pd.isna(df.loc[('F')]['col_1'])
| - [x] closes #22247
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
As pull #19074 changed compute of hashtable from `self.values` to `self.codes` and `self.levels`, NaN values doesn't cover.
I just simply fixed this regression the special case, by check return value from the hashtable. | https://api.github.com/repos/pandas-dev/pandas/pulls/25424 | 2019-02-24T02:16:12Z | 2019-04-16T20:10:13Z | null | 2019-04-16T20:10:13Z |
DOC: Rewriting of ParserError doc + minor spacing | diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 493ee65f63c6a..7d5a7f1a99e41 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -9,10 +9,10 @@
class PerformanceWarning(Warning):
"""
- Warning raised when there is a possible
- performance impact.
+ Warning raised when there is a possible performance impact.
"""
+
class UnsupportedFunctionCall(ValueError):
"""
Exception raised when attempting to call a numpy function
@@ -20,6 +20,7 @@ class UnsupportedFunctionCall(ValueError):
the object e.g. ``np.cumsum(groupby_object)``.
"""
+
class UnsortedIndexError(KeyError):
"""
Error raised when attempting to get a slice of a MultiIndex,
@@ -31,9 +32,15 @@ class UnsortedIndexError(KeyError):
class ParserError(ValueError):
"""
- Exception that is raised by an error encountered in `pd.read_csv`.
+ Exception that is raised by an error encountered in parsing file contents.
+
+ This is a generic error raised for errors encountered when functions like
+ `read_csv` or `read_html` are parsing contents of a file.
- e.g. HTML Parsing will raise this error.
+ See Also
+ --------
+ read_csv : Read CSV (comma-separated) file into a DataFrame.
+ read_html : Read HTML table into a DataFrame.
"""
@@ -182,4 +189,4 @@ def __str__(self):
else:
name = self.class_instance.__class__.__name__
msg = "This {methodtype} must be defined in the concrete class {name}"
- return (msg.format(methodtype=self.methodtype, name=name))
+ return msg.format(methodtype=self.methodtype, name=name)
| Follow-up to #25414.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25421 | 2019-02-23T19:57:33Z | 2019-02-24T03:27:13Z | 2019-02-24T03:27:13Z | 2019-02-24T03:27:17Z |
fixed geo accessor example in extending.rst | diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index e6928d9efde06..9e5034f6d3db0 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -33,8 +33,9 @@ decorate a class, providing the name of attribute to add. The class's
@staticmethod
def _validate(obj):
- if 'lat' not in obj.columns or 'lon' not in obj.columns:
- raise AttributeError("Must have 'lat' and 'lon'.")
+ # verify there is a column latitude and a column longitude
+ if 'latitude' not in obj.columns or 'longitude' not in obj.columns:
+ raise AttributeError("Must have 'latitude' and 'longitude'.")
@property
def center(self):
| The previous code was incompatible with the following paragraph. See the second block of code in https://pandas.pydata.org/pandas-docs/stable/development/extending.html#registering-custom-accessors
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
<details>
```python
import pandas as pd
import numpy as np
```
# Before
## Register the DataFrame accessor "geo"
```python
@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor(object):
def __init__(self, pandas_obj):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj):
if 'lat' not in obj.columns or 'lon' not in obj.columns:
raise AttributeError("Must have 'lat' and 'lon'.")
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map, e.g., using Cartopy
pass
```
## Test the DataFrame accessor "geo"
```python
ds = pd.DataFrame({'longitude': np.linspace(0, 10),
'latitude': np.linspace(0, 20)})
ds.geo.center
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-71f0445704bc> in <module>()
1 ds = pd.DataFrame({'longitude': np.linspace(0, 10),
2 'latitude': np.linspace(0, 20)})
----> 3 ds.geo.center
~\Anaconda3\lib\site-packages\pandas\core\generic.py in __getattr__(self, name)
5061 if (name in self._internal_names_set or name in self._metadata or
5062 name in self._accessors):
-> 5063 return object.__getattribute__(self, name)
5064 else:
5065 if self._info_axis._can_hold_identifiers_and_holds_name(name):
~\Anaconda3\lib\site-packages\pandas\core\accessor.py in __get__(self, obj, cls)
169 # we're accessing the attribute of the class, i.e., Dataset.geo
170 return self._accessor
--> 171 accessor_obj = self._accessor(obj)
172 # Replace the property with the accessor object. Inspired by:
173 # http://www.pydanny.com/cached-property.html
<ipython-input-2-114caa965d3b> in __init__(self, pandas_obj)
2 class GeoAccessor(object):
3 def __init__(self, pandas_obj):
----> 4 self._validate(pandas_obj)
5 self._obj = pandas_obj
6
<ipython-input-2-114caa965d3b> in _validate(obj)
8 def _validate(obj):
9 if 'lat' not in obj.columns or 'lon' not in obj.columns:
---> 10 raise AttributeError("Must have 'lat' and 'lon'.")
11
12 @property
AttributeError: Must have 'lat' and 'lon'.
# After
## Register the DataFrame accessor "geo"
```python
@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor(object):
def __init__(self, pandas_obj):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj):
# verify each column name contains 'lat' or 'lon'
for col in obj.columns:
if 'lat' not in col and 'lon' not in col:
raise AttributeError("Must have 'lat' and 'lon'.")
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map, e.g., using Cartopy
pass
```
## Test the DataFrame accessor "geo"
```python
ds = pd.DataFrame({'longitude': np.linspace(0, 10),
'latitude': np.linspace(0, 20)})
ds.geo.center
```
(5.0, 10.0)
</details>
| https://api.github.com/repos/pandas-dev/pandas/pulls/25420 | 2019-02-23T19:47:34Z | 2019-02-25T22:35:46Z | 2019-02-25T22:35:46Z | 2019-02-25T22:35:46Z |
ENH: Add Series.str.casefold | diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index a6ac40b5203bf..b406893e3414a 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -409,6 +409,7 @@ strings and apply several methods to it. These can be accessed like
:template: autosummary/accessor_method.rst
Series.str.capitalize
+ Series.str.casefold
Series.str.cat
Series.str.center
Series.str.contains
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst
index e4f60a761750d..6f21a7d9beb36 100644
--- a/doc/source/user_guide/text.rst
+++ b/doc/source/user_guide/text.rst
@@ -600,6 +600,7 @@ Method Summary
:meth:`~Series.str.partition`;Equivalent to ``str.partition``
:meth:`~Series.str.rpartition`;Equivalent to ``str.rpartition``
:meth:`~Series.str.lower`;Equivalent to ``str.lower``
+ :meth:`~Series.str.casefold`;Equivalent to ``str.casefold``
:meth:`~Series.str.upper`;Equivalent to ``str.upper``
:meth:`~Series.str.find`;Equivalent to ``str.find``
:meth:`~Series.str.rfind`;Equivalent to ``str.rfind``
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..b94a18d863a41 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -22,6 +22,7 @@ Other Enhancements
- Indexing of ``DataFrame`` and ``Series`` now accepts zerodim ``np.ndarray`` (:issue:`24919`)
- :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`)
- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :meth:`datetime.time` objects with timezones (:issue:`24043`)
+- ``Series.str`` has gained :meth:`Series.str.casefold` method to removes all case distinctions present in a string (:issue:`25405`)
- :meth:`DataFrame.set_index` now works for instances of ``abc.Iterator``, provided their output is of the same length as the calling frame (:issue:`22484`, :issue:`24984`)
- :meth:`DatetimeIndex.union` now supports the ``sort`` argument. The behaviour of the sort parameter matches that of :meth:`Index.union` (:issue:`24994`)
-
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index cc7a4db515c42..9577b07360f65 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -2926,7 +2926,7 @@ def rindex(self, sub, start=0, end=None):
_shared_docs['casemethods'] = ("""
Convert strings in the Series/Index to %(type)s.
-
+ %(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
@@ -2943,6 +2943,7 @@ def rindex(self, sub, start=0, end=None):
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
+ Series.str.casefold: Removes all case distinctions in the string.
Examples
--------
@@ -2989,12 +2990,15 @@ def rindex(self, sub, start=0, end=None):
3 sWaPcAsE
dtype: object
""")
- _shared_docs['lower'] = dict(type='lowercase', method='lower')
- _shared_docs['upper'] = dict(type='uppercase', method='upper')
- _shared_docs['title'] = dict(type='titlecase', method='title')
+ _shared_docs['lower'] = dict(type='lowercase', method='lower', version='')
+ _shared_docs['upper'] = dict(type='uppercase', method='upper', version='')
+ _shared_docs['title'] = dict(type='titlecase', method='title', version='')
_shared_docs['capitalize'] = dict(type='be capitalized',
- method='capitalize')
- _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
+ method='capitalize', version='')
+ _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase',
+ version='')
+ _shared_docs['casefold'] = dict(type='be casefolded', method='casefold',
+ version='\n .. versionadded:: 0.25.0\n')
lower = _noarg_wrapper(lambda x: x.lower(),
docstring=_shared_docs['casemethods'] %
_shared_docs['lower'])
@@ -3010,6 +3014,9 @@ def rindex(self, sub, start=0, end=None):
swapcase = _noarg_wrapper(lambda x: x.swapcase(),
docstring=_shared_docs['casemethods'] %
_shared_docs['swapcase'])
+ casefold = _noarg_wrapper(lambda x: x.casefold(),
+ docstring=_shared_docs['casemethods'] %
+ _shared_docs['casefold'])
_shared_docs['ismethods'] = ("""
Check whether all characters in each string are %(type)s.
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 7cea3be03d1a7..1ecfedc8685da 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -76,7 +76,7 @@ def assert_series_or_index_equal(left, right):
'len', 'lower', 'lstrip', 'partition',
'rpartition', 'rsplit', 'rstrip',
'slice', 'slice_replace', 'split',
- 'strip', 'swapcase', 'title', 'upper'
+ 'strip', 'swapcase', 'title', 'upper', 'casefold'
], [()] * 100, [{}] * 100))
ids, _, _ = zip(*_any_string_method) # use method name as fixture-id
@@ -3424,3 +3424,12 @@ def test_method_on_bytes(self):
expected = Series(np.array(
['ad', 'be', 'cf'], 'S2').astype(object))
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.skipif(compat.PY2, reason='not in python2')
+ def test_casefold(self):
+ # GH25405
+ expected = Series(['ss', NA, 'case', 'ssd'])
+ s = Series(['ß', NA, 'case', 'ßd'])
+ result = s.str.casefold()
+
+ tm.assert_series_equal(result, expected)
| - [x] closes #25405
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25419 | 2019-02-23T19:41:19Z | 2019-02-28T21:21:40Z | 2019-02-28T21:21:39Z | 2019-02-28T21:21:48Z |
Correct a typo of version number for interpolate() | diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index a462f01dcd14f..7883814e91c94 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -335,7 +335,7 @@ examined :ref:`in the API <api.dataframe.missing>`.
Interpolation
~~~~~~~~~~~~~
-.. versionadded:: 0.21.0
+.. versionadded:: 0.23.0
The ``limit_area`` keyword argument was added.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3647565123523..eb84a9a5810f4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6648,7 +6648,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
- .. versionadded:: 0.21.0
+ .. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
| Correct a typo of version number in the docstring of _shared_docs['interpolat e'] which is the docstring for pandas.core.resample.Resampler.interpolate, pandas.DataFrame.interpolate, panda s.Series.interpolate, and pandas.Panel.interpolate, and in documentation/user_guide/missing_data about the limit_area keyword argument in interpolate(). The reference can be found at https://github.com/pandas-dev/pandas/issue s/8000#issuecomment-465802842.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25418 | 2019-02-23T00:12:57Z | 2019-02-23T00:16:37Z | 2019-02-23T00:16:37Z | 2019-02-23T03:18:57Z |
DEP: add pytest-mock to environment.yml | diff --git a/environment.yml b/environment.yml
index 47fe8e4c2a640..ce68dccca0c07 100644
--- a/environment.yml
+++ b/environment.yml
@@ -20,6 +20,7 @@ dependencies:
- isort
- moto
- pytest>=4.0
+ - pytest-mock
- sphinx
- numpydoc
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 76aaeefa648f4..22c01ebcef7f0 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -11,6 +11,7 @@ hypothesis>=3.82
isort
moto
pytest>=4.0
+pytest-mock
sphinx
numpydoc
beautifulsoup4>=4.2.1
| Ran into errors when trying to run the test suite in a freshly installed conda environment. There's an error being caused by the fact that #25346 added `pytest-mock` as a dependency in all the CI jobs (to be able to use the `mocker` fixture), but didn't adapt `environment.yml`. This PR fixes that.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25417 | 2019-02-23T00:07:04Z | 2019-02-23T18:36:38Z | 2019-02-23T18:36:38Z | 2019-03-07T17:33:35Z |
EA: BoolArray | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 4088697fa6f5f..ad0f15117b78b 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -149,12 +149,33 @@ def all_arithmetic_operators(request):
return request.param
-_all_numeric_reductions = ['sum', 'max', 'min',
- 'mean', 'prod', 'std', 'var', 'median',
- 'kurt', 'skew']
+# reductions that are generally applicable to all data types
+_non_numeric_reductions = ['min', 'max', 'sum']
+# reductions that are generally application to
+# only numeric data dtypes
+_numeric_reductions = ['mean', 'prod',
+ 'std', 'var', 'median',
+ 'kurt', 'skew']
-@pytest.fixture(params=_all_numeric_reductions)
+
+@pytest.fixture(params=_non_numeric_reductions)
+def only_non_numeric_reductions(request):
+ """
+ Fixture for only non numeric reduction names
+ """
+ return request.param
+
+
+@pytest.fixture(params=_numeric_reductions)
+def only_numeric_reductions(request):
+ """
+ Fixture for only numeric reduction names
+ """
+ return request.param
+
+
+@pytest.fixture(params=_non_numeric_reductions + _numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 29c146cb55a23..9ad65687712ce 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -19,6 +19,7 @@
from pandas.core import nanops
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
+from pandas.core.arrays.mask import get_mask_array_type
from pandas.core.tools.numeric import to_numeric
@@ -287,7 +288,7 @@ def __init__(self, values, mask, copy=False):
and is_integer_dtype(values.dtype)):
raise TypeError("values should be integer numpy array. Use "
"the 'integer_array' function instead")
- if not (isinstance(mask, np.ndarray) and is_bool_dtype(mask.dtype)):
+ if not is_bool_dtype(mask):
raise TypeError("mask should be boolean numpy array. Use "
"the 'integer_array' function instead")
@@ -296,7 +297,7 @@ def __init__(self, values, mask, copy=False):
mask = mask.copy()
self._data = values
- self._mask = mask
+ self._mask = get_mask_array_type()._from_sequence(mask, copy=False)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
@@ -332,7 +333,8 @@ def _coerce_to_ndarray(self):
# TODO(jreback) make this better
data = self._data.astype(object)
- data[self._mask] = self._na_value
+ mask = np.array(self._mask, copy=False)
+ data[mask] = self._na_value
return data
__array_priority__ = 1000 # higher than ndarray so ops dispatch to us
@@ -407,6 +409,11 @@ def nbytes(self):
def isna(self):
return self._mask
+ @property
+ def flags(self):
+ # compat
+ return self._data.flags
+
@property
def _na_value(self):
return np.nan
@@ -559,6 +566,7 @@ def cmp_method(self, other):
else:
mask = self._mask | mask
+ mask = np.array(mask, copy=False)
result[mask] = op_name == 'ne'
return result
diff --git a/pandas/core/arrays/mask/__init__.py b/pandas/core/arrays/mask/__init__.py
new file mode 100644
index 0000000000000..a417679944bc6
--- /dev/null
+++ b/pandas/core/arrays/mask/__init__.py
@@ -0,0 +1,30 @@
+_MaskArrayType = None
+
+
+def get_mask_array_type():
+ """Set the mask array type to use, we need to do
+ this after all modules are imported as the implementations
+ e.g. pyarrow depend on pandas being importable
+ """
+ global _MaskArrayType
+
+ if _MaskArrayType is not None:
+ return _MaskArrayType
+
+ # if ArrowBoolArray is available use it
+ # otherwise use the NumpyMask
+ try:
+ from pandas.core.arrays.mask._pyarrow import ArrowMaskArray
+
+ MaskArray = ArrowMaskArray
+
+ except ImportError:
+ from pandas.core.arrays.mask._numpy import NumpyMaskArray
+
+ MaskArray = NumpyMaskArray
+
+ _MaskArrayType = MaskArray
+ return _MaskArrayType
+
+
+__all__ = ['get_mask_array_type']
diff --git a/pandas/core/arrays/mask/_base.py b/pandas/core/arrays/mask/_base.py
new file mode 100644
index 0000000000000..0bb72d2ab951e
--- /dev/null
+++ b/pandas/core/arrays/mask/_base.py
@@ -0,0 +1,140 @@
+"""A boolean mask interface.
+
+This module provides an interface to a numpy / pyarrow boolean mask.
+This is limited as not all of the implementations can hold NA, so
+for consistency this is an internal.
+"""
+
+import copy
+
+import numpy as np
+
+from pandas.api.extensions import ExtensionDtype
+from pandas.api.types import is_scalar
+from pandas.core.arrays.base import ExtensionArray
+from pandas.core.missing import isna
+
+
+class MaskDtype(ExtensionDtype):
+
+ type = np.bool_
+ kind = 'b'
+ name = 'bool'
+
+ @classmethod
+ def construct_from_string(cls, string):
+ if string == cls.name:
+ return cls()
+ else:
+ raise TypeError("Cannot construct a '{}' from "
+ "'{}'".format(cls, string))
+
+ def _is_boolean(self):
+ return True
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __eq__(self, other):
+ # compare == to np.dtype('bool')
+ if isinstance(other, str):
+ return other == self.name
+ elif isinstance(other, type(self)):
+ return True
+ elif isinstance(other, np.dtype):
+ return other == 'bool'
+ else:
+ return hash(self) == hash(other)
+
+
+class MaskArray(ExtensionArray):
+ """Common baseclass for both pyarrow and numpy masked arrays"""
+ _typ = "maskarray"
+
+ @classmethod
+ def _from_sequence(cls, scalars, dtype=None, copy=False):
+ return cls.from_scalars(scalars)
+
+ @property
+ def size(self):
+ return len(self)
+
+ def __eq__(self, other):
+ return np.array(self, copy=False) == np.array(other, copy=False)
+
+ def __len__(self):
+ return len(self._data)
+
+ def isna(self):
+ nas = isna(np.array(self._data, copy=False))
+ return type(self).from_scalars(nas)
+
+ def __invert__(self):
+ return type(self).from_scalars(
+ ~np.array(self._data, copy=False)
+ )
+
+ def __or__(self, other):
+ return type(self).from_scalars(np.array(
+ self, copy=False).__or__(np.array(other, copy=False)))
+
+ def __ior__(self, other):
+ return type(self).from_scalars(
+ np.array(self, copy=False) | np.array(other, copy=False))
+
+ def __and__(self, other):
+ return type(self).from_scalars(
+ np.array(self, copy=False).__and__(np.array(other, copy=False)))
+
+ def __iand__(self, other):
+ return type(self).from_scalars(
+ np.array(self, copy=False) & (np.array(other, copy=False)))
+
+ def __getitem__(self, item):
+ arr = np.array(self, copy=False)
+ if is_scalar(item):
+ return arr[item]
+ else:
+ arr = arr[item]
+ return type(self).from_scalars(arr)
+
+ def view(self, dtype=None):
+ arr = np.array(self._data, copy=False)
+ if dtype is not None:
+ arr = arr.view(dtype=dtype)
+ return arr
+
+ def sum(self, axis=None, min_count=None):
+ return np.array(self, copy=False).sum()
+
+ def copy(self, deep=False):
+ if deep:
+ return type(self)(copy.deepcopy(self._data))
+ else:
+ return type(self)(copy.copy(self._data))
+
+ def any(self, axis=0, out=None):
+ return np.array(self._data, copy=False).any()
+
+ def all(self, axis=0, out=None):
+ return np.array(self._data, copy=False).all()
+
+ def min(self, axis=0, out=None):
+ return np.array(self._data, copy=False).min()
+
+ def max(self, axis=0, out=None):
+ return np.array(self._data, copy=False).max()
+
+ def _reduce(self, method, skipna=True, **kwargs):
+ if skipna:
+ arr = self[~self.isna()]
+ else:
+ arr = self
+ # we only allow explicity defined methods
+ # ndarrays actually support: mean, var, prod, min, max
+ try:
+ op = getattr(arr, method)
+ return op()
+ except AttributeError:
+ pass
+ raise TypeError
diff --git a/pandas/core/arrays/mask/_numpy.py b/pandas/core/arrays/mask/_numpy.py
new file mode 100644
index 0000000000000..e59f1f050ee5d
--- /dev/null
+++ b/pandas/core/arrays/mask/_numpy.py
@@ -0,0 +1,82 @@
+"""
+This module provide a numpy-boolean boolean array
+"""
+
+import numpy as np
+
+from pandas.api.extensions import take
+from pandas.core.arrays.mask._base import MaskArray, MaskDtype
+
+
+class NumpyMaskDtype(MaskDtype):
+
+ na_value = np.nan
+
+ @classmethod
+ def construct_array_type(cls):
+ return NumpyMaskArray
+
+
+class NumpyMaskArray(MaskArray):
+ """Generic class which can be used to represent missing data.
+ """
+
+ dtype = NumpyMaskDtype()
+
+ @classmethod
+ def from_scalars(cls, values):
+ arr = np.asarray(values).astype(np.bool_, copy=False)
+ return cls(arr, copy=False)
+
+ def __init__(self, mask, copy=True):
+ """
+ Parameters
+ ----------
+ mask : numpy array
+ Mask of missing values.
+ """
+ assert isinstance(mask, np.ndarray)
+ assert mask.dtype == np.bool_
+
+ if copy:
+ mask = mask.copy()
+ self._data = mask
+
+ def __setitem__(self, key, value):
+ self._data[key] = value
+
+ def __array__(self, dtype=None):
+ return self._data
+
+ def __iter__(self):
+ return iter(self._data)
+
+ @property
+ def nbytes(self):
+ return self._data.nbytes
+
+ def reshape(self, shape, **kwargs):
+ return np.array(self, copy=False).reshape(shape, **kwargs)
+
+ def astype(self, dtype, copy=True):
+ # needed to fix this astype for the Series constructor.
+ if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
+ if copy:
+ return self.copy()
+ return self
+ return super(NumpyMaskArray, self).astype(dtype, copy)
+
+ def take(self, indices, allow_fill=False, fill_value=None, axis=None):
+ # TODO: had to add axis here
+ data = self._data
+
+ if allow_fill and fill_value is None:
+ fill_value = self.dtype.na_value
+
+ result = take(data, indices, fill_value=fill_value,
+ allow_fill=allow_fill)
+ return self._from_sequence(result, dtype=self.dtype)
+
+ def _concat_same_type(cls, to_concat):
+ concat = np.concatenate(to_concat)
+ return cls.from_scalars(concat)
diff --git a/pandas/core/arrays/mask/_pyarrow.py b/pandas/core/arrays/mask/_pyarrow.py
new file mode 100644
index 0000000000000..c9b53d56108c8
--- /dev/null
+++ b/pandas/core/arrays/mask/_pyarrow.py
@@ -0,0 +1,96 @@
+"""Rudimentary Apache Arrow-backed ExtensionArray.
+
+At the moment, just a boolean array / type is implemented.
+Eventually, we'll want to parametrize the type and support
+multiple dtypes. Not all methods are implemented yet, and the
+current implementation is not efficient.
+"""
+from distutils.version import LooseVersion
+import itertools
+
+import numpy as np
+
+from pandas.api.extensions import take
+from pandas.core.arrays.mask._base import MaskArray, MaskDtype
+
+# we require pyarrow >= 0.10.0
+
+try:
+ import pyarrow as pa
+ if pa.__version__ < LooseVersion('0.10.0'):
+ raise ImportError("pyarrow minimum for bool suppport is 0.10.0")
+except ImportError:
+ raise
+
+
+class ArrowMaskDtype(MaskDtype):
+
+ na_value = pa.NULL
+
+ @classmethod
+ def construct_array_type(cls):
+ return ArrowMaskArray
+
+
+class ArrowMaskArray(MaskArray):
+
+ dtype = ArrowMaskDtype()
+
+ @classmethod
+ def from_scalars(cls, values):
+ values = np.asarray(values).astype(np.bool_, copy=False)
+ arr = pa.chunked_array([values])
+ return cls(arr)
+
+ def __init__(self, values, copy=False):
+
+ # TODO: we need to rationalize the return types from
+ # various ops, we oftentimes return boolean array arrays
+ # but not chunked ones
+ if not isinstance(values, pa.ChunkedArray):
+ values = pa.chunked_array([values])
+ assert values.type == pa.bool_()
+ if copy:
+ values = values.copy()
+
+ self._data = values
+
+ def __setitem__(self, key, value):
+ # TODO: hack-a-minute
+ data = np.array(self._data)
+ data[key] = value
+ self._data = pa.array(data)
+
+ def astype(self, dtype, copy=True):
+ # needed to fix this astype for the Series constructor.
+ if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
+ if copy:
+ return self.copy()
+ return self
+ return super(ArrowMaskArray, self).astype(dtype, copy)
+
+ @property
+ def nbytes(self):
+ return sum(x.size for chunk in self._data.chunks
+ for x in chunk.buffers()
+ if x is not None)
+
+ def take(self, indices, allow_fill=False, fill_value=None, axis=None):
+ # TODO: had to add axis here
+ data = self._data.to_pandas()
+
+ if allow_fill and fill_value is None:
+ fill_value = self.dtype.na_value
+
+ result = take(data, indices, fill_value=fill_value,
+ allow_fill=allow_fill)
+ return self._from_sequence(result, dtype=self.dtype)
+
+ def _concat_same_type(cls, to_concat):
+ chunks = list(itertools.chain.from_iterable(x._data.chunks
+ for x in to_concat))
+ arr = pa.chunked_array(chunks)
+ return cls(arr)
+
+ def __array__(self, dtype=None):
+ return np.array(self._data, copy=False)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e62a2119df820..ec45b521c9dd4 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -19,7 +19,8 @@
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer)
-from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import (
+ ABCIndex, ABCIndexClass, ABCMaskArray, ABCSeries)
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa
@@ -115,7 +116,7 @@ def is_bool_indexer(key: Any) -> bool:
and contains missing values.
"""
na_msg = 'cannot index with vector containing NA / NaN values'
- if (isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or
+ if (isinstance(key, (ABCSeries, np.ndarray, ABCIndex, ABCMaskArray)) or
(is_array_like(key) and is_extension_array_dtype(key.dtype))):
if key.dtype == np.object_:
key = np.asarray(values_from_object(key))
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 134ec95729833..aca82ed621540 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -73,6 +73,9 @@ def _check(cls, inst):
ABCPandasArray = create_pandas_abc_type("ABCPandasArray",
"_typ",
("npy_extension",))
+ABCMaskArray = create_pandas_abc_type("ABCMaskArray",
+ "_typ",
+ ("maskarray"))
class _ABCGeneric(type):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index bd8a8852964e3..a7a46048acb4a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -785,8 +785,12 @@ def _try_cast(self, result, obj, numeric_only=False):
elif is_extension_array_dtype(dtype):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
+
+ # return the same type (Series) as our caller
try:
- result = obj._values._from_sequence(result, dtype=dtype)
+ result = result._constructor(
+ obj._values._from_sequence(result, dtype=dtype),
+ index=result.index, name=result.name)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
@@ -1277,6 +1281,16 @@ def f(self, **kwargs):
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
+
+ # coerce the columns if we can
+ if isinstance(result, DataFrame):
+ for col in result.columns:
+ result[col] = self._try_cast(
+ result[col], self.obj[col])
+ else:
+ result = self._try_cast(
+ result, self.obj)
+
if _convert:
result = result._convert(datetime=True)
return result
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 68d4e746f72ad..f262722d0641b 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -11,7 +11,8 @@
from pandas.core.dtypes.common import (
ensure_platform_int, is_float, is_integer, is_integer_dtype, is_iterator,
is_list_like, is_numeric_dtype, is_scalar, is_sequence, is_sparse)
-from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
+from pandas.core.dtypes.generic import (
+ ABCDataFrame, ABCMaskArray, ABCPanel, ABCSeries)
from pandas.core.dtypes.missing import _infer_fill_value, isna
import pandas.core.common as com
@@ -2494,6 +2495,8 @@ def check_bool_indexer(ax, key):
elif is_sparse(result):
result = result.to_dense()
result = np.asarray(result, dtype=bool)
+ elif isinstance(result, ABCMaskArray):
+ result = np.array(result, copy=False)
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 662fe6e3ecb37..038c982749f5f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1783,6 +1783,19 @@ def _slice(self, slicer):
return self.values[slicer]
+ def _try_cast_result(self, result, dtype=None):
+ """
+ if we have an operation that operates on for example floats
+ we want to try to cast back to our EA here if possible
+ """
+ try:
+ result = self._holder._from_sequence(
+ result.ravel(), dtype=dtype)
+ except Exception:
+ pass
+
+ return result
+
def formatting_values(self):
# Deprecating the ability to override _formatting_values.
# Do the warning here, it's only user in pandas, since we
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 95516aec060b7..c2b2e9deac389 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -88,11 +88,12 @@ def _f(*args, **kwargs):
class bottleneck_switch:
- def __init__(self, **kwargs):
+ def __init__(self, name=None, **kwargs):
+ self.name = name
self.kwargs = kwargs
def __call__(self, alt):
- bn_name = alt.__name__
+ bn_name = self.name or alt.__name__
try:
bn_func = getattr(bn, bn_name)
@@ -724,7 +725,8 @@ def nansem(values, axis=None, skipna=True, ddof=1, mask=None):
def _nanminmax(meth, fill_value_typ):
- @bottleneck_switch()
+
+ @bottleneck_switch(name='nan' + meth)
def reduction(values, axis=None, skipna=True, mask=None):
values, mask, dtype, dtype_max, fill_value = _get_values(
@@ -744,7 +746,6 @@ def reduction(values, axis=None, skipna=True, mask=None):
result = _wrap_results(result, dtype, fill_value)
return _maybe_null_out(result, axis, mask)
- reduction.__name__ = 'nan' + meth
return reduction
diff --git a/pandas/tests/extension/arrow/__init__.py b/pandas/tests/arrays/mask/__init__.py
similarity index 100%
rename from pandas/tests/extension/arrow/__init__.py
rename to pandas/tests/arrays/mask/__init__.py
diff --git a/pandas/tests/arrays/mask/test_mask.py b/pandas/tests/arrays/mask/test_mask.py
new file mode 100644
index 0000000000000..ae7ec1d8b7a3d
--- /dev/null
+++ b/pandas/tests/arrays/mask/test_mask.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import pytest
+
+from pandas.util import testing as tm
+
+
+@pytest.fixture(params=['numpy', 'arrow', 'mask'])
+def mask_dtype(request):
+ """ dtype type """
+ if request.param == 'numpy':
+ from pandas.core.arrays.mask._numpy import NumpyMaskDtype
+ return NumpyMaskDtype
+ elif request.param == 'arrow':
+ pytest.importorskip('pyarrow', minversion="0.10.0")
+ from pandas.core.arrays.mask._pyarrow import ArrowMaskDtype
+ return ArrowMaskDtype
+ elif request.param == 'mask':
+ from pandas.core.arrays.mask import get_mask_array_type
+ return type(get_mask_array_type().dtype)
+
+
+@pytest.fixture
+def mask_type(mask_dtype):
+ """ array type """
+ return mask_dtype.construct_array_type()
+
+
+@pytest.fixture
+def mask(mask_type):
+ """ array object """
+ return mask_type._from_sequence([1, 0, 1])
+
+
+def test_construction(mask_type):
+ expected = np.array([1, 0, 1], dtype=bool)
+
+ # list
+ result = np.array(mask_type._from_sequence([1, 0, 1]))
+ tm.assert_numpy_array_equal(result, expected)
+
+ # array
+ result = np.array(mask_type._from_sequence(np.array([1, 0, 1])))
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = np.array(mask_type._from_sequence(
+ np.array([1, 0, 1], dtype=bool)))
+ tm.assert_numpy_array_equal(result, expected)
+
+
+def test_str(mask):
+
+ result = repr(mask)
+ expected = '<{}>\n[True, False, True]\nLength: 3, dtype: {}'.format(
+ mask.__class__.__name__, mask.dtype)
+ assert result == expected
+
+
+def test_indexing(mask):
+
+ # getitem
+ assert mask[0]
+ assert not mask[1]
+ assert mask[2]
+
+ # slice
+ assert (mask[:] == mask).all()
+ assert (mask[[0, 1]] == mask._from_sequence([1, 0])).all()
+
+ # setitem
+ mask[0] = False
+ assert not mask[0]
+ mask[[0, 1]] = [1, 1]
+ assert mask.all()
+
+
+def test_ops(mask):
+
+ mask2 = mask._from_sequence([0, 0, 0])
+ assert not mask.all()
+ assert mask.any()
+ assert (mask2 | mask == mask).all()
+ assert (mask2 & mask == mask2).any()
+
+ assert (~mask2).all()
+
+ # inplace
+ mask2 |= mask
+ assert (mask2 == mask._from_sequence([1, 0, 1])).all()
+
+ mask2 &= np.array([0, 0, 0], dtype=bool)
+ assert (mask2 == mask._from_sequence([0, 0, 0])).all()
+
+
+def test_functions(mask):
+
+ assert mask.sum() == 2
+
+ mask2 = mask.copy()
+ assert mask2 is not mask
+ assert (mask2 == mask).all()
+
+ assert mask.size == len(mask)
+
+
+def test_dtype(mask_dtype):
+ m = mask_dtype()
+ assert m == m
+ assert m == mask_dtype()
+ assert hash(m) is not None
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 4512e98ebe0cf..c58c6d615b496 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -167,6 +167,7 @@ def _check_op(self, s, op_name, other, exc=None):
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisions that are resulting in float dtypes
+ mask |= (expected == np.inf) | (expected == -np.inf)
expected[mask] = np.nan
tm.assert_series_equal(result, expected)
@@ -341,7 +342,8 @@ def _compare_other(self, data, op_name, other):
# fill the nan locations
expected[data._mask] = op_name == '__ne__'
- tm.assert_series_equal(result, expected)
+ # TODO: remove check_dtype
+ tm.assert_series_equal(result, expected, check_dtype=False)
# series
s = pd.Series(data)
@@ -353,7 +355,8 @@ def _compare_other(self, data, op_name, other):
# fill the nan locations
expected[data._mask] = op_name == '__ne__'
- tm.assert_series_equal(result, expected)
+ # TODO: remove check_dtype
+ tm.assert_series_equal(result, expected, check_dtype=False)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
@@ -553,13 +556,15 @@ def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype='int64')
mask = np.array([False, False, False, True], dtype='bool')
+ # TODO: need to construct an equiv mask here
+ # for a pa.bool_ dtype
result = IntegerArray(values, mask)
assert result._data is values
- assert result._mask is mask
+ assert (result._mask == mask).all()
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
- assert result._mask is not mask
+ assert (result._mask == mask).all()
@pytest.mark.parametrize(
@@ -691,7 +696,7 @@ def test_reduce_to_float(op):
expected = pd.DataFrame({
"B": np.array([1.0, 3.0]),
- "C": integer_array([1, 3], dtype="Int64")
+ "C": np.array([1.0, 3.0]),
}, index=pd.Index(['a', 'b'], name='A'))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 3634b59047f76..a9bd15a6722df 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -14,6 +14,7 @@
from pandas.conftest import (
ALL_EA_INT_DTYPES, ALL_INT_DTYPES, SIGNED_EA_INT_DTYPES, SIGNED_INT_DTYPES,
UNSIGNED_EA_INT_DTYPES, UNSIGNED_INT_DTYPES)
+from pandas.core.arrays.mask import get_mask_array_type
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@@ -526,6 +527,8 @@ def test_is_bool_dtype():
assert com.is_bool_dtype(np.bool)
assert com.is_bool_dtype(np.array([True, False]))
assert com.is_bool_dtype(pd.Index([True, False]))
+ assert com.is_bool_dtype(
+ get_mask_array_type()._from_sequence([True, False]))
@pytest.mark.parametrize("check_scipy", [
diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/bool.py
deleted file mode 100644
index 025c4cacd8fa1..0000000000000
--- a/pandas/tests/extension/arrow/bool.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""Rudimentary Apache Arrow-backed ExtensionArray.
-
-At the moment, just a boolean array / type is implemented.
-Eventually, we'll want to parametrize the type and support
-multiple dtypes. Not all methods are implemented yet, and the
-current implementation is not efficient.
-"""
-import copy
-import itertools
-
-import numpy as np
-import pyarrow as pa
-
-import pandas as pd
-from pandas.api.extensions import (
- ExtensionArray, ExtensionDtype, register_extension_dtype, take)
-
-
-@register_extension_dtype
-class ArrowBoolDtype(ExtensionDtype):
-
- type = np.bool_
- kind = 'b'
- name = 'arrow_bool'
- na_value = pa.NULL
-
- @classmethod
- def construct_from_string(cls, string):
- if string == cls.name:
- return cls()
- else:
- raise TypeError("Cannot construct a '{}' from "
- "'{}'".format(cls, string))
-
- @classmethod
- def construct_array_type(cls):
- return ArrowBoolArray
-
- def _is_boolean(self):
- return True
-
-
-class ArrowBoolArray(ExtensionArray):
- def __init__(self, values):
- if not isinstance(values, pa.ChunkedArray):
- raise ValueError
-
- assert values.type == pa.bool_()
- self._data = values
- self._dtype = ArrowBoolDtype()
-
- def __repr__(self):
- return "ArrowBoolArray({})".format(repr(self._data))
-
- @classmethod
- def from_scalars(cls, values):
- arr = pa.chunked_array([pa.array(np.asarray(values))])
- return cls(arr)
-
- @classmethod
- def from_array(cls, arr):
- assert isinstance(arr, pa.Array)
- return cls(pa.chunked_array([arr]))
-
- @classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
- return cls.from_scalars(scalars)
-
- def __getitem__(self, item):
- if pd.api.types.is_scalar(item):
- return self._data.to_pandas()[item]
- else:
- vals = self._data.to_pandas()[item]
- return type(self).from_scalars(vals)
-
- def __len__(self):
- return len(self._data)
-
- def astype(self, dtype, copy=True):
- # needed to fix this astype for the Series constructor.
- if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
- if copy:
- return self.copy()
- return self
- return super(ArrowBoolArray, self).astype(dtype, copy)
-
- @property
- def dtype(self):
- return self._dtype
-
- @property
- def nbytes(self):
- return sum(x.size for chunk in self._data.chunks
- for x in chunk.buffers()
- if x is not None)
-
- def isna(self):
- nas = pd.isna(self._data.to_pandas())
- return type(self).from_scalars(nas)
-
- def take(self, indices, allow_fill=False, fill_value=None):
- data = self._data.to_pandas()
-
- if allow_fill and fill_value is None:
- fill_value = self.dtype.na_value
-
- result = take(data, indices, fill_value=fill_value,
- allow_fill=allow_fill)
- return self._from_sequence(result, dtype=self.dtype)
-
- def copy(self, deep=False):
- if deep:
- return type(self)(copy.deepcopy(self._data))
- else:
- return type(self)(copy.copy(self._data))
-
- def _concat_same_type(cls, to_concat):
- chunks = list(itertools.chain.from_iterable(x._data.chunks
- for x in to_concat))
- arr = pa.chunked_array(chunks)
- return cls(arr)
-
- def __invert__(self):
- return type(self).from_scalars(
- ~self._data.to_pandas()
- )
-
- def _reduce(self, method, skipna=True, **kwargs):
- if skipna:
- arr = self[~self.isna()]
- else:
- arr = self
-
- try:
- op = getattr(arr, method)
- except AttributeError:
- raise TypeError
- return op(**kwargs)
-
- def any(self, axis=0, out=None):
- return self._data.to_pandas().any()
-
- def all(self, axis=0, out=None):
- return self._data.to_pandas().all()
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
deleted file mode 100644
index 2aece66d94150..0000000000000
--- a/pandas/tests/extension/arrow/test_bool.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas.tests.extension import base
-import pandas.util.testing as tm
-
-pytest.importorskip('pyarrow', minversion="0.10.0")
-
-from .bool import ArrowBoolArray, ArrowBoolDtype # isort:skip
-
-
-@pytest.fixture
-def dtype():
- return ArrowBoolDtype()
-
-
-@pytest.fixture
-def data():
- return ArrowBoolArray.from_scalars(np.random.randint(0, 2, size=100,
- dtype=bool))
-
-
-@pytest.fixture
-def data_missing():
- return ArrowBoolArray.from_scalars([None, True])
-
-
-class BaseArrowTests:
- pass
-
-
-class TestDtype(BaseArrowTests, base.BaseDtypeTests):
- def test_array_type_with_arg(self, data, dtype):
- pytest.skip("GH-22666")
-
-
-class TestInterface(BaseArrowTests, base.BaseInterfaceTests):
- def test_repr(self, data):
- raise pytest.skip("TODO")
-
-
-class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
- def test_from_dtype(self, data):
- pytest.skip("GH-22666")
-
- # seems like some bug in isna on empty BoolArray returning floats.
- @pytest.mark.xfail(reason='bad is-na for empty data')
- def test_from_sequence_from_cls(self, data):
- super(TestConstructors, self).test_from_sequence_from_cls(data)
-
-
-class TestReduce(base.BaseNoReduceTests):
- def test_reduce_series_boolean(self):
- pass
-
-
-class TestReduceBoolean(base.BaseBooleanReduceTests):
- pass
-
-
-def test_is_bool_dtype(data):
- assert pd.api.types.is_bool_dtype(data)
- assert pd.core.common.is_bool_indexer(data)
- s = pd.Series(range(len(data)))
- result = s[data]
- expected = s[np.asarray(data)]
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 6388902e45627..786e9df8eee55 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -59,8 +59,10 @@ def test_isna_extension_array(self, data_missing):
# _reduce. At the *very* least, you must implement any and all
na = data_missing.isna()
if is_extension_array_dtype(na):
- assert na._reduce('any')
- assert na.any()
+
+ # TODO: .isna() can actuall be all False
+ assert na._reduce('any') in [True, False]
+ assert na.any() in [True, False]
assert not na._reduce('all')
assert not na.all()
diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py
index c4b70f2013265..a9fe21357a135 100644
--- a/pandas/tests/extension/base/reduce.py
+++ b/pandas/tests/extension/base/reduce.py
@@ -18,6 +18,12 @@ def check_reduce(self, s, op_name, skipna):
expected = getattr(s.astype('float64'), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
+ def check_reduce_bool(self, s, op_name, skipna):
+ """check_reduce with casting back to bool"""
+ result = getattr(s, op_name)(skipna=skipna)
+ expected = bool(getattr(s.astype('float64'), op_name)(skipna=skipna))
+ tm.assert_almost_equal(result, expected)
+
class BaseNoReduceTests(BaseReduceTests):
""" we don't define any reductions """
diff --git a/pandas/tests/extension/mask/__init__.py b/pandas/tests/extension/mask/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/extension/mask/test_numpy_bool.py b/pandas/tests/extension/mask/test_numpy_bool.py
new file mode 100644
index 0000000000000..330bd7c37179d
--- /dev/null
+++ b/pandas/tests/extension/mask/test_numpy_bool.py
@@ -0,0 +1,85 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.mask._numpy import NumpyMaskArray, NumpyMaskDtype
+from pandas.tests.extension import base
+import pandas.util.testing as tm
+
+
+@pytest.fixture
+def dtype():
+ return NumpyMaskDtype()
+
+
+@pytest.fixture
+def data():
+ return NumpyMaskArray.from_scalars(
+ np.random.randint(0, 2, size=100, dtype=bool))
+
+
+@pytest.fixture
+def data_missing():
+ pytest.skip("not supported in NumpyMaskArray")
+
+
+class BaseNumpyTests(object):
+ pass
+
+
+class TestDtype(BaseNumpyTests, base.BaseDtypeTests):
+ pass
+
+
+class TestInterface(BaseNumpyTests, base.BaseInterfaceTests):
+ pass
+
+
+class TestConstructors(BaseNumpyTests, base.BaseConstructorsTests):
+ def test_from_dtype(self, data):
+ pytest.skip("GH-22666")
+
+
+class TestReduceBoolean(base.BaseBooleanReduceTests):
+
+ @pytest.mark.parametrize('skipna', [True, False])
+ def test_reduce_series(
+ self, data, only_numeric_reductions, skipna):
+ op_name = only_numeric_reductions
+ s = pd.Series(data)
+ with pytest.raises(TypeError):
+ getattr(s, op_name)(skipna=skipna)
+
+ @pytest.mark.parametrize('skipna', [True, False])
+ def test_reduce_series_non_numeric(
+ self, data, only_non_numeric_reductions, skipna):
+ op_name = only_non_numeric_reductions
+ s = pd.Series(data)
+ if op_name == 'sum':
+ self.check_reduce(s, op_name, skipna)
+ else:
+ self.check_reduce_bool(s, op_name, skipna)
+
+
+def test_is_bool_dtype(data):
+ assert pd.api.types.is_bool_dtype(data)
+ assert pd.core.common.is_bool_indexer(data)
+ s = pd.Series(range(len(data)))
+ result = s[data]
+ expected = s[np.asarray(data)]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/mask/test_pyarrow_bool.py b/pandas/tests/extension/mask/test_pyarrow_bool.py
new file mode 100644
index 0000000000000..817349f0773f4
--- /dev/null
+++ b/pandas/tests/extension/mask/test_pyarrow_bool.py
@@ -0,0 +1,89 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.tests.extension import base
+import pandas.util.testing as tm
+
+pytest.importorskip('pyarrow', minversion="0.10.0")
+
+from pandas.core.arrays.mask._pyarrow import ( # isort:skip
+ ArrowMaskArray, ArrowMaskDtype)
+
+
+@pytest.fixture
+def dtype():
+ return ArrowMaskDtype()
+
+
+@pytest.fixture
+def data():
+ return ArrowMaskArray.from_scalars(
+ np.random.randint(0, 2, size=100, dtype=bool))
+
+
+@pytest.fixture
+def data_missing():
+ return ArrowMaskArray.from_scalars([None, True])
+
+
+class BaseArrowTests:
+ pass
+
+
+class TestDtype(BaseArrowTests, base.BaseDtypeTests):
+ pass
+
+
+class TestInterface(BaseArrowTests, base.BaseInterfaceTests):
+ pass
+
+
+class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
+ def test_from_dtype(self, data):
+ pytest.skip("GH-22666")
+
+
+class TestReduceBoolean(base.BaseBooleanReduceTests):
+
+ @pytest.mark.parametrize('skipna', [True, False])
+ def test_reduce_series(
+ self, data, only_numeric_reductions, skipna):
+ op_name = only_numeric_reductions
+ s = pd.Series(data)
+ with pytest.raises(TypeError):
+ getattr(s, op_name)(skipna=skipna)
+
+ @pytest.mark.parametrize('skipna', [True, False])
+ def test_reduce_series_non_numeric(
+ self, data, only_non_numeric_reductions, skipna):
+ op_name = only_non_numeric_reductions
+ s = pd.Series(data)
+ if op_name == 'sum':
+ self.check_reduce(s, op_name, skipna)
+ else:
+ self.check_reduce_bool(s, op_name, skipna)
+
+
+def test_is_bool_dtype(data):
+ assert pd.api.types.is_bool_dtype(data)
+ assert pd.core.common.is_bool_indexer(data)
+ s = pd.Series(range(len(data)))
+ result = s[data]
+ expected = s[np.asarray(data)]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index e9f96390821a6..1c35bc0dcbd45 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -24,6 +24,7 @@
Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype,
UInt32Dtype, UInt64Dtype)
from pandas.tests.extension import base
+from pandas.util import testing as tm
def make_data():
@@ -181,7 +182,22 @@ class TestSetitem(base.BaseSetitemTests):
class TestMissing(base.BaseMissingTests):
- pass
+
+ def test_isna(self, data_missing):
+ # TODO: should actually compare that this is a ArrowBoolArray
+ expected = np.array([True, False])
+
+ result = np.array(pd.isna(data_missing))
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.Series(data_missing).isna()
+ expected = pd.Series(expected)
+ self.assert_series_equal(result, expected, check_dtype=False)
+
+ # TODO: need a pd.bool_ dtype here
+ result = pd.Series(data_missing).drop([0, 1]).isna()
+ expected = pd.Series([], dtype=bool)
+ self.assert_series_equal(result, expected, check_dtype=False)
class TestMethods(base.BaseMethodsTests):
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index 7a3d189d3020e..5bc093ae858de 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -284,18 +284,21 @@ def test_first_last_tz(data, expected_first, expected_last):
])
def test_first_last_tz_multi_column(method, ts, alpha):
# GH 21603
+ category_string = pd.Series(list('abc')).astype(
+ 'category')
df = pd.DataFrame({'group': [1, 1, 2],
- 'category_string': pd.Series(list('abc')).astype(
- 'category'),
+ 'category_string': category_string,
'datetimetz': pd.date_range('20130101', periods=3,
tz='US/Eastern')})
result = getattr(df.groupby('group'), method)()
- expepcted = pd.DataFrame({'category_string': [alpha, 'c'],
- 'datetimetz': [ts,
- Timestamp('2013-01-03',
- tz='US/Eastern')]},
- index=pd.Index([1, 2], name='group'))
- assert_frame_equal(result, expepcted)
+ expected = pd.DataFrame(
+ {'category_string': pd.Categorical(
+ [alpha, 'c'], dtype=category_string.dtype),
+ 'datetimetz': [ts,
+ Timestamp('2013-01-03',
+ tz='US/Eastern')]},
+ index=pd.Index([1, 2], name='group'))
+ assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected():
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index b5cc28e07fca6..c0d4f2ba8a424 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -112,6 +112,11 @@ def test_resample_integerarray():
dtype="Int64")
assert_series_equal(result, expected)
+ result = ts.resample('3T').mean()
+ expected = Series([1.0, 4, 7],
+ index=pd.date_range('1/1/2000', periods=3, freq='3T'))
+ assert_series_equal(result, expected)
+
def test_resample_basic_grouper(series):
s = series
| this builds on #22226 and #22238 by @WillAyd and the ``ArrowBoolArray`` by @TomAugspurger to create a first class bool array that is backed by a numpy EA array or a pyarrow array if available. Uses this MaskArray as the mask inside IntegerArray. The is a POC to see how feasible this is generally. Note that I did this with ``bitarray`` as well, but this is more general purpose / forward looking.
Some decent memory savings as the bits are packed by arrow.
PR:
```
In [2]: s = pd.Series(np.arange(100000), dtype='Int64')
In [3]: s.nbytes
Out[3]: 812500
In [4]: s._values._mask.nbytes
Out[4]: 12500
```
0.24.1 / master
```
In [2]: s = pd.Series(np.arange(100000), dtype='Int64')
In [3]: s.nbytes
Out[3]: 900000
In [4]: s._values._mask.nbytes
Out[4]: 100000
```
TODO:
- [x] ``pyarrow`` uses ``pandas`` as a dependency, so we have a hoop to jump to actually have this import in a sane way
- [ ] we need a ``pd.bool_`` dtype to avoid exposing the pyarrow dtypes directly (mainly because we have this as an optional dep and now BoolArray can either be ``np.bool_`` or ``arrow_bool`` so need a single type to expose to end users; this is not hard just not done).
- [x] The ``BoolArray`` allows NaN's when its an ArrowBoolArray but not for a NumpyBoolArray; I avoided this entirely but just using this as a masking array (so its guaranteed as only bool types), but eventually want to allow this
- [ ] A number of missing methods on ArrowBoolArray that we should push upstream (implemented now by simply coercing and using numpy)
- [x] ~Should just rename ``MaskArray`` to ``BoolArray`` I think.~ This is now internal only, ``BoolArray`` will use this.
cc @pandas-dev/pandas-core | https://api.github.com/repos/pandas-dev/pandas/pulls/25415 | 2019-02-22T17:42:24Z | 2019-06-27T22:12:05Z | null | 2019-06-27T22:12:05Z |
ERR: doc update for ParsingError | diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index c57d27ff03ac6..493ee65f63c6a 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -32,6 +32,8 @@ class UnsortedIndexError(KeyError):
class ParserError(ValueError):
"""
Exception that is raised by an error encountered in `pd.read_csv`.
+
+ e.g. HTML Parsing will raise this error.
"""
| Added documentation to Parse Error.
- [x] closes #22881
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25414 | 2019-02-22T16:13:04Z | 2019-02-23T19:47:47Z | 2019-02-23T19:47:47Z | 2019-02-23T19:47:56Z |
DOC: Edited docstring of Interval | diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index eb511b1adb28a..e86b692e9915e 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -150,9 +150,6 @@ cdef class Interval(IntervalMixin):
Left bound for the interval.
right : orderable scalar
Right bound for the interval.
- closed : {'left', 'right', 'both', 'neither'}, default 'right'
- Whether the interval is closed on the left-side, right-side, both or
- neither.
closed : {'right', 'left', 'both', 'neither'}, default 'right'
Whether the interval is closed on the left-side, right-side, both or
neither. See the Notes for more detailed explanation.
| The docstring contained a repeated segment, which I removed. | https://api.github.com/repos/pandas-dev/pandas/pulls/25410 | 2019-02-22T13:31:11Z | 2019-02-22T15:13:31Z | 2019-02-22T15:13:31Z | 2019-02-22T15:14:22Z |
dropna(subset=...) should accept incomplete key | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 6e225185ecf84..e4c1bcf6596fe 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -153,7 +153,7 @@ MultiIndex
^^^^^^^^^^
- Bug in which incorrect exception raised by :meth:`pd.Timedelta` when testing the membership of :class:`MultiIndex` (:issue:`24570`)
--
+- Bug in :func:`dropna` does not axcept incomplete ``MultiIndex`` key (:issue:`17737`)
-
I/O
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 79f209f9ebc0a..196732e7f1b0a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4533,6 +4533,14 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
+ if isinstance(ax, MultiIndex):
+ if (len(subset[0]) < ax.nlevels and
+ isinstance(subset[0], tuple)):
+ for _ in range(ax.nlevels - len(subset[0])):
+ ax = ax.droplevel(ax.nlevels - 1)
+ elif ax.nlevels > 1 and isinstance(subset[0], str):
+ for _ in range(ax.nlevels - 1):
+ ax = ax.droplevel(ax.nlevels - 1)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 77a3d4785d295..2f7687bda883a 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -206,6 +206,22 @@ def test_dropna_tz_aware_datetime(self):
index=[0, 3])
assert_frame_equal(result, expected)
+ @pytest.mark.parametrize('input_vals', [
+ ('other', '', ''), ('I', 'a', 'x')
+ ])
+ def test_dropna_subset(self, input_vals):
+ # GH 17737
+ col = pd.MultiIndex.from_product([['I', 'II'],
+ ['a', 'b'],
+ ["x", "y"]])
+ df = pd.DataFrame(index=range(3), columns=col)
+ df['other'] = (1, 3, np.nan)
+
+ result = df.dropna(subset=[input_vals[0]])
+ expected = df.dropna(subset=[input_vals])
+
+ tm.assert_frame_equal(result, expected)
+
def test_fillna(self):
tf = self.tsframe
tf.loc[tf.index[:5], 'A'] = np.nan
|
I'm not sure if this is the best way to go about solving this problem, so feedback would be appreciated.
- [x] closes #17737
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25407 | 2019-02-22T06:34:00Z | 2019-04-20T17:40:43Z | null | 2019-04-20T17:40:43Z |
CI: Set pytest minversion to 4.0.2 | diff --git a/ci/deps/azure-27-compat.yaml b/ci/deps/azure-27-compat.yaml
index c68b51fbd6644..a7784f17d1956 100644
--- a/ci/deps/azure-27-compat.yaml
+++ b/ci/deps/azure-27-compat.yaml
@@ -18,7 +18,7 @@ dependencies:
- xlsxwriter=0.5.2
- xlwt=0.7.5
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- isort
diff --git a/ci/deps/azure-27-locale.yaml b/ci/deps/azure-27-locale.yaml
index 5679c503caddc..8636a63d02fed 100644
--- a/ci/deps/azure-27-locale.yaml
+++ b/ci/deps/azure-27-locale.yaml
@@ -20,7 +20,7 @@ dependencies:
- xlsxwriter=0.5.2
- xlwt=0.7.5
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index de1f4ad0e9a76..3f788e5ddcf39 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -26,7 +26,7 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- moto
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index a89e63a2b7d3a..9d598cddce91a 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -25,7 +25,7 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- isort
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index 3132de891299c..e58c1f599279c 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -6,7 +6,7 @@ dependencies:
- pytz
- Cython>=0.28.2
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml
index 9710bcb5bf43d..2326e8092cc85 100644
--- a/ci/deps/azure-macos-35.yaml
+++ b/ci/deps/azure-macos-35.yaml
@@ -21,11 +21,11 @@ dependencies:
- xlrd
- xlsxwriter
- xlwt
- # universal
- - pytest
- - pytest-xdist
- - pytest-mock
- isort
- pip:
- python-dateutil==2.5.3
+ # universal
+ - pytest>=4.0.2
+ - pytest-xdist
+ - pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-windows-27.yaml b/ci/deps/azure-windows-27.yaml
index 093c055e69553..f40efdfca3cbd 100644
--- a/ci/deps/azure-windows-27.yaml
+++ b/ci/deps/azure-windows-27.yaml
@@ -25,7 +25,7 @@ dependencies:
- xlwt
# universal
- cython>=0.28.2
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- moto
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index e9db271a75d9d..8517d340f2ba8 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -23,7 +23,7 @@ dependencies:
- xlwt
# universal
- cython>=0.28.2
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml
index 71b224b2c68c2..a910af36a6b10 100644
--- a/ci/deps/travis-27.yaml
+++ b/ci/deps/travis-27.yaml
@@ -39,7 +39,7 @@ dependencies:
- xlsxwriter=0.5.2
- xlwt=0.7.5
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- moto==1.3.4
diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml
index 1a65d292ef085..6f33bc58a8b21 100644
--- a/ci/deps/travis-36-doc.yaml
+++ b/ci/deps/travis-36-doc.yaml
@@ -41,6 +41,6 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- isort
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index 36dbb8013104a..34b289e6c0c2f 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -28,7 +28,7 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- moto
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index f4b9091c4300b..46875d59411d9 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -25,7 +25,7 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- moto
diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml
index e22529784b5ec..06fc0d76a3d16 100644
--- a/ci/deps/travis-36.yaml
+++ b/ci/deps/travis-36.yaml
@@ -33,7 +33,7 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-cov
- pytest-mock
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index a8a5df5894ba5..f71d29fe13378 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -12,7 +12,7 @@ dependencies:
- nomkl
- pyarrow
- pytz
- - pytest
+ - pytest>=4.0.2
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 511936467641e..1270bfec098e8 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -731,7 +731,7 @@ extensions in `numpy.testing
.. note::
- The earliest supported pytest version is 3.6.0.
+ The earliest supported pytest version is 4.0.2.
Writing tests
~~~~~~~~~~~~~
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 92364fcc9ebd2..5310667c403e5 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -202,7 +202,7 @@ pandas is equipped with an exhaustive set of unit tests, covering about 97% of
the code base as of this writing. To run it on your machine to verify that
everything is working (and that you have all of the dependencies, soft and hard,
installed), make sure you have `pytest
-<http://docs.pytest.org/en/latest/>`__ >= 3.6 and `Hypothesis
+<http://docs.pytest.org/en/latest/>`__ >= 4.0.2 and `Hypothesis
<https://hypothesis.readthedocs.io/>`__ >= 3.58, then run:
::
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..23effdc0d17ee 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -63,6 +63,20 @@ is respected in indexing. (:issue:`24076`, :issue:`16785`)
df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))
df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']
+.. _whatsnew_0250.api_breaking.deps:
+
+Increased minimum versions for dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have updated our minimum supported versions of dependencies (:issue:`23519`).
+If installed, we now require:
+
++-----------------+-----------------+----------+
+| Package | Minimum Version | Required |
++=================+=================+==========+
+| pytest (dev) | 4.0.2 | |
++-----------------+-----------------+----------+
+
.. _whatsnew_0250.api.other:
Other API Changes
diff --git a/environment.yml b/environment.yml
index ce68dccca0c07..c1669c9f49017 100644
--- a/environment.yml
+++ b/environment.yml
@@ -19,7 +19,7 @@ dependencies:
- hypothesis>=3.82
- isort
- moto
- - pytest>=4.0
+ - pytest>=4.0.2
- pytest-mock
- sphinx
- numpydoc
diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py
index 18e8d415459fd..19b1cc700261c 100644
--- a/pandas/util/_tester.py
+++ b/pandas/util/_tester.py
@@ -11,7 +11,7 @@ def test(extra_args=None):
try:
import pytest
except ImportError:
- raise ImportError("Need pytest>=3.0 to run tests")
+ raise ImportError("Need pytest>=4.0.2 to run tests")
try:
import hypothesis # noqa
except ImportError:
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 22c01ebcef7f0..be84c6f29fdeb 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -10,7 +10,7 @@ gitpython
hypothesis>=3.82
isort
moto
-pytest>=4.0
+pytest>=4.0.2
pytest-mock
sphinx
numpydoc
diff --git a/setup.cfg b/setup.cfg
index 956aa23839e73..84b8f69a83f16 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -57,6 +57,7 @@ split_penalty_after_opening_bracket = 1000000
split_penalty_logical_operator = 30
[tool:pytest]
+minversion = 4.0.2
testpaths = pandas
markers =
single: mark a test as single cpu only
| - [x] closes #23519
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25402 | 2019-02-21T15:53:07Z | 2019-02-28T12:59:33Z | 2019-02-28T12:59:33Z | 2019-02-28T14:30:12Z |
Mark test_pct_max_many_rows as high memory | diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index 10c42e0d1a1cf..6bb9dea15d1ce 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -310,6 +310,7 @@ def test_rank_pct_true(self, method, exp):
tm.assert_frame_equal(result, expected)
@pytest.mark.single
+ @pytest.mark.high_memory
def test_pct_max_many_rows(self):
# GH 18271
df = DataFrame({'A': np.arange(2**24 + 1),
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index 510a51e002918..dfcda889269ee 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -499,6 +499,7 @@ def test_rank_first_pct(dtype, ser, exp):
@pytest.mark.single
+@pytest.mark.high_memory
def test_pct_max_many_rows():
# GH 18271
s = Series(np.arange(2**24 + 1))
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 888cf78a1c66a..cb7426ce2f7c9 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1484,6 +1484,7 @@ def test_too_many_ndims(self):
algos.rank(arr)
@pytest.mark.single
+ @pytest.mark.high_memory
@pytest.mark.parametrize('values', [
np.arange(2**24 + 1),
np.arange(2**25 + 2).reshape(2**24 + 1, 2)],
| Fixes issue #25384
- [ ] closes #25384
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25400 | 2019-02-21T14:06:56Z | 2019-02-22T19:04:26Z | 2019-02-22T19:04:26Z | 2019-02-25T10:59:49Z |
BUG: pd.Series.interpolate non-numeric index column (21662) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index ccf5c43280765..98fb2af19be64 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -222,7 +222,7 @@ Numeric
- Bug in :meth:`to_numeric` in which numbers were being coerced to float, even though ``errors`` was not ``coerce`` (:issue:`24910`)
- Bug in error messages in :meth:`DataFrame.corr` and :meth:`Series.corr`. Added the possibility of using a callable. (:issue:`25729`)
- Bug in :meth:`Series.divmod` and :meth:`Series.rdivmod` which would raise an (incorrect) ``ValueError`` rather than return a pair of :class:`Series` objects as result (:issue:`25557`)
--
+- Raises a helpful exception when a non-numeric index is sent to :meth:`interpolate` with methods which require numeric index. (:issue:`21662`)
-
-
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1717b00664f92..de237d32235ca 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -25,10 +25,10 @@
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.common import (
ensure_int64, ensure_object, is_bool, is_bool_dtype,
- is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like,
- is_extension_array_dtype, is_integer, is_list_like, is_number,
- is_numeric_dtype, is_object_dtype, is_period_arraylike, is_re_compilable,
- is_scalar, is_timedelta64_dtype, pandas_dtype)
+ is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
+ is_dict_like, is_extension_array_dtype, is_integer, is_list_like,
+ is_number, is_numeric_dtype, is_object_dtype, is_period_arraylike,
+ is_re_compilable, is_scalar, is_timedelta64_dtype, pandas_dtype)
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
@@ -6863,6 +6863,18 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
+ methods = {"index", "values", "nearest", "time"}
+ is_numeric_or_datetime = (
+ is_numeric_dtype(index) or
+ is_datetime64_dtype(index) or
+ is_timedelta64_dtype(index)
+ )
+ if method not in methods and not is_numeric_or_datetime:
+ raise ValueError(
+ "Index column must be numeric or datetime type when "
+ "using {method} method other than linear. "
+ "Try setting a numeric or datetime index column before "
+ "interpolating.".format(method=method))
if isna(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 2163914f915b2..403fdb383d81a 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -870,6 +870,22 @@ def nontemporal_method(request):
return method, kwargs
+@pytest.fixture(params=['linear', 'slinear', 'zero', 'quadratic', 'cubic',
+ 'barycentric', 'krogh', 'polynomial', 'spline',
+ 'piecewise_polynomial', 'from_derivatives', 'pchip',
+ 'akima', ])
+def interp_methods_ind(request):
+ """ Fixture that returns a (method name, required kwargs) pair to
+ be tested for various Index types.
+
+ This fixture does not include methods - 'time', 'index', 'nearest',
+ 'values' as a parameterization
+ """
+ method = request.param
+ kwargs = dict(order=1) if method in ('spline', 'polynomial') else dict()
+ return method, kwargs
+
+
class TestSeriesInterpolateData():
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float),
@@ -1397,3 +1413,55 @@ def test_nonzero_warning(self):
ser = pd.Series([1, 0, 3, 4])
with tm.assert_produces_warning(FutureWarning):
ser.nonzero()
+
+ @pytest.mark.parametrize(
+ "ind",
+ [
+ ['a', 'b', 'c', 'd'],
+ pd.period_range(start="2019-01-01", periods=4),
+ pd.interval_range(start=0, end=4),
+ ])
+ def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
+ # gh 21662
+ df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
+
+ method, kwargs = interp_methods_ind
+ if method == "pchip":
+ _skip_if_no_pchip()
+
+ if method == "linear":
+ result = df[0].interpolate(**kwargs)
+ expected = pd.Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
+ assert_series_equal(result, expected)
+ else:
+ expected_error = (
+ "Index column must be numeric or datetime type when "
+ "using {method} method other than linear. "
+ "Try setting a numeric or datetime index column before "
+ "interpolating.".format(method=method))
+ with pytest.raises(ValueError, match=expected_error):
+ df[0].interpolate(method=method, **kwargs)
+
+ def test_interpolate_timedelta_index(self, interp_methods_ind):
+ """
+ Tests for non numerical index types - object, period, timedelta
+ Note that all methods except time, index, nearest and values
+ are tested here.
+ """
+ # gh 21662
+ ind = pd.timedelta_range(start=1, periods=4)
+ df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
+
+ method, kwargs = interp_methods_ind
+ if method == "pchip":
+ _skip_if_no_pchip()
+
+ if method in {"linear", "pchip"}:
+ result = df[0].interpolate(method=method, **kwargs)
+ expected = pd.Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
+ assert_series_equal(result, expected)
+ else:
+ pytest.skip(
+ "This interpolation method is not supported for "
+ "Timedelta Index yet."
+ )
| - [x] closes #21662
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Raises a helpful exception when a non-numeric index is sent to ```interpolate``` with methods which require numeric index. Skipped a few methods which can also work on datetime index.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25394 | 2019-02-21T06:10:16Z | 2019-03-24T23:34:05Z | 2019-03-24T23:34:05Z | 2019-03-25T01:59:03Z |
DOC: Fix typo of see also in DataFrame stat funcs | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e79c02d7dbdd..3647565123523 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10866,7 +10866,7 @@ def _doc_parms(cls):
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
-DataFrame.min : Return the sum over the requested axis.
+DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
| fix typo of DataFrame stat functions. | https://api.github.com/repos/pandas-dev/pandas/pulls/25388 | 2019-02-20T14:21:15Z | 2019-02-20T15:50:59Z | 2019-02-20T15:50:59Z | 2019-02-20T15:51:04Z |
Backport PR #25329 on branch 0.24.x (REGR: fix TimedeltaIndex sum and datetime subtraction with NaT (#25282, #25317)) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f528c058d2868..a7e522d27f8e2 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -26,6 +26,8 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
+- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the `Series` on the right contains null values (:issue:`25317`)
+- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index d7a8417a71be2..69cb787e0b888 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -720,11 +720,11 @@ def _sub_datetime_arraylike(self, other):
self_i8 = self.asi8
other_i8 = other.asi8
+ arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8,
- arr_mask=self._isnan)
+ arr_mask=arr_mask)
if self._hasnans or other._hasnans:
- mask = (self._isnan) | (other._isnan)
- new_values[mask] = iNaT
+ new_values[arr_mask] = iNaT
return new_values.view('timedelta64[ns]')
def _add_offset(self, offset):
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 4f0c96f7927da..bb61ac2f96da5 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -190,6 +190,8 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
"ndarray, or Series or Index containing one of those."
)
raise ValueError(msg.format(type(values).__name__))
+ if values.ndim != 1:
+ raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == 'i8':
# for compat with datetime/timedelta/period shared methods,
@@ -945,6 +947,9 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
.format(dtype=data.dtype))
data = np.array(data, copy=copy)
+ if data.ndim != 1:
+ raise ValueError("Only 1-dimensional input arrays are supported.")
+
assert data.dtype == 'm8[ns]', data
return data, inferred_freq
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2fa034670e885..55a32f1fd4fb7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -665,7 +665,8 @@ def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc.
"""
- if is_bool_dtype(result):
+ result = lib.item_from_zerodim(result)
+ if is_bool_dtype(result) or lib.is_scalar(result):
return result
attrs = self._get_attributes_dict()
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index f97a1651163e8..acf4075feb94a 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1434,6 +1434,20 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture,
class TestDatetime64OverflowHandling(object):
# TODO: box + de-duplicate
+ def test_dt64_overflow_masking(self, box_with_array):
+ # GH#25317
+ left = Series([Timestamp('1969-12-31')])
+ right = Series([NaT])
+
+ left = tm.box_expected(left, box_with_array)
+ right = tm.box_expected(right, box_with_array)
+
+ expected = TimedeltaIndex([NaT])
+ expected = tm.box_expected(expected, box_with_array)
+
+ result = left - right
+ tm.assert_equal(result, expected)
+
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = pd.Timestamp('1700-01-31')
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 6b4662ca02e80..1fec533a14a6f 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -9,6 +9,18 @@
class TestTimedeltaArrayConstructor(object):
+ def test_only_1dim_accepted(self):
+ # GH#25282
+ arr = np.array([0, 1, 2, 3], dtype='m8[h]').astype('m8[ns]')
+
+ with pytest.raises(ValueError, match="Only 1-dimensional"):
+ # 2-dim
+ TimedeltaArray(arr.reshape(2, 2))
+
+ with pytest.raises(ValueError, match="Only 1-dimensional"):
+ # 0-dim
+ TimedeltaArray(arr[[0]].squeeze())
+
def test_freq_validation(self):
# ensure that the public constructor cannot create an invalid instance
arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9
@@ -51,6 +63,16 @@ def test_copy(self):
class TestTimedeltaArray(object):
+ def test_np_sum(self):
+ # GH#25282
+ vals = np.arange(5, dtype=np.int64).view('m8[h]').astype('m8[ns]')
+ arr = TimedeltaArray(vals)
+ result = np.sum(arr)
+ assert result == vals.sum()
+
+ result = np.sum(pd.TimedeltaIndex(arr))
+ assert result == vals.sum()
+
def test_from_sequence_dtype(self):
msg = "dtype .*object.* cannot be converted to timedelta64"
with pytest.raises(ValueError, match=msg):
| Backport PR #25329: REGR: fix TimedeltaIndex sum and datetime subtraction with NaT (#25282, #25317) | https://api.github.com/repos/pandas-dev/pandas/pulls/25385 | 2019-02-20T10:53:05Z | 2019-02-20T12:01:52Z | 2019-02-20T12:01:52Z | 2019-02-20T12:01:53Z |
CLN: (re-)enable infer_dtype to catch complex | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1f0f0a408aee8..34ceeb20e260e 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -939,6 +939,7 @@ _TYPE_MAP = {
'float32': 'floating',
'float64': 'floating',
'f': 'floating',
+ 'complex64': 'complex',
'complex128': 'complex',
'c': 'complex',
'string': 'string' if PY2 else 'bytes',
@@ -1305,6 +1306,9 @@ def infer_dtype(value: object, skipna: object=None) -> str:
elif is_decimal(val):
return 'decimal'
+ elif is_complex(val):
+ return 'complex'
+
elif util.is_float_object(val):
if is_float_array(values):
return 'floating'
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 49a66efaffc11..187b37d4f788e 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -618,6 +618,37 @@ def test_decimals(self):
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
+ # complex is compatible with nan, so skipna has no effect
+ @pytest.mark.parametrize('skipna', [True, False])
+ def test_complex(self, skipna):
+ # gets cast to complex on array construction
+ arr = np.array([1.0, 2.0, 1 + 1j])
+ result = lib.infer_dtype(arr, skipna=skipna)
+ assert result == 'complex'
+
+ arr = np.array([1.0, 2.0, 1 + 1j], dtype='O')
+ result = lib.infer_dtype(arr, skipna=skipna)
+ assert result == 'mixed'
+
+ # gets cast to complex on array construction
+ arr = np.array([1, np.nan, 1 + 1j])
+ result = lib.infer_dtype(arr, skipna=skipna)
+ assert result == 'complex'
+
+ arr = np.array([1.0, np.nan, 1 + 1j], dtype='O')
+ result = lib.infer_dtype(arr, skipna=skipna)
+ assert result == 'mixed'
+
+ # complex with nans stays complex
+ arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype='O')
+ result = lib.infer_dtype(arr, skipna=skipna)
+ assert result == 'complex'
+
+ # test smaller complex dtype; will pass through _try_infer_map fastpath
+ arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
+ result = lib.infer_dtype(arr, skipna=skipna)
+ assert result == 'complex'
+
def test_string(self):
pass
| - [x] takes a small bite out of #23554
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
`complex` should already be inferred according to the docstring, but isn't. I'm hitting this while working on my PR for #23833 so splitting that off as a separate change.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25382 | 2019-02-20T08:05:21Z | 2019-02-21T13:53:43Z | 2019-02-21T13:53:43Z | 2019-02-22T07:00:33Z |
DOC: edited whatsnew typo | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index afde665407d18..db8f12669bb69 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -28,7 +28,7 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- :meth:`Timestamp.strptime` will now rise a NotImplementedError (:issue:`21257`)
+- :meth:`Timestamp.strptime` will now raise a NotImplementedError (:issue:`25016`)
.. _whatsnew_0250.api.other:
| xref: redoing the whatsnew entry for PR #25124. Typos were caought by @h-vetinari .
| https://api.github.com/repos/pandas-dev/pandas/pulls/25381 | 2019-02-20T06:22:35Z | 2019-02-20T14:06:50Z | 2019-02-20T14:06:50Z | 2019-04-15T18:29:37Z |
Backport PR #25338 on branch 0.24.x (Interval dtype fix) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 8e59c2300e7ca..f528c058d2868 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -26,6 +26,7 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
+- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index f84471c3b04e8..b73f55329e25b 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -932,13 +932,18 @@ def construct_from_string(cls, string):
attempt to construct this type from a string, raise a TypeError
if its not possible
"""
- if (isinstance(string, compat.string_types) and
- (string.startswith('interval') or
- string.startswith('Interval'))):
- return cls(string)
+ if not isinstance(string, compat.string_types):
+ msg = "a string needs to be passed, got type {typ}"
+ raise TypeError(msg.format(typ=type(string)))
+
+ if (string.lower() == 'interval' or
+ cls._match.search(string) is not None):
+ return cls(string)
- msg = "a string needs to be passed, got type {typ}"
- raise TypeError(msg.format(typ=type(string)))
+ msg = ('Incorrectly formatted string passed to constructor. '
+ 'Valid formats include Interval or Interval[dtype] '
+ 'where dtype is numeric, datetime, or timedelta')
+ raise TypeError(msg)
@property
def type(self):
@@ -979,7 +984,7 @@ def is_dtype(cls, dtype):
return True
else:
return False
- except ValueError:
+ except (ValueError, TypeError):
return False
else:
return False
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 0fe0a845f5129..71eaf504bdc46 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -501,10 +501,11 @@ def test_construction_not_supported(self, subtype):
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
- def test_construction_errors(self):
+ @pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
+ def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
- IntervalDtype('xx')
+ IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
@@ -513,7 +514,7 @@ def test_construction_from_string(self):
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
- 'foo', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
+ 0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
@@ -522,10 +523,12 @@ def test_construction_from_string_errors(self, string):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
- 'interval[foo]'])
+ 'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
- msg = 'could not construct IntervalDtype'
+ msg = ("Incorrectly formatted string passed to constructor. "
+ r"Valid formats include Interval or Interval\[dtype\] "
+ "where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@@ -549,6 +552,7 @@ def test_is_dtype(self):
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
+ assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 4d3c9926fc5ae..b2aac441db195 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -563,6 +563,13 @@ def test_comp_ops_df_compat(self):
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
+ def test_compare_series_interval_keyword(self):
+ # GH 25338
+ s = Series(['IntervalA', 'IntervalB', 'IntervalC'])
+ result = s == 'IntervalA'
+ expected = Series([True, False, False])
+ assert_series_equal(result, expected)
+
class TestSeriesFlexComparisonOps(object):
| Backport PR #25338: Interval dtype fix | https://api.github.com/repos/pandas-dev/pandas/pulls/25380 | 2019-02-20T03:51:44Z | 2019-02-20T08:05:50Z | 2019-02-20T08:05:50Z | 2019-02-20T08:05:50Z |
CI: Add 'conda list' for azure/posix after activate 'pandas-dev' | diff --git a/ci/incremental/setup_conda_environment.cmd b/ci/incremental/setup_conda_environment.cmd
index c104d78591384..1893954c570c8 100644
--- a/ci/incremental/setup_conda_environment.cmd
+++ b/ci/incremental/setup_conda_environment.cmd
@@ -16,6 +16,7 @@ conda remove --all -q -y -n pandas-dev
conda env create --file=ci\deps\azure-windows-%CONDA_PY%.yaml
call activate pandas-dev
+@rem Display pandas-dev environment (for debugging)
conda list
if %errorlevel% neq 0 exit /b %errorlevel%
diff --git a/ci/incremental/setup_conda_environment.sh b/ci/incremental/setup_conda_environment.sh
index f174c17a614d8..6903553abcfe9 100755
--- a/ci/incremental/setup_conda_environment.sh
+++ b/ci/incremental/setup_conda_environment.sh
@@ -23,6 +23,9 @@ set +v
source activate pandas-dev
set -v
+# Display pandas-dev environment (for debugging)
+conda list
+
# remove any installed pandas package
# w/o removing anything else
echo
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index d1a940f119228..5169a55ae6e91 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -100,6 +100,7 @@ pip list --format columns |grep pandas
echo "[running setup.py develop]"
python setup.py develop || exit 1
+# Display pandas-dev environment (for debugging)
echo
echo "[show environment]"
conda list
| Running into debugging problems in #24984 because `conda list` is never called after installing the `pandas-dev` environment.
It seems that this is just missing in azure/posix (I reflected the comment in other locations not least to show where else this is already done). | https://api.github.com/repos/pandas-dev/pandas/pulls/25377 | 2019-02-19T21:19:37Z | 2019-02-24T10:20:29Z | null | 2019-02-24T10:21:00Z |
Backport PR #25371 on branch 0.24.x (Remove spurious MultiIndex creation in `_set_axis_name`) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1d8077873e1ea..45a03f19bb3bb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1331,7 +1331,6 @@ def _set_axis_name(self, name, axis=0, inplace=False):
cat 4
monkey 2
"""
- pd.MultiIndex.from_product([["mammal"], ['dog', 'cat', 'monkey']])
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
| Backport PR #25371: Remove spurious MultiIndex creation in `_set_axis_name` | https://api.github.com/repos/pandas-dev/pandas/pulls/25372 | 2019-02-19T13:13:05Z | 2019-02-19T13:52:12Z | 2019-02-19T13:52:12Z | 2019-02-19T13:52:13Z |
Remove spurious MultiIndex creation in `_set_axis_name` | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3a73861086bed..6e79c02d7dbdd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1333,7 +1333,6 @@ def _set_axis_name(self, name, axis=0, inplace=False):
cat 4
monkey 2
"""
- pd.MultiIndex.from_product([["mammal"], ['dog', 'cat', 'monkey']])
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
| * Resovles #25370
* Introduced by #22969
- [x] closes #25370
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ N/A ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25371 | 2019-02-19T11:45:56Z | 2019-02-19T13:12:35Z | 2019-02-19T13:12:35Z | 2019-02-19T13:12:41Z |
BUG: Fix potential segfault after pd.Categorical(pd.Series(...), categories=...) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index a7e522d27f8e2..f2d7e473fe11d 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -30,6 +30,8 @@ Fixed Regressions
- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
+- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`)
+
.. _whatsnew_0242.enhancements:
Enhancements
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 79e565df94eae..337312553262d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -323,14 +323,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None,
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step futher below
- if is_categorical(values):
- # GH23814, for perf, if values._values already an instance of
- # Categorical, set values to codes, and run fastpath
- if (isinstance(values, (ABCSeries, ABCIndexClass)) and
- isinstance(values._values, type(self))):
- values = values._values.codes.copy()
- fastpath = True
-
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
@@ -382,7 +374,7 @@ def __init__(self, values, categories=None, ordered=None, dtype=None,
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
- old_codes = (values.cat.codes if isinstance(values, ABCSeries)
+ old_codes = (values._values.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
@@ -2625,6 +2617,9 @@ def _recode_for_categories(codes, old_categories, new_categories):
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
+ elif new_categories.equals(old_categories):
+ # Same categories, so no need to actually recode
+ return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 25c299692ceca..f07e3aba53cd4 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -212,6 +212,18 @@ def test_constructor(self):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
+ def test_constructor_with_existing_categories(self):
+ # GH25318: constructing with pd.Series used to bogusly skip recoding
+ # categories
+ c0 = Categorical(["a", "b", "c", "a"])
+ c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
+
+ c2 = Categorical(c0, categories=c1.categories)
+ tm.assert_categorical_equal(c1, c2)
+
+ c3 = Categorical(Series(c0), categories=c1.categories)
+ tm.assert_categorical_equal(c1, c3)
+
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
| Closes #25318
| https://api.github.com/repos/pandas-dev/pandas/pulls/25368 | 2019-02-19T08:07:48Z | 2019-03-03T02:36:37Z | 2019-03-03T02:36:37Z | 2019-03-03T02:36:37Z |
#23049: test for Fatal Stack Overflow stemming From Misuse of astype(… | diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 59497153c8524..c2364dc135a9a 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -504,6 +504,16 @@ def test_concat_numerical_names(self):
names=[1, 2]))
tm.assert_frame_equal(result, expected)
+ def test_concat_astype_dup_col(self):
+ # gh 23049
+ df = pd.DataFrame([{'a': 'b'}])
+ df = pd.concat([df, df], axis=1)
+
+ result = df.astype('category')
+ expected = pd.DataFrame(np.array(["b", "b"]).reshape(1, 2),
+ columns=["a", "a"]).astype("category")
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameCombineFirst(TestData):
| …'category')
- [x] closes #23049
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The referenced issue seems to be solved in the current master. So added a test for the edge case.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25366 | 2019-02-19T06:31:01Z | 2019-02-19T13:14:25Z | 2019-02-19T13:14:25Z | 2019-02-19T18:55:22Z |
DOC: modify typos in Contributing section | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index c9d6845107dfc..511936467641e 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -54,7 +54,7 @@ Bug reports must:
...
```
-#. Include the full version string of *pandas* and its dependencies. You can use the built in function::
+#. Include the full version string of *pandas* and its dependencies. You can use the built-in function::
>>> import pandas as pd
>>> pd.show_versions()
@@ -211,7 +211,7 @@ See the full conda docs `here <http://conda.pydata.org/docs>`__.
Creating a Python Environment (pip)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If you aren't using conda for you development environment, follow these instructions.
+If you aren't using conda for your development environment, follow these instructions.
You'll need to have at least python3.5 installed on your system.
.. code-block:: none
@@ -484,7 +484,7 @@ contributing them to the project::
./ci/code_checks.sh
-The script verify the linting of code files, it looks for common mistake patterns
+The script verifies the linting of code files, it looks for common mistake patterns
(like missing spaces around sphinx directives that make the documentation not
being rendered properly) and it also validates the doctests. It is possible to
run the checks independently by using the parameters ``lint``, ``patterns`` and
@@ -675,7 +675,7 @@ Otherwise, you need to do it manually:
You'll also need to
-1. write a new test that asserts a warning is issued when calling with the deprecated argument
+1. Write a new test that asserts a warning is issued when calling with the deprecated argument
2. Update all of pandas existing tests and code to use the new argument
See :ref:`contributing.warnings` for more.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I participated in an open source sprint day today. This is my first attempt to request a PR to contribute to open source. I made changes to fix a few minor typos. | https://api.github.com/repos/pandas-dev/pandas/pulls/25365 | 2019-02-19T05:48:42Z | 2019-02-19T08:45:51Z | 2019-02-19T08:45:51Z | 2019-02-20T08:01:39Z |
Backport PR #25352: TST: xfail excel styler tests, xref GH25351 | diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 717e9bc23c6b1..e4dd18db37eac 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2417,7 +2417,10 @@ def style(df):
['', '', '']],
index=df.index, columns=df.columns)
- def assert_equal_style(cell1, cell2):
+ def assert_equal_style(cell1, cell2, engine):
+ if engine in ['xlsxwriter', 'openpyxl']:
+ pytest.xfail(reason=("GH25351: failing on some attribute "
+ "comparisons in {}".format(engine)))
# XXX: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
@@ -2461,7 +2464,7 @@ def custom_converter(css):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
assert cell1.value == cell2.value
- assert_equal_style(cell1, cell2)
+ assert_equal_style(cell1, cell2, engine)
n_cells += 1
# ensure iteration actually happened:
@@ -2519,7 +2522,7 @@ def custom_converter(css):
assert cell1.number_format == 'General'
assert cell2.number_format == '0%'
else:
- assert_equal_style(cell1, cell2)
+ assert_equal_style(cell1, cell2, engine)
assert cell1.value == cell2.value
n_cells += 1
@@ -2537,7 +2540,7 @@ def custom_converter(css):
assert not cell1.font.bold
assert cell2.font.bold
else:
- assert_equal_style(cell1, cell2)
+ assert_equal_style(cell1, cell2, engine)
assert cell1.value == cell2.value
n_cells += 1
| Backports https://github.com/pandas-dev/pandas/pull/25352, as the excel tests are also failing on 0.24.x (cc @jreback ) | https://api.github.com/repos/pandas-dev/pandas/pulls/25363 | 2019-02-18T15:17:54Z | 2019-02-19T08:44:04Z | 2019-02-19T08:44:04Z | 2019-02-19T08:44:49Z |
DOC: Fixing SS02 errors as per #25113 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index ac6aade106ce6..8aad8bf80deb9 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -241,8 +241,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL06, GL07, GL09, SS04, PR03, PR05, PR10, EX04, RT04, RT05, SS05, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR04,PR05,EX04,RT04,RT05,SS05,SA05
+ MSG='Validate docstrings (GL06, GL07, GL09, SS04, PR03, PR05, PR10, EX04, RT04, RT05, SS05, SA05, SS02)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR04,PR05,EX04,RT04,RT05,SS05,SA05,SS02
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 6e40063fb925a..35c8562815ced 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -98,7 +98,7 @@ _no_input = object()
@cython.wraparound(False)
def ints_to_pytimedelta(int64_t[:] arr, box=False):
"""
- convert an i8 repr to an ndarray of timedelta or Timedelta (if box ==
+ Convert an i8 repr to an ndarray of timedelta or Timedelta (if box ==
True)
Parameters
@@ -247,7 +247,7 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
cdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
- """ return a casting of the unit represented to nanoseconds
+ """ Return a casting of the unit represented to nanoseconds
round the fractional part of a float to our precision, p """
cdef:
int64_t m
@@ -792,7 +792,7 @@ cdef class _Timedelta(timedelta):
def _ensure_components(_Timedelta self):
"""
- compute the components
+ Compute the components
"""
if self.is_populated:
return
@@ -815,7 +815,7 @@ cdef class _Timedelta(timedelta):
cpdef timedelta to_pytimedelta(_Timedelta self):
"""
- return an actual datetime.timedelta object
+ Return an actual datetime.timedelta object
note: we lose nanosecond resolution if any
"""
return timedelta(microseconds=int(self.value) / 1000)
@@ -851,7 +851,7 @@ cdef class _Timedelta(timedelta):
return self.value / 1e9
def view(self, dtype):
- """ array view compat """
+ """ Array view compat """
return np.timedelta64(self.value).view(dtype)
@property
@@ -1260,7 +1260,7 @@ class Timedelta(_Timedelta):
def floor(self, freq):
"""
- return a new Timedelta floored to this resolution
+ Return a new Timedelta floored to this resolution
Parameters
----------
@@ -1270,7 +1270,7 @@ class Timedelta(_Timedelta):
def ceil(self, freq):
"""
- return a new Timedelta ceiled to this resolution
+ Return a new Timedelta ceiled to this resolution
Parameters
----------
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 2ee8759b9bdd8..70842b02cf49a 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1080,7 +1080,7 @@ def create_table_index(self, key, **kwargs):
s.create_index(**kwargs)
def groups(self):
- """return a list of all the top-level nodes (that are not themselves a
+ """Return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
| Some of the SS02 error mentioned are not because of methods but of docstring.
For example, in the first SS02 error pandas/tseries/offsets.py has no method named normalize.
- [X] Closes #25113
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25362 | 2019-02-18T14:20:49Z | 2019-03-19T04:17:10Z | null | 2019-03-19T04:17:10Z |
Backport PR #25360 on branch 0.24.x (DOC: Correct doc mistake in combiner func) | diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst
index 02cbc7e2c3b6d..bbec7b5de1d2e 100644
--- a/doc/source/getting_started/basics.rst
+++ b/doc/source/getting_started/basics.rst
@@ -505,7 +505,7 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above:
.. ipython:: python
def combiner(x, y):
- np.where(pd.isna(x), y, x)
+ return np.where(pd.isna(x), y, x)
df1.combine(df2, combiner)
.. _basics.stats:
| Backport PR #25360: DOC: Correct doc mistake in combiner func | https://api.github.com/repos/pandas-dev/pandas/pulls/25361 | 2019-02-18T13:30:26Z | 2019-02-18T13:34:29Z | 2019-02-18T13:34:29Z | 2019-02-18T18:38:29Z |
DOC: Correct doc mistake in combiner func | diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst
index 02cbc7e2c3b6d..bbec7b5de1d2e 100644
--- a/doc/source/getting_started/basics.rst
+++ b/doc/source/getting_started/basics.rst
@@ -505,7 +505,7 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above:
.. ipython:: python
def combiner(x, y):
- np.where(pd.isna(x), y, x)
+ return np.where(pd.isna(x), y, x)
df1.combine(df2, combiner)
.. _basics.stats:
| Closes #25359.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25360 | 2019-02-18T10:27:49Z | 2019-02-18T13:29:56Z | 2019-02-18T13:29:56Z | 2019-02-18T19:00:44Z |
DOC: Fix #24268 by updating description for keep in Series.nlargest | diff --git a/pandas/core/series.py b/pandas/core/series.py
index cada6663ce651..f6598ed1ee614 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3098,8 +3098,10 @@ def nlargest(self, n=5, keep='first'):
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- - ``first`` : take the first occurrences based on the index order
- - ``last`` : take the last occurrences based on the index order
+ - ``first`` : return the first `n` occurrences in order
+ of appearance.
+ - ``last`` : return the last `n` occurrences in reverse
+ order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
@@ -3194,8 +3196,10 @@ def nsmallest(self, n=5, keep='first'):
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- - ``first`` : take the first occurrences based on the index order
- - ``last`` : take the last occurrences based on the index order
+ - ``first`` : return the first `n` occurrences in order
+ of appearance.
+ - ``last`` : return the last `n` occurrences in reverse
+ order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
@@ -3236,7 +3240,7 @@ def nsmallest(self, n=5, keep='first'):
Monserat 5200
dtype: int64
- The `n` largest elements where ``n=5`` by default.
+ The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Monserat 5200
| Fix #24268 by updating the description of `keep` in Series.nlargest
- [x] closes #24268
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25358 | 2019-02-18T08:38:09Z | 2019-03-05T03:07:09Z | 2019-03-05T03:07:08Z | 2019-03-05T03:07:09Z |
CLN: Parmeterize test cases | diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 9d0bce3b342b4..806bd7f2b7c93 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -704,45 +704,29 @@ def test_complex(self):
# Complex data type should raise error
pytest.raises(ValueError, df.to_sql, 'test_complex', self.conn)
- def test_to_sql_index_label(self):
- temp_frame = DataFrame({'col1': range(4)})
-
+ @pytest.mark.parametrize("index_name,index_label,expected", [
# no index name, defaults to 'index'
- sql.to_sql(temp_frame, 'test_index_label', self.conn)
- frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
- assert frame.columns[0] == 'index'
-
+ (None, None, "index"),
# specifying index_label
- sql.to_sql(temp_frame, 'test_index_label', self.conn,
- if_exists='replace', index_label='other_label')
- frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
- assert frame.columns[0] == "other_label"
-
+ (None, "other_label", "other_label"),
# using the index name
- temp_frame.index.name = 'index_name'
- sql.to_sql(temp_frame, 'test_index_label', self.conn,
- if_exists='replace')
- frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
- assert frame.columns[0] == "index_name"
-
+ ("index_name", None, "index_name"),
# has index name, but specifying index_label
- sql.to_sql(temp_frame, 'test_index_label', self.conn,
- if_exists='replace', index_label='other_label')
- frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
- assert frame.columns[0] == "other_label"
-
+ ("index_name", "other_label", "other_label"),
# index name is integer
- temp_frame.index.name = 0
- sql.to_sql(temp_frame, 'test_index_label', self.conn,
- if_exists='replace')
- frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
- assert frame.columns[0] == "0"
-
- temp_frame.index.name = None
+ (0, None, "0"),
+ # index name is None but index label is integer
+ (None, 0, "0"),
+ ])
+ def test_to_sql_index_label(self, index_name,
+ index_label, expected):
+ temp_frame = DataFrame({'col1': range(4)})
+ temp_frame.index.name = index_name
+ query = 'SELECT * FROM test_index_label'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
- if_exists='replace', index_label=0)
- frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
- assert frame.columns[0] == "0"
+ index_label=index_label)
+ frame = sql.read_sql_query(query, self.conn)
+ assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
| - [x] xref #22624
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25355 | 2019-02-17T19:04:46Z | 2019-03-09T02:02:56Z | 2019-03-09T02:02:56Z | 2019-03-09T02:03:01Z |
DOC: Fixing SA04 errors as per #25337 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c4840f1e836c4..f12dc7dec3a51 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -241,8 +241,10 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05
+
+ MSG='Validate docstrings (GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA04 SA05)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA04,SA05
+
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 6e40063fb925a..24a684b597cdf 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1089,7 +1089,7 @@ cdef class _Timedelta(timedelta):
See Also
--------
- Timestamp.isoformat
+ Timestamp.isoformat: Format Timestamp as ISO 8601
Notes
-----
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 050749741e7bd..7be08be51b527 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -262,24 +262,30 @@ def plot(self):
@Appender(_doc % dict(klass="DataFrame",
- others=("register_series_accessor, "
- "register_index_accessor")))
+ others=("register_series_accessor: Register a custom"
+ "accessor on DataFrame objects."
+ "register_index_accessor: Register a custom"
+ "accessor on Index objects.")))
def register_dataframe_accessor(name):
from pandas import DataFrame
return _register_accessor(name, DataFrame)
@Appender(_doc % dict(klass="Series",
- others=("register_dataframe_accessor, "
- "register_index_accessor")))
+ others=("register_dataframe_accessor: Register a custom"
+ "accessor on DataFrame objects"
+ "register_index_accessor: Register a custom"
+ "accessor on Index objects.")))
def register_series_accessor(name):
from pandas import Series
return _register_accessor(name, Series)
@Appender(_doc % dict(klass="Index",
- others=("register_dataframe_accessor, "
- "register_series_accessor")))
+ others=("register_dataframe_accessor: Register a custom\
+ accessor on DataFrame objects. "
+ "register_series_accessor: Register a custom\
+ accessor on Series objects.")))
def register_index_accessor(name):
from pandas import Index
return _register_accessor(name, Index)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 4a71951e2435e..20946b99a3f9b 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -300,8 +300,8 @@ def unique(values):
See Also
--------
- Index.unique
- Series.unique
+ Index.unique: Return unique values in the index.
+ Series.unique: Return unique values of Series object.
Examples
--------
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index e770281596134..a3d6d532c3393 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -171,8 +171,10 @@ def _from_factorized(cls, values, original):
See Also
--------
- pandas.factorize
- ExtensionArray.factorize
+ pandas.factorize: Encode the object as an enumerated type or
+ categorical variable.
+ ExtensionArray.factorize: Encode the extension array as
+ an enumerated type.
"""
raise AbstractMethodError(cls)
@@ -377,7 +379,7 @@ def _values_for_argsort(self):
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort: Return the indices that would sort this array.
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
@@ -403,7 +405,7 @@ def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
See Also
--------
- numpy.argsort : Sorting implementation used internally.
+ numpy.argsort : Sort implementation used internally.
"""
# Implementor note: You have two places to override the behavior of
# argsort.
@@ -647,7 +649,7 @@ def factorize(self, na_sentinel=-1):
See Also
--------
- pandas.factorize : Top-level factorize method that dispatches here.
+ pandas.factorize: Top-level factorize method that dispatches here.
Notes
-----
@@ -776,8 +778,8 @@ def take(self, indices, allow_fill=False, fill_value=None):
See Also
--------
- numpy.take
- pandas.api.extensions.take
+ numpy.take: Take elements from an array along an axis.
+ pandas.api.extensions.take: Take elements from an array.
Examples
--------
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 37a24a54be8b1..de884cd638823 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -276,8 +276,8 @@ class Categorical(ExtensionArray, PandasObject):
See Also
--------
- api.types.CategoricalDtype : Type for categorical data.
- CategoricalIndex : An Index with an underlying ``Categorical``.
+ api.types.CategoricalDtype: Type for categorical data.
+ CategoricalIndex: An Index with an underlying ``Categorical``.
Notes
-----
@@ -421,12 +421,12 @@ def categories(self):
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
+ rename_categories: Rename categories.
+ reorder_categories: Reorder categories into specified new categories.
+ add_categories: Add new categories.
+ remove_categories: Remove specified categories.
+ remove_unused_categories: Remove unused categories.
+ set_categories: Set new categories inplace.
"""
return self.dtype.categories
@@ -836,11 +836,11 @@ def set_categories(self, new_categories, ordered=None, rename=False,
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
+ rename_categories: Rename categories.
+ reorder_categories: Reorder categories into specified new categories.
+ add_categories: Add new categories.
+ remove_categories: Remove specified categories.
+ remove_unused_categorie: Remove unused categories.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
@@ -908,11 +908,11 @@ def rename_categories(self, new_categories, inplace=False):
See Also
--------
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
+ reorder_categories: Reorder categories into specified new categories.
+ add_categories: Add new categories.
+ remove_categories: Remove specified categories.
+ remove_unused_categories: Remove unused categories.
+ set_categories: Set new categories inplace.
Examples
--------
@@ -986,11 +986,11 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
See Also
--------
- rename_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
+ rename_categories: Rename categories.
+ add_categories: Add new categories.
+ remove_categories: Remove specified categories.
+ remove_unused_categories: Remove unused categories.
+ set_categories: Set new categories inplace.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
@@ -1026,11 +1026,11 @@ def add_categories(self, new_categories, inplace=False):
See Also
--------
- rename_categories
- reorder_categories
- remove_categories
- remove_unused_categories
- set_categories
+ rename_categories: Rename categories.
+ reorder_categories: Reorder categories into specified new categories.
+ remove_categories: Remove specified categories.
+ remove_unused_categories: Remove unused categories.
+ set_categories: Set new categories inplace.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
@@ -1075,11 +1075,11 @@ def remove_categories(self, removals, inplace=False):
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_unused_categories
- set_categories
+ rename_categories: Rename categories.
+ reorder_categories: Reorder categories into specified new categories.
+ add_categories: Add new categories.
+ remove_unused_categories: Remove unused categories.
+ set_categories: Set new categories inplace.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
@@ -1118,11 +1118,11 @@ def remove_unused_categories(self, inplace=False):
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- set_categories
+ rename_categories: Rename categories.
+ reorder_categories: Reorders categories into specified new categories.
+ add_categories: Add new categories.
+ remove_categories: Remove specified categories.
+ set_categories: Set new categories inplace.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
@@ -1366,7 +1366,8 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of the
+ array.
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@@ -1403,7 +1404,6 @@ def isna(self):
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
-
"""
ret = self._codes == -1
@@ -1423,10 +1423,9 @@ def notna(self):
See Also
--------
- notna : Top-level notna.
- notnull : Alias of notna.
- Categorical.isna : Boolean inverse of Categorical.notna.
-
+ notna: Top-level notna.
+ notnull: Alias of notna.
+ Categorical.isna: Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
@@ -1469,8 +1468,8 @@ def value_counts(self, dropna=True):
See Also
--------
- Series.value_counts
-
+ Series.value_counts: Return a Series containing counts of unique
+ values.
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
@@ -1544,7 +1543,7 @@ def argsort(self, *args, **kwargs):
See Also
--------
- numpy.ndarray.argsort
+ numpy.ndarray.argsort: Return the indices that would sort this array.
Notes
-----
@@ -1597,8 +1596,9 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
See Also
--------
- Categorical.sort
- Series.sort_values
+ Categorical.sort: Sort the Category inplace by category value.
+ Series.sort_values: Sort a Series in ascending or descending
+ order by some criterion.
Examples
--------
@@ -1845,8 +1845,8 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
See Also
--------
- Series.take : Similar method for Series.
- numpy.ndarray.take : Similar method for NumPy arrays.
+ Series.take: Similar method for Series.
+ numpy.ndarray.take: Similar method for NumPy arrays.
Examples
--------
@@ -2294,9 +2294,12 @@ def unique(self):
See Also
--------
- unique
- CategoricalIndex.unique
- Series.unique
+ unique: Hash table-based unique. Uniques are returned in order of
+ appearance. This does NOT sort.
+ CategoricalIndex.unique: Return Index of unique values in the object.
+ Includes NA values. The order of the original is preserved.
+ Series.unique: Return Index of unique values in the object. Includes
+ NA values. The order of the original is preserved.
"""
@@ -2427,7 +2430,7 @@ def isin(self, values):
See Also
--------
- pandas.Series.isin : Equivalent method on Series.
+ pandas.Series.isin: Equivalent method on Series.
Examples
--------
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 94668c74c1693..86dd3a3bbbf92 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -158,10 +158,10 @@ def strftime(self, date_format):
See Also
--------
- to_datetime : Convert the given argument to datetime.
- DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
- DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
- DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
+ to_datetime: Convert the given argument to datetime.
+ DatetimeIndex.normalize: Return DatetimeIndex with times to midnight.
+ DatetimeIndex.round: Round the DatetimeIndex to the specified freq.
+ DatetimeIndex.floor: Floor the DatetimeIndex to the specified freq.
Examples
--------
@@ -670,7 +670,7 @@ def repeat(self, repeats, *args, **kwargs):
See Also
--------
- numpy.ndarray.repeat
+ numpy.ndarray.repeat: Repeat elements of an array.
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
@@ -1390,9 +1390,10 @@ def min(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.min
- Index.min : Return the minimum value in an Index.
- Series.min : Return the minimum value in a Series.
+ numpy.ndarray.min: Return the minimum of the Array along
+ a given axis.
+ Index.min: Return the minimum value in an Index.
+ Series.min: Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
@@ -1410,9 +1411,10 @@ def max(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.max
- Index.max : Return the maximum value in an Index.
- Series.max : Return the maximum value in a Series.
+ numpy.ndarray.max: Return the maximum of an Array
+ along a given axis.
+ Index.max: Return the maximum value in an Index.
+ Series.max: Return the maximum value in a Series.
"""
# TODO: skipna is broken with max.
# See https://github.com/pandas-dev/pandas/issues/24265
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 75cf658423210..e90c950fadafd 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -815,8 +815,8 @@ def tz_convert(self, tz):
See Also
--------
- DatetimeIndex.tz : A timezone that has a variable offset from UTC.
- DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
+ DatetimeIndex.tz: A timezone that has a variable offset from UTC.
+ DatetimeIndex.tz_localize: Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
@@ -940,7 +940,7 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
See Also
--------
- DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
+ DatetimeIndex.tz_convert: Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
@@ -1087,9 +1087,9 @@ def normalize(self):
See Also
--------
- floor : Floor the datetimes to the specified freq.
- ceil : Ceil the datetimes to the specified freq.
- round : Round the datetimes to the specified freq.
+ floor: Floor the datetimes to the specified freq.
+ ceil: Ceil the datetimes to the specified freq.
+ round: Round the datetimes to the specified freq.
Examples
--------
@@ -1341,9 +1341,9 @@ def date(self):
See Also
--------
- Series.dt.dayofweek : Alias.
- Series.dt.weekday : Alias.
- Series.dt.day_name : Returns the name of the day of the week.
+ Series.dt.dayofweek: Alias.
+ Series.dt.weekday: Alias.
+ Series.dt.day_name: Return the name of the day of the week.
Examples
--------
@@ -1387,9 +1387,9 @@ def date(self):
See Also
--------
- is_month_start : Return a boolean indicating whether the date
+ is_month_start: Return a boolean indicating whether the date
is the first day of the month.
- is_month_end : Return a boolean indicating whether the date
+ is_month_end: Return a boolean indicating whether the date
is the last day of the month.
Examples
@@ -1445,8 +1445,8 @@ def date(self):
See Also
--------
- quarter : Return the quarter of the date.
- is_quarter_end : Similar property for indicating the quarter start.
+ quarter: Return the quarter of the date.
+ is_quarter_end: Similar property for indicating the quarter start.
Examples
--------
@@ -1486,8 +1486,8 @@ def date(self):
See Also
--------
- quarter : Return the quarter of the date.
- is_quarter_start : Similar property indicating the quarter start.
+ quarter: Return the quarter of the date.
+ is_quarter_start: Similar property indicating the quarter start.
Examples
--------
@@ -1527,7 +1527,7 @@ def date(self):
See Also
--------
- is_year_end : Similar property indicating the last day of the year.
+ is_year_end: Similar property indicating the last day of the year.
Examples
--------
@@ -1570,7 +1570,7 @@ def date(self):
See Also
--------
- is_year_start : Similar property indicating the start of the year.
+ is_year_start: Similar property indicating the start of the year.
Examples
--------
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index fd90aec3b5e8c..0f393e5555ee7 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -480,7 +480,8 @@ def value_counts(self, dropna=True):
See Also
--------
- Series.value_counts
+ Series.value_counts: Return a Series containing counts of unique
+ values.
"""
@@ -521,7 +522,7 @@ def _values_for_argsort(self):
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort: Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = data.min() - 1
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 1e671c7bd956a..64e5ffd07c694 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -87,11 +87,11 @@
See Also
--------
-Index : The base pandas Index type.
-Interval : A bounded slice-like interval; the elements of an %(klass)s.
-interval_range : Function to create a fixed frequency IntervalIndex.
-cut : Bin values into discrete Intervals.
-qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
+Index: The base pandas Index type.
+Interval: A bounded slice-like interval; the elements of an %(klass)s.
+interval_range: Function to create a fixed frequency IntervalIndex.
+cut: Bin values into discrete Intervals.
+qcut: Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
------
@@ -243,9 +243,9 @@ def _from_factorized(cls, values, original):
See Also
--------
- interval_range : Function to create a fixed frequency IntervalIndex.
- %(klass)s.from_arrays : Construct from a left and right array.
- %(klass)s.from_tuples : Construct from a sequence of tuples.
+ interval_range: Function to create a fixed frequency IntervalIndex.
+ %(klass)s.from_arrays: Construct from a left and right array.
+ %(klass)s.from_tuples: Construct from a sequence of tuples.
Examples
--------
@@ -295,10 +295,10 @@ def from_breaks(cls, breaks, closed='right', copy=False, dtype=None):
See Also
--------
- interval_range : Function to create a fixed frequency IntervalIndex.
- %(klass)s.from_breaks : Construct an %(klass)s from an array of
+ interval_range: Function to create a fixed frequency IntervalIndex.
+ %(klass)s.from_breaks: Construct an %(klass)s from an array of
splits.
- %(klass)s.from_tuples : Construct an %(klass)s from an
+ %(klass)s.from_tuples: Construct an %(klass)s from an
array-like of tuples.
Notes
@@ -345,13 +345,13 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None):
See Also
--------
- interval_range : Function to create a fixed frequency IntervalIndex.
- %(klass)s.from_arrays : Construct an %(klass)s from a left and
- right array.
- %(klass)s.from_breaks : Construct an %(klass)s from an array of
- splits.
- %(klass)s.from_tuples : Construct an %(klass)s from an
- array-like of tuples.
+ interval_range: Function to create a fixed frequency IntervalIndex.
+ %(klass)s.from_arrays: Construct an %(klass)s from a left and
+ right array.
+ %(klass)s.from_breaks: Construct an %(klass)s from an array of
+ splits.
+ %(klass)s.from_tuples: Construct an %(klass)s from an
+ array-like of tuples.
Examples
--------
@@ -387,11 +387,11 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None):
See Also
--------
- interval_range : Function to create a fixed frequency IntervalIndex.
- %(klass)s.from_arrays : Construct an %(klass)s from a left and
- right array.
- %(klass)s.from_breaks : Construct an %(klass)s from an array of
- splits.
+ interval_range: Function to create a fixed frequency IntervalIndex.
+ %(klass)s.from_arrays: Construct an %(klass)s from a left and
+ right array.
+ %(klass)s.from_breaks: Construct an %(klass)s from an array of
+ splits.
Examples
--------
@@ -795,7 +795,8 @@ def value_counts(self, dropna=True):
See Also
--------
- Series.value_counts
+ Series.value_counts: Return a Series containing counts of
+ unique values.
"""
# TODO: implement this is a non-naive way!
from pandas.core.algorithms import value_counts
@@ -1036,7 +1037,7 @@ def repeat(self, repeats, axis=None):
See Also
--------
- Interval.overlaps : Check whether two Interval objects overlap.
+ Interval.overlaps: Check whether two Interval objects overlap.
Examples
--------
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 3ddceb8c2839d..161941b76d8ab 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -109,8 +109,8 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
See Also
--------
- period_array : Create a new PeriodArray.
- pandas.PeriodIndex : Immutable Index for period data.
+ period_array: Create a new PeriodArray.
+ PeriodIndex: Immutable Index for period data.
Notes
-----
@@ -732,8 +732,11 @@ def period_array(data, freq=None, copy=False):
See Also
--------
- PeriodArray
- pandas.PeriodIndex
+ PeriodArray: Immutable ndarray holding ordinal values indicating regular
+ periods in time such as particular years, quarters, months, etc.
+ PeriodIndex: Immutable ndarray holding ordinal values indicating
+ regular periods in time such as particular years, quarters, months,
+ etc.
Examples
--------
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 6114e578dc90f..0fdb39fbbf2b6 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -1450,7 +1450,8 @@ def all(self, axis=None, *args, **kwargs):
See Also
--------
- numpy.all
+ numpy.all: Test whether all array elements along a given axis
+ evaluate to True.
"""
nv.validate_all(args, kwargs)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index f896596dd5216..607cb2ecd86f6 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -690,8 +690,10 @@ def _is_homogeneous_type(self):
See Also
--------
- DataFrame._is_homogeneous_type
- MultiIndex._is_homogeneous_type
+ DataFrame._is_homogeneous_type: Whether all the columns of a Dataframe
+ have the same dtype.
+ MultiIndex._is_homogeneous_type: Whether the levels of a MultiIndex
+ all have the same dtype.
"""
return True
@@ -804,8 +806,8 @@ def array(self):
See Also
--------
- Index.to_numpy : Similar method that always returns a NumPy array.
- Series.to_numpy : Similar method that always returns a NumPy array.
+ Index.to_numpy: Similar method that always returns a NumPy array.
+ Series.to_numpy: Similar method that always returns a NumPy array.
Notes
-----
@@ -887,9 +889,9 @@ def to_numpy(self, dtype=None, copy=False):
See Also
--------
- Series.array : Get the actual data stored within.
- Index.array : Get the actual data stored within.
- DataFrame.to_numpy : Similar method for DataFrame.
+ Series.array: Get the actual data stored within.
+ Index.array: Get the actual data stored within.
+ DataFrame.to_numpy: Similar method for DataFrame.
Notes
-----
@@ -997,9 +999,9 @@ def max(self, axis=None, skipna=True):
See Also
--------
- Index.min : Return the minimum value in an Index.
- Series.max : Return the maximum value in a Series.
- DataFrame.max : Return the maximum values in a DataFrame.
+ Index.min: Return the minimum value in an Index.
+ Series.max: Return the maximum value in a Series.
+ DataFrame.max: Return the maximum values in a DataFrame.
Examples
--------
@@ -1032,7 +1034,8 @@ def argmax(self, axis=None, skipna=True):
See Also
--------
- numpy.ndarray.argmax
+ numpy.ndarray.argmax: Return indices of the maximum value
+ along a given axis.
"""
nv.validate_minmax_axis(axis)
return nanops.nanargmax(self._values, skipna=skipna)
@@ -1054,9 +1057,9 @@ def min(self, axis=None, skipna=True):
See Also
--------
- Index.max : Return the maximum value of the object.
- Series.min : Return the minimum value in a Series.
- DataFrame.min : Return the minimum values in a DataFrame.
+ Index.max: Return the maximum value of the object.
+ Series.min: Return the minimum value in a Series.
+ DataFrame.min: Return the minimum values in a DataFrame.
Examples
--------
@@ -1093,7 +1096,8 @@ def argmin(self, axis=None, skipna=True):
See Also
--------
- numpy.ndarray.argmin
+ numpy.ndarray.argmin: Return indices of the minimum
+ value along a given axis.
"""
nv.validate_minmax_axis(axis)
return nanops.nanargmin(self._values, skipna=skipna)
@@ -1112,7 +1116,7 @@ def tolist(self):
See Also
--------
- numpy.ndarray.tolist
+ numpy.ndarray.tolist: Return an array as a list.
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
@@ -1423,7 +1427,8 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of
+ the array.
Notes
-----
@@ -1482,7 +1487,8 @@ def factorize(self, sort=False, na_sentinel=-1):
See Also
--------
- numpy.searchsorted
+ numpy.searchsorted: Find indices where elements should be inserted to
+ maintain order.
Notes
-----
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 23c3e0eaace81..2506aec4a9669 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -248,8 +248,10 @@ def eval(expr, parser='pandas', engine=None, truediv=True,
See Also
--------
- DataFrame.query
- DataFrame.eval
+ DataFrame.query: Queries the columns of a Dataframe with a boolean
+ expression.
+ DataFrame.eval: Evaluate a string describing operations on DataFrame
+ columns.
Notes
-----
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 88bbdcf342d66..9af80abed0fff 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -153,8 +153,9 @@ class ExtensionDtype(_DtypeOpsMixin):
See Also
--------
- extensions.register_extension_dtype
- extensions.ExtensionArray
+ extensions.register_extension_dtype: Class decorator to register an
+ ExtensionType with pandas.
+ extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 11a132c4d14ee..ed021b4bb89b9 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -196,7 +196,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
See Also
--------
- Categorical
+ Categorical: Represents a categorical variable.
Notes
-----
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6b4d95055d06d..426a9da181269 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -218,9 +218,9 @@
See Also
--------
-merge_ordered : Merge with optional filling/interpolation.
-merge_asof : Merge on nearest keys.
-DataFrame.join : Similar method using indices.
+merge_ordered: Merge with optional filling/interpolation.
+merge_asof: Merge on nearest keys.
+DataFrame.join: Similar method using indices.
Notes
-----
@@ -316,9 +316,9 @@ class DataFrame(NDFrame):
See Also
--------
- DataFrame.from_records : Constructor from tuples, also record arrays.
- DataFrame.from_dict : From dicts of Series, arrays, or dicts.
- DataFrame.from_items : From sequence of (key, value) pairs
+ DataFrame.from_records: Constructor from tuples, also record arrays.
+ DataFrame.from_dict: From dicts of Series, arrays, or dicts.
+ DataFrame.from_items: From sequence of (key, value) pairs
read_csv, pandas.read_table, pandas.read_clipboard.
Examples
@@ -495,7 +495,7 @@ def shape(self):
See Also
--------
- ndarray.shape
+ ndarray.shape: Return tuple of array dimensions.
Examples
--------
@@ -675,9 +675,10 @@ def to_string(self, buf=None, columns=None, col_space=None, header=True,
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
+
See Also
--------
- to_html : Convert DataFrame to HTML.
+ to_html: Convert DataFrame to HTML.
Examples
--------
@@ -718,7 +719,8 @@ def style(self):
See Also
--------
- io.formats.style.Styler
+ io.formats.style.Styler: Class to help style a DataFrame or Series
+ according to the data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
@@ -739,9 +741,9 @@ def iteritems(self):
See Also
--------
- DataFrame.iterrows : Iterate over DataFrame rows as
+ DataFrame.iterrows: Iterate over DataFrame rows as
(index, Series) pairs.
- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
+ DataFrame.itertuples: Iterate over DataFrame rows as namedtuples
of the values.
Examples
@@ -794,8 +796,8 @@ def iterrows(self):
See Also
--------
- itertuples : Iterate over DataFrame rows as namedtuples of the values.
- iteritems : Iterate over (column name, Series) pairs.
+ itertuples: Iterate over DataFrame rows as namedtuples of the values.
+ iteritems: Iterate over (column name, Series) pairs.
Notes
-----
@@ -851,9 +853,9 @@ def itertuples(self, index=True, name="Pandas"):
See Also
--------
- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
+ DataFrame.iterrows: Iterate over DataFrame rows as (index, Series)
pairs.
- DataFrame.iteritems : Iterate over (column name, Series) pairs.
+ DataFrame.iteritems: Iterate over (column name, Series) pairs.
Notes
-----
@@ -1069,9 +1071,9 @@ def from_dict(cls, data, orient='columns', dtype=None, columns=None):
See Also
--------
- DataFrame.from_records : DataFrame from ndarray (structured
+ DataFrame.from_records: DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
- DataFrame : DataFrame object creation using constructor.
+ DataFrame: DataFrame object creation using constructor.
Examples
--------
@@ -1149,7 +1151,7 @@ def to_numpy(self, dtype=None, copy=False):
See Also
--------
- Series.to_numpy : Similar method for Series.
+ Series.to_numpy: Similar method for Series.
Examples
--------
@@ -1399,8 +1401,8 @@ def to_gbq(self, destination_table, project_id=None, chunksize=None,
See Also
--------
- pandas_gbq.to_gbq : This function in the pandas-gbq library.
- read_gbq : Read a DataFrame from Google BigQuery.
+ pandas_gbq.to_gbq: This function in the pandas-gbq library.
+ read_gbq: Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
@@ -1870,7 +1872,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
See Also
--------
- read_csv
+ read_csv: Read a comma-separated values (csv) file into DataFrame.
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
@@ -1914,8 +1916,7 @@ def to_sparse(self, fill_value=None, kind='block'):
See Also
--------
- DataFrame.to_dense :
- Converts the DataFrame back to the its dense form.
+ DataFrame.to_dense: Convert the DataFrame back to the its dense form.
Examples
--------
@@ -2034,9 +2035,9 @@ def to_stata(self, fname, convert_dates=None, write_index=True,
See Also
--------
- read_stata : Import Stata data files.
- io.stata.StataWriter : Low-level writer for Stata data files.
- io.stata.StataWriter117 : Low-level writer for version 117 files.
+ read_stata: Import Stata data files.
+ io.stata.StataWriter: Low-level writer for Stata data files.
+ io.stata.StataWriter117: Low-level writer for version 117 files.
Examples
--------
@@ -2123,10 +2124,10 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
See Also
--------
- read_parquet : Read a parquet file.
- DataFrame.to_csv : Write a csv file.
- DataFrame.to_sql : Write to a sql table.
- DataFrame.to_hdf : Write to hdf.
+ read_parquet: Read a parquet file.
+ DataFrame.to_csv: Write a csv file.
+ DataFrame.to_sql: Write to a sql table.
+ DataFrame.to_hdf: Write to hdf.
Notes
-----
@@ -2188,7 +2189,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True,
%(returns)s
See Also
--------
- to_string : Convert DataFrame to a string.
+ to_string: Convert DataFrame to a string.
"""
if (justify is not None and
@@ -2484,12 +2485,12 @@ def memory_usage(self, index=True, deep=False):
See Also
--------
- numpy.ndarray.nbytes : Total bytes consumed by the elements of an
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of an
ndarray.
- Series.memory_usage : Bytes consumed by a Series.
- Categorical : Memory-efficient array for string values with
+ Series.memory_usage: Bytes consumed by a Series.
+ Categorical: Memory-efficient array for string values with
many repeated values.
- DataFrame.info : Concise summary of a DataFrame.
+ DataFrame.info: Concise summary of a DataFrame.
Examples
--------
@@ -2570,7 +2571,7 @@ def transpose(self, *args, **kwargs):
See Also
--------
- numpy.transpose : Permute the dimensions of a given array.
+ numpy.transpose: Permute the dimensions of a given array.
Notes
-----
@@ -2983,9 +2984,9 @@ def query(self, expr, inplace=False, **kwargs):
See Also
--------
- eval : Evaluate a string describing operations on
+ eval: Evaluate a string describing operations on
DataFrame columns.
- DataFrame.eval : Evaluate a string describing operations on
+ DataFrame.eval: Evaluate a string describing operations on
DataFrame columns.
Notes
@@ -3093,11 +3094,11 @@ def eval(self, expr, inplace=False, **kwargs):
See Also
--------
- DataFrame.query : Evaluates a boolean expression to query the columns
+ DataFrame.query: Evaluate a boolean expression to query the columns
of a frame.
- DataFrame.assign : Can evaluate an expression or function to create new
+ DataFrame.assign: Can evaluate an expression or function to create new
values for a column.
- eval : Evaluate a Python expression as a string using various
+ eval: Evaluate a Python expression as a string using various
backends.
Notes
@@ -3947,7 +3948,8 @@ def rename(self, *args, **kwargs):
See Also
--------
- DataFrame.rename_axis
+ DataFrame.rename_axis: Set the name of the axis for the index or
+ columns.
Examples
--------
@@ -4051,9 +4053,9 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
See Also
--------
- DataFrame.reset_index : Opposite of set_index.
- DataFrame.reindex : Change to new indices or expand indices.
- DataFrame.reindex_like : Change to same indices as other DataFrame.
+ DataFrame.reset_index: Opposite of set_index.
+ DataFrame.reindex: Change to new indices or expand indices.
+ DataFrame.reindex_like: Change to same indices as other DataFrame.
Examples
--------
@@ -4237,9 +4239,9 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
See Also
--------
- DataFrame.set_index : Opposite of reset_index.
- DataFrame.reindex : Change to new indices or expand indices.
- DataFrame.reindex_like : Change to same indices as other DataFrame.
+ DataFrame.set_index: Opposite of reset_index.
+ DataFrame.reindex: Change to new indices or expand indices.
+ DataFrame.reindex_like: Change to same indices as other DataFrame.
Examples
--------
@@ -4838,10 +4840,10 @@ def nlargest(self, n, columns, keep='first'):
See Also
--------
- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
+ DataFrame.nsmallest: Return the first `n` rows ordered by `columns` in
ascending order.
- DataFrame.sort_values : Sort DataFrame by the values.
- DataFrame.head : Return the first `n` rows without re-ordering.
+ DataFrame.sort_values: Sort DataFrame by the values.
+ DataFrame.head: Return the first `n` rows without re-ordering.
Notes
-----
@@ -4948,10 +4950,10 @@ def nsmallest(self, n, columns, keep='first'):
See Also
--------
- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
+ DataFrame.nlargest: Return the first `n` rows ordered by `columns` in
descending order.
- DataFrame.sort_values : Sort DataFrame by the values.
- DataFrame.head : Return the first `n` rows without re-ordering.
+ DataFrame.sort_values: Sort DataFrame by the values.
+ DataFrame.head: Return the first `n` rows without re-ordering.
Examples
--------
@@ -5152,7 +5154,7 @@ def combine(self, other, func, fill_value=None, overwrite=True):
See Also
--------
- DataFrame.combine_first : Combine two DataFrame objects and default to
+ DataFrame.combine_first: Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
@@ -5310,7 +5312,7 @@ def combine_first(self, other):
See Also
--------
- DataFrame.combine : Perform series-wise operation on two DataFrames
+ DataFrame.combine: Perform series-wise operation on two DataFrames
using a given function.
Examples
@@ -5419,8 +5421,8 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
See Also
--------
- dict.update : Similar method for dictionaries.
- DataFrame.merge : For column(s)-on-columns(s) operations.
+ dict.update: Similar method for dictionaries.
+ DataFrame.merge: For column(s)-on-columns(s) operations.
Examples
--------
@@ -5559,9 +5561,9 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
See Also
--------
- DataFrame.pivot_table : Generalization of pivot that can handle
+ DataFrame.pivot_table: Generalization of pivot that can handle
duplicate values for one index/column pair.
- DataFrame.unstack : Pivot based on the index values instead of a
+ DataFrame.unstack: Pivot based on the index values instead of a
column.
Notes
@@ -5671,7 +5673,7 @@ def pivot(self, index=None, columns=None, values=None):
See Also
--------
- DataFrame.pivot : Pivot without aggregation that can handle
+ DataFrame.pivot: Pivot without aggregation that can handle
non-numeric data.
Examples
@@ -5797,11 +5799,11 @@ def stack(self, level=-1, dropna=True):
See Also
--------
- DataFrame.unstack : Unstack prescribed level(s) from index axis
+ DataFrame.unstack: Unstack prescribed level(s) from index axis
onto column axis.
- DataFrame.pivot : Reshape dataframe from long format to wide
+ DataFrame.pivot: Reshape dataframe from long format to wide
format.
- DataFrame.pivot_table : Create a spreadsheet-style pivot table
+ DataFrame.pivot_table: Create a spreadsheet-style pivot table
as a DataFrame.
Notes
@@ -5957,8 +5959,8 @@ def unstack(self, level=-1, fill_value=None):
See Also
--------
- DataFrame.pivot : Pivot a table based on column values.
- DataFrame.stack : Pivot a level of the column labels (inverse operation
+ DataFrame.pivot: Pivot a table based on column values.
+ DataFrame.stack: Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
@@ -6023,9 +6025,10 @@ def unstack(self, level=-1, fill_value=None):
See Also
--------
- %(other)s
- pivot_table
- DataFrame.pivot
+ DataFrame.pivot_table: Create a spreadsheet-style pivot table as a
+ DataFrame.
+ DataFrame.pivot: Return reshaped DataFrame organized by given index
+ column values.
Examples
--------
@@ -6230,13 +6233,13 @@ def _gotitem(self,
See Also
--------
- DataFrame.apply : Perform any type of operations.
- DataFrame.transform : Perform transformation type operations.
- core.groupby.GroupBy : Perform operations over groups.
- core.resample.Resampler : Perform operations over resampled bins.
- core.window.Rolling : Perform operations over rolling window.
- core.window.Expanding : Perform operations over expanding window.
- core.window.EWM : Perform operation over exponential weighted
+ DataFrame.apply: Perform any type of operations.
+ DataFrame.transform: Perform transformation type operations.
+ core.groupby.GroupBy: Perform operations over groups.
+ core.resample.Resampler: Perform operations over resampled bins.
+ core.window.Rolling: Perform operations over rolling window.
+ core.window.Expanding: Perform operations over expanding window.
+ core.window.EWM: Perform operation over exponential weighted
window.
""")
@@ -6510,7 +6513,7 @@ def applymap(self, func):
See Also
--------
- DataFrame.apply : Apply a function along input axis of DataFrame.
+ DataFrame.apply: Apply a function along input axis of DataFrame.
Notes
-----
@@ -6590,7 +6593,7 @@ def append(self, other, ignore_index=False,
See Also
--------
- concat : General function to concatenate DataFrame, Series
+ concat: General function to concatenate DataFrame, Series
or Panel objects.
Notes
@@ -6743,7 +6746,7 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
See Also
--------
- DataFrame.merge : For column(s)-on-columns(s) operations.
+ DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
@@ -6902,8 +6905,8 @@ def round(self, decimals=0, *args, **kwargs):
See Also
--------
- numpy.around : Round a numpy array to the given number of decimals.
- Series.round : Round a Series to the given number of decimals.
+ numpy.around: Round a numpy array to the given number of decimals.
+ Series.round: Round a Series to the given number of decimals.
Examples
--------
@@ -7015,8 +7018,10 @@ def corr(self, method='pearson', min_periods=1):
See Also
--------
- DataFrame.corrwith
- Series.corr
+ DataFrame.corrwith: Compute pairwise correlation between rows or
+ columns of DataFrame with rows or columns of Series or DataFrame.
+ Series.corr: Compute correlation with other Series, excluding
+ missing values.
Examples
--------
@@ -7103,10 +7108,10 @@ def cov(self, min_periods=None):
See Also
--------
- Series.cov : Compute covariance with another Series.
+ Series.cov: Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
- core.window.Expanding.cov : Expanding sample covariance.
- core.window.Rolling.cov : Rolling sample covariance.
+ core.window.Expanding.cov: Expanding sample covariance.
+ core.window.Rolling.cov: Rolling sample covariance.
Notes
-----
@@ -7185,7 +7190,7 @@ def cov(self, min_periods=None):
def corrwith(self, other, axis=0, drop=False, method='pearson'):
"""
Compute pairwise correlation between rows or columns of DataFrame
- with rows or columns of Series or DataFrame. DataFrames are first
+ with rows or columns of Series or DataFrame. DataFrames are first
aligned along both axes before computing the correlations.
Parameters
@@ -7212,7 +7217,8 @@ def corrwith(self, other, axis=0, drop=False, method='pearson'):
See Also
-------
- DataFrame.corr
+ DataFrame.corr: Compute pairwise correlation of columns excluding
+ NA/null values.
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
@@ -7586,7 +7592,7 @@ def idxmin(self, axis=0, skipna=True):
See Also
--------
- Series.idxmin
+ Series.idxmin: Return the row label of the minimum value.
Notes
-----
@@ -7623,7 +7629,7 @@ def idxmax(self, axis=0, skipna=True):
See Also
--------
- Series.idxmax
+ Series.idxmax: Return the row label of the maximum value.
Notes
-----
@@ -7674,8 +7680,8 @@ def mode(self, axis=0, numeric_only=False, dropna=True):
See Also
--------
- Series.mode : Return the highest frequency value in a Series.
- Series.value_counts : Return the counts of values in a Series.
+ Series.mode: Return the highest frequency value in a Series.
+ Series.value_counts: Return the counts of values in a Series.
Examples
--------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ee8f9cba951b3..f04c41e609e4d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -988,7 +988,8 @@ def rename(self, *args, **kwargs):
See Also
--------
- NDFrame.rename_axis
+ NDFrame.rename_axis: Set the name of the axis for the
+ index or columns.
Examples
--------
@@ -1859,8 +1860,8 @@ def empty(self):
See Also
--------
- Series.dropna
- DataFrame.dropna
+ Series.dropna: Return a new Series with missing values removed.
+ DataFrame.dropna: Drop rows or columns which contain missing values.
Notes
-----
@@ -2212,7 +2213,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,
See Also
--------
- read_json
+ read_json: Convert a JSON string to pandas object.
Examples
--------
@@ -3351,8 +3352,9 @@ def _take(self, indices, axis=0, is_copy=True):
See Also
--------
- numpy.ndarray.take
- numpy.take
+ numpy.ndarray.take: Return an array formed from the elements of
+ a at the given indices.
+ numpy.take: Take elements from an array along an axis.
"""
self._consolidate_inplace()
@@ -4527,7 +4529,8 @@ def filter(self, items=None, like=None, regex=None, axis=None):
See Also
--------
- DataFrame.loc
+ DataFrame.loc: Access a group of rows and columns by labels or a
+ boolean array.
Notes
-----
@@ -4894,9 +4897,9 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
See Also
--------
- DataFrame.apply
- DataFrame.applymap
- Series.map
+ DataFrame.apply: Apply a function along an axis of the DataFrame.
+ DataFrame.applymap: Apply a function to a Dataframe elementwise.
+ Series.map: Map values of Series according to input correspondence.
Notes
-----
@@ -5233,7 +5236,7 @@ def as_matrix(self, columns=None):
See Also
--------
- DataFrame.values
+ DataFrame.values: Return a Numpy representation of the DataFrame.
Notes
-----
@@ -5360,7 +5363,7 @@ def get_values(self):
See Also
--------
- values : Numpy representation of DataFrame.
+ DataFrame.values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
Examples
@@ -7686,7 +7689,9 @@ def asfreq(self, freq, method=None, how=None, normalize=False,
See Also
--------
- reindex
+ DataFrame.reindex: Conform DataFrame to new index with optional
+ filling logic placing NA/NaN in locations having no value in
+ the previous index.
Notes
-----
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index ebba4a0a9395d..3b0aba14acacb 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -1,5 +1,5 @@
"""
-Provide basic components for groupby. These defintiions
+Provide basic components for groupby. These definitions
hold the whitelist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 683c21f7bd47a..6a1a015a5da45 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -708,9 +708,12 @@ def _selection_name(self):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.Series.groupby.apply
- pandas.Series.groupby.transform
- pandas.Series.aggregate
+ pandas.Series.groupby.apply: Apply function func group-wise and combine
+ the results together.
+ pandas.Series.groupby.transform: Call func on self producing a DataFrame
+ with transformed values and that has the same axis length as self.
+ pandas.Series.aggregate: Aggregate using one or more operations over
+ the specified axis.
""")
_agg_examples_doc = dedent("""
@@ -1251,9 +1254,12 @@ class DataFrameGroupBy(NDFrameGroupBy):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.DataFrame.groupby.apply
- pandas.DataFrame.groupby.transform
- pandas.DataFrame.aggregate
+ pandas.DataFrame.groupby.apply: Apply function func group-wise and
+ combine the results together.
+ pandas.DataFrame.groupby.transform: Call func on self producing a
+ DataFrame with transformed values and that has the same axis length
+ as self.
+ pandas.DataFrame.aggregate: Aggregate using one or more operations
+ over the specified axis.
""")
_agg_examples_doc = dedent("""
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 926da40deaff2..3da28f2887a12 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -47,9 +47,10 @@ class providing the base-class of operations.
_common_see_also = """
See Also
--------
- Series.%(name)s
- DataFrame.%(name)s
- Panel.%(name)s
+ Series.%(name)s: Group Series using a mapper or by a Series of columns.
+ DataFrame.%(name)s: Group DataFrame using a mapper or by a Series of
+ columns.
+ Panel.%(name)s: Group data on given axis, returning GroupBy object.
"""
_apply_docs = dict(
@@ -209,9 +210,9 @@ class providing the base-class of operations.
See Also
--------
-Series.pipe : Apply a function with arguments to a series.
+Series.pipe: Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
-apply : Apply function to each group instead of to the
+apply: Apply function to each group instead of to the
full %(klass)s object.
Notes
@@ -240,7 +241,9 @@ class providing the base-class of operations.
See Also
--------
-aggregate, transform
+aggregate: Aggregate using callable, string, dict, or list of string/callables.
+transform: Call func on self producing a DataFrame with transformed values
+ and that has the same axis length as self.
Notes
-----
@@ -1098,7 +1101,8 @@ def count(self):
# defined here for API doc
raise NotImplementedError
- @Substitution(name='groupby', see_also=_common_see_also)
+ @Substitution(name='groupby')
+ @Substitution(see_also=_common_see_also)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values.
@@ -1493,8 +1497,8 @@ def _fill(self, direction, limit=None):
See Also
--------
- pad
- backfill
+ pad: Forward fill the values.
+ backfill: Backward fill the values.
"""
# Need int value for Cython
if limit is None:
@@ -1518,10 +1522,15 @@ def pad(self, limit=None):
See Also
--------
- Series.pad
- DataFrame.pad
- Series.fillna
- DataFrame.fillna
+ Series.pad: Forward fill the missing values in the dataset.
+ DataFrame.pad: Forward fill the missing values in the dataset.
+ Series.fillna: Fill NA/NaN values using the specified method.
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
+
+
"""
return self._fill('ffill', limit=limit)
ffill = pad
@@ -1538,15 +1547,16 @@ def backfill(self, limit=None):
See Also
--------
- Series.backfill
- DataFrame.backfill
- Series.fillna
- DataFrame.fillna
+ Series.backfill: Backward fill values of Series/Index.
+ DataFrame.backfill: Backfill values of Dataframe.
+ Series.fillna: Fill NA/NaN values using the specified method.
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._fill('bfill', limit=limit)
bfill = backfill
- @Substitution(name='groupby', see_also=_common_see_also)
+ @Substitution(name='groupby')
+ @Substitution(see_also=_common_see_also)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
@@ -1795,7 +1805,7 @@ def ngroup(self, ascending=True):
See Also
--------
- .cumcount : Number the rows in each group.
+ GroupBy.cumcount: Number the rows in each group.
Examples
--------
@@ -1858,7 +1868,7 @@ def cumcount(self, ascending=True):
See Also
--------
- .ngroup : Number the groups themselves.
+ GroupBy.ngroup: Number the groups themselves.
Examples
--------
@@ -2132,7 +2142,8 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
- @Substitution(name='groupby', see_also=_common_see_also)
+ @Substitution(name='groupby')
+ @Substitution(see_also=_common_see_also)
def head(self, n=5):
"""
Return first n rows of each group.
@@ -2160,7 +2171,8 @@ def head(self, n=5):
mask = self._cumcount_array() < n
return self._selected_obj[mask]
- @Substitution(name='groupby', see_also=_common_see_also)
+ @Substitution(name='groupby')
+ @Substitution(see_also=_common_see_also)
def tail(self, n=5):
"""
Return last n rows of each group.
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 602e11a08b4ed..071523dc9221a 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -212,7 +212,8 @@ def to_pytimedelta(self):
See Also
--------
- datetime.timedelta
+ datetime.timedelta: A duration expressing the difference between two
+ date, time, or datetime instances to microsecond resolution.
Examples
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index dee181fc1c569..501e36af2bd24 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -184,12 +184,16 @@ class Index(IndexOpsMixin, PandasObject):
See Also
---------
- RangeIndex : Index implementing a monotonic integer range.
- CategoricalIndex : Index of :class:`Categorical` s.
- MultiIndex : A multi-level, or hierarchical, Index.
- IntervalIndex : An Index of :class:`Interval` s.
- DatetimeIndex, TimedeltaIndex, PeriodIndex
- Int64Index, UInt64Index, Float64Index
+ RangeIndex: Index implementing a monotonic integer range.
+ CategoricalIndex: Index of :class:`Categorical` s.
+ MultiIndex: A multi-level, or hierarchical Index.
+ IntervalIndex: An Index of :class:`Interval` s.
+ DatetimeIndex: An Index of datetime64 data.
+ TimedeltaIndex: An Index of timedelta64 data.
+ PeriodIndex: An Index of Period data.
+ Int64Index: An Index of int64 data.
+ UInt64Index: An Index of uint64 data.
+ Float64Index: An Index of float64 data.
Notes
-----
@@ -694,7 +698,8 @@ def ravel(self, order='C'):
See Also
--------
- numpy.ndarray.ravel
+ numpy.ndarray.ravel: A 1-D array, containing the elements of the input,
+ is returned.
"""
return self._ndarray_values.ravel(order=order)
@@ -779,7 +784,7 @@ def astype(self, dtype, copy=True):
See Also
--------
- numpy.ndarray.take
+ numpy.ndarray.take: Take elements from an array along an axis.
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
@@ -844,8 +849,8 @@ def _assert_take_fillable(self, values, indices, allow_fill=True,
See Also
--------
- Series.repeat : Equivalent function for Series.
- numpy.repeat : Similar method for :class:`numpy.ndarray`.
+ Series.repeat: Equivalent function for Series.
+ numpy.repeat: Similar method for :class:`numpy.ndarray`.
Examples
--------
@@ -1121,7 +1126,7 @@ def to_flat_index(self):
See Also
--------
- MultiIndex.to_flat_index : Subclass implementation.
+ MultiIndex.to_flat_index: Subclass implementation.
"""
return self
@@ -1174,8 +1179,8 @@ def to_frame(self, index=True, name=None):
See Also
--------
- Index.to_series : Convert an Index to a Series.
- Series.to_frame : Convert Series to DataFrame.
+ Index.to_series: Convert an Index to a Series.
+ Series.to_frame: Convert Series to DataFrame.
Examples
--------
@@ -1294,7 +1299,7 @@ def set_names(self, names, level=None, inplace=False):
See Also
--------
- Index.rename : Able to set new names without level.
+ Index.rename: Able to set new names without level.
Examples
--------
@@ -1366,7 +1371,7 @@ def rename(self, name, inplace=False):
See Also
--------
- Index.set_names : Able to set new names partially and by level.
+ Index.set_names: Able to set new names partially and by level.
Examples
--------
@@ -1466,7 +1471,7 @@ def _get_level_values(self, level):
See Also
--------
- MultiIndex.get_level_values : Get values for a level of a MultiIndex.
+ MultiIndex.get_level_values: Get values for a level of a MultiIndex.
Notes
-----
@@ -1693,7 +1698,7 @@ def is_categorical(self):
See Also
--------
- CategoricalIndex : Index for categorical data.
+ CategoricalIndex: Index for categorical data.
Examples
--------
@@ -1834,10 +1839,10 @@ def isna(self):
See Also
--------
- Index.notna : Boolean inverse of isna.
- Index.dropna : Omit entries with missing values.
- isna : Top-level isna.
- Series.isna : Detect missing values in Series object.
+ Index.notna: Boolean inverse of isna.
+ Index.dropna: Omit entries with missing values.
+ isna: Top-level isna.
+ Series.isna: Detect missing values in Series object.
Examples
--------
@@ -1892,9 +1897,9 @@ def notna(self):
See Also
--------
- Index.notnull : Alias of notna.
+ Index.notnull: Alias of notna.
Index.isna: Inverse of notna.
- notna : Top-level notna.
+ notna: Top-level notna.
Examples
--------
@@ -1992,8 +1997,9 @@ def dropna(self, how='any'):
See Also
--------
- unique
- Series.unique
+ unique: Hash table-based unique. Uniques are returned in order of
+ appearance. This does NOT sort.
+ Series.unique: Return unique values of Series object.
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
@@ -2020,9 +2026,9 @@ def drop_duplicates(self, keep='first'):
See Also
--------
- Series.drop_duplicates : Equivalent method on Series.
- DataFrame.drop_duplicates : Equivalent method on DataFrame.
- Index.duplicated : Related method on Index, indicating duplicate
+ Series.drop_duplicates: Equivalent method on Series.
+ DataFrame.drop_duplicates: Equivalent method on DataFrame.
+ Index.duplicated: Related method on Index, indicating duplicate
Index values.
Examples
@@ -2076,9 +2082,9 @@ def duplicated(self, keep='first'):
See Also
--------
- Series.duplicated : Equivalent method on pandas.Series.
- DataFrame.duplicated : Equivalent method on pandas.DataFrame.
- Index.drop_duplicates : Remove duplicate values from Index.
+ Series.duplicated: Equivalent method on pandas.Series.
+ DataFrame.duplicated: Equivalent method on pandas.DataFrame.
+ Index.drop_duplicates: Remove duplicate values from Index.
Examples
--------
@@ -2124,8 +2130,8 @@ def get_duplicates(self):
See Also
--------
- Index.duplicated : Return boolean array denoting duplicates.
- Index.drop_duplicates : Return Index with duplicates removed.
+ Index.duplicated: Return boolean array denoting duplicates.
+ Index.drop_duplicates: Return Index with duplicates removed.
Examples
--------
@@ -3621,13 +3627,12 @@ def values(self):
Returns
-------
array: numpy.ndarray or ExtensionArray
+ Returns the underlying data as an ndarray.
See Also
--------
- Index.array : Reference to the underlying data.
- Index.to_numpy : A NumPy array representing the underlying data.
-
- Return the underlying data as an ndarray.
+ Index.array: Reference to the underlying data.
+ Index.to_numpy: A NumPy array representing the underlying data.
"""
return self._data.view(np.ndarray)
@@ -3657,8 +3662,9 @@ def _values(self):
See Also
--------
- values
- _ndarray_values
+ values: Only the values in the DataFrame will be returned, the
+ axes labels will be removed.
+ _ndarray_values: The data as a ndarray.
"""
return self._data
@@ -3673,7 +3679,7 @@ def get_values(self):
See Also
--------
- Index.values : The attribute that get_values wraps.
+ Index.values: The attribute that get_values wraps.
Examples
--------
@@ -3895,7 +3901,7 @@ def is_type_compatible(self, kind):
See Also
--------
- Index.isin : Returns an ndarray of boolean dtype indicating whether the
+ Index.isin: Return an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
@@ -4037,7 +4043,8 @@ def putmask(self, mask, value):
See Also
--------
- numpy.ndarray.putmask
+ numpy.ndarray.putmask: Change elements of an array based on
+ conditional and input values.
"""
values = self.values.copy()
try:
@@ -4102,11 +4109,11 @@ def asof(self, label):
See Also
--------
- Series.asof : Return the latest value in a Series up to the
+ Series.asof: Return the latest value in a Series up to the
passed index.
- merge_asof : Perform an asof merge (similar to left join but it
+ merge_asof: Perform an asof merge (similar to left join but it
matches on nearest key rather than equal key).
- Index.get_loc : An `asof` is a thin wrapper around `get_loc`
+ Index.get_loc: An `asof` is a thin wrapper around `get_loc`
with method='pad'.
Examples
@@ -4207,8 +4214,8 @@ def sort_values(self, return_indexer=False, ascending=True):
See Also
--------
- Series.sort_values : Sort values of a Series.
- DataFrame.sort_values : Sort values in a DataFrame.
+ Series.sort_values: Sort values of a Series.
+ DataFrame.sort_values: Sort values in a DataFrame.
Examples
--------
@@ -4266,7 +4273,7 @@ def shift(self, periods=1, freq=None):
See Also
--------
- Series.shift : Shift values of Series.
+ Series.shift: Shift values of Series.
Notes
-----
@@ -4320,8 +4327,8 @@ def argsort(self, *args, **kwargs):
See Also
--------
- numpy.argsort : Similar method for NumPy arrays.
- Index.sort_values : Return sorted copy of Index.
+ numpy.argsort: Similar method for NumPy arrays.
+ Index.sort_values: Return sorted copy of Index.
Examples
--------
@@ -4567,8 +4574,8 @@ def isin(self, values, level=None):
See Also
--------
- Series.isin : Same for Series.
- DataFrame.isin : Same method for DataFrames.
+ Series.isin: Same method for Series.
+ DataFrame.isin: Same method for DataFrames.
Notes
-----
@@ -4851,7 +4858,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
See Also
--------
- Index.get_loc : Get location for a single label.
+ Index.get_loc: Get location for a single label.
Notes
-----
@@ -5194,9 +5201,9 @@ def _add_logical_methods(cls):
See Also
--------
- Index.any : Return whether any element in an Index is True.
- Series.any : Return whether any element in a Series is True.
- Series.all : Return whether all elements in a Series are True.
+ Index.any: Return whether any element in an Index is True.
+ Series.any: Return whether any element in a Series is True.
+ Series.all: Return whether all elements in a Series are True.
Notes
-----
@@ -5234,8 +5241,8 @@ def _add_logical_methods(cls):
See Also
--------
- Index.all : Return whether all elements are True.
- Series.all : Return whether all elements are True.
+ Index.all: Return whether all elements are True.
+ Series.all: Return whether all elements are True.
Notes
-----
@@ -5319,7 +5326,7 @@ def ensure_index_from_sequences(sequences, names=None):
See Also
--------
- ensure_index
+ ensure_index: Ensure that an index exists.
"""
from .multi import MultiIndex
@@ -5359,7 +5366,7 @@ def ensure_index(index_like, copy=False):
See Also
--------
- ensure_index_from_sequences
+ ensure_index_from_sequences: Construct an index from sequences of data.
"""
if isinstance(index_like, Index):
if copy:
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index b494c41c3b58c..a4f1314cd6d1f 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -100,9 +100,9 @@ class CategoricalIndex(Index, accessor.PandasDelegate):
See Also
--------
- Index : The base pandas Index type.
- Categorical : A categorical array.
- CategoricalDtype : Type for categorical data.
+ Index: The base pandas Index type.
+ Categorical: A categorical array.
+ CategoricalDtype: Type for categorical data.
Notes
-----
@@ -740,12 +740,12 @@ def map(self, mapper):
See Also
--------
- Index.map : Apply a mapping correspondence on an
- :class:`~pandas.Index`.
+ Index.map: Apply a mapping correspondence on an
+ :class:`Index`.
Series.map : Apply a mapping correspondence on a
- :class:`~pandas.Series`.
+ :class:`Series`.
Series.apply : Apply more complex functions on a
- :class:`~pandas.Series`.
+ :class:`Series`.
Examples
--------
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index aa7332472fc07..637d16b9ee36e 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -319,7 +319,8 @@ def min(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.min
+ numpy.ndarray.min: Return the minimum for the Array along a
+ given axis.
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
@@ -355,7 +356,8 @@ def argmin(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.argmin
+ numpy.ndarray.argmin: Return indices of the minimum values along
+ the given axis of a.
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
@@ -376,7 +378,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.max
+ numpy.ndarray.max: Return the maximum along a given axis.
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
@@ -412,7 +414,8 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.argmax
+ numpy.ndarray.argmax: Return indices of the maximum values along
+ the given axis.
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index b8d052ce7be04..050b2220ee023 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -218,11 +218,11 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin):
See Also
---------
- Index : The base pandas Index type.
- TimedeltaIndex : Index of timedelta64 data.
- PeriodIndex : Index of Period data.
- to_datetime : Convert argument to datetime.
- date_range : Create a fixed-frequency DatetimeIndex.
+ Index: The base pandas Index type.
+ TimedeltaIndex: Index of timedelta64 data.
+ PeriodIndex: Index of Period data.
+ to_datetime: Convert argument to datetime.
+ date_range: Create a fixed-frequency DatetimeIndex.
"""
_typ = 'datetimeindex'
_join_precedence = 10
@@ -1328,7 +1328,9 @@ def indexer_at_time(self, time, asof=False):
See Also
--------
- indexer_between_time, DataFrame.at_time
+ indexer_between_time: Return index locations of values between
+ particular times of day.
+ DataFrame.at_time: Select values at particular time of day.
"""
if asof:
raise NotImplementedError("'asof' argument is not supported")
@@ -1367,7 +1369,9 @@ def indexer_between_time(self, start_time, end_time, include_start=True,
See Also
--------
- indexer_at_time, DataFrame.between_time
+ indexer_between_time: Return index locations of values between
+ particular times of day.
+ DataFrame.at_time: Select values at particular time of day.
"""
start_time = tools.to_time(start_time)
end_time = tools.to_time(end_time)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 616c17cd16f9a..8234a89956aaf 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -174,12 +174,12 @@ class MultiIndex(Index):
See Also
--------
- MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
- MultiIndex.from_product : Create a MultiIndex from the cartesian product
+ MultiIndex.from_arrays: Convert list of arrays to MultiIndex.
+ MultiIndex.from_product: Create a MultiIndex from the cartesian product
of iterables.
- MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
- MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
- Index : The base pandas Index type.
+ MultiIndex.from_tuples: Convert list of tuples to a MultiIndex.
+ MultiIndex.from_frame: Make a MultiIndex from a DataFrame.
+ Index: The base pandas Index type.
Examples
---------
@@ -311,10 +311,10 @@ def from_arrays(cls, arrays, sortorder=None, names=None):
See Also
--------
- MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
- MultiIndex.from_product : Make a MultiIndex from cartesian product
+ MultiIndex.from_tuples: Convert list of tuples to MultiIndex.
+ MultiIndex.from_product: Make a MultiIndex from cartesian product
of iterables.
- MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
+ MultiIndex.from_frame: Make a MultiIndex from a DataFrame.
Examples
--------
@@ -371,10 +371,10 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
See Also
--------
- MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
- MultiIndex.from_product : Make a MultiIndex from cartesian product
+ MultiIndex.from_arrays: Convert list of arrays to MultiIndex.
+ MultiIndex.from_product: Make a MultiIndex from cartesian product
of iterables.
- MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
+ MultiIndex.from_frame: Make a MultiIndex from a DataFrame.
Examples
--------
@@ -428,9 +428,9 @@ def from_product(cls, iterables, sortorder=None, names=None):
See Also
--------
- MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
- MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
- MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
+ MultiIndex.from_arrays: Convert list of arrays to MultiIndex.
+ MultiIndex.from_tuples: Convert list of tuples to MultiIndex.
+ MultiIndex.from_frame: Make a MultiIndex from a DataFrame.
Examples
--------
@@ -480,9 +480,9 @@ def from_frame(cls, df, sortorder=None, names=None):
See Also
--------
- MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
- MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
- MultiIndex.from_product : Make a MultiIndex from cartesian product
+ MultiIndex.from_arrays: Convert list of arrays to MultiIndex.
+ MultiIndex.from_tuples: Convert list of tuples to MultiIndex.
+ MultiIndex.from_product: Make a MultiIndex from cartesian product
of iterables.
Examples
@@ -549,8 +549,9 @@ def _is_homogeneous_type(self):
See Also
--------
- Index._is_homogeneous_type
- DataFrame._is_homogeneous_type
+ Index._is_homogeneous_type: Whether the Index has a single dtype.
+ DataFrame._is_homogeneous_type: Whether the Dataframe has a single
+ dtype.
Examples
--------
@@ -1455,7 +1456,8 @@ def to_frame(self, index=True, name=None):
See Also
--------
- DataFrame
+ DataFrame: Two-dimensional size-mutable, potentially heterogeneous
+ tabular data structure with labeled axes.
"""
from pandas import DataFrame
@@ -1971,8 +1973,8 @@ def swaplevel(self, i=-2, j=-1):
See Also
--------
- Series.swaplevel : Swap levels i and j in a MultiIndex.
- Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
+ Series.swaplevel: Swap levels i and j in a MultiIndex.
+ Dataframe.swaplevel: Swap levels i and j in a MultiIndex on a
particular axis.
Examples
@@ -2302,8 +2304,8 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
See Also
--------
- MultiIndex.get_loc : Get location for a label or a tuple of labels.
- MultiIndex.get_locs : Get location for a label/slice/list/mask or a
+ MultiIndex.get_loc: Get location for a label or a tuple of labels.
+ MultiIndex.get_locs: Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
@@ -2375,10 +2377,10 @@ def get_loc(self, key, method=None):
See Also
--------
- Index.get_loc : The get_loc method for (single-level) index.
- MultiIndex.slice_locs : Get slice location given start label(s) and
+ Index.get_loc: The get_loc method for (single-level) index.
+ MultiIndex.slice_locs: Get slice location given start label(s) and
end label(s).
- MultiIndex.get_locs : Get location for a label/slice/list/mask or a
+ MultiIndex.get_locs: Get location for a label/slice/list/mask or a
sequence of such.
"""
if method is not None:
@@ -2477,8 +2479,8 @@ def get_loc_level(self, key, level=0, drop_level=True):
See Also
---------
- MultiIndex.get_loc : Get location for a label or a tuple of labels.
- MultiIndex.get_locs : Get location for a label/slice/list/mask or a
+ MultiIndex.get_loc: Get location for a label or a tuple of labels.
+ MultiIndex.get_locs: Get location for a label/slice/list/mask or a
sequence of such.
"""
@@ -2702,8 +2704,8 @@ def get_locs(self, seq):
See Also
--------
- MultiIndex.get_loc : Get location for a label or a tuple of labels.
- MultiIndex.slice_locs : Get slice location given start label(s) and
+ MultiIndex.get_loc: Get location for a label or a tuple of labels.
+ MultiIndex.slice_locs: Get slice location given start label(s) and
end label(s).
"""
from .numeric import Int64Index
@@ -2829,7 +2831,8 @@ def equals(self, other):
See Also
--------
- equal_levels
+ equal_levels: Return True if the levels of both MultiIndex objects
+ are the same.
"""
if self.is_(other):
return True
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 5aafe9734b6a0..770c71228eeaa 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -56,8 +56,8 @@ class RangeIndex(Int64Index):
See Also
--------
- Index : The base pandas Index type.
- Int64Index : Index of int64 data.
+ Index: The base pandas Index type.
+ Int64Index: Index of int64 data.
"""
_typ = 'rangeindex'
@@ -241,7 +241,8 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of the
+ array.
"""
return self.nbytes
@@ -318,7 +319,7 @@ def argsort(self, *args, **kwargs):
See Also
--------
- numpy.ndarray.argsort
+ numpy.ndarray.argsort: Return the indices that would sort this array.
"""
nv.validate_argsort(args, kwargs)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 4e2c04dba8b04..4dbcc17274478 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -3167,7 +3167,8 @@ def _putmask_smart(v, m, n):
See Also
--------
- ndarray.putmask
+ ndarray.putmask: Change elements of an array based on conditional
+ and input values.
"""
# we cannot use np.asarray() here as we cannot have conversions
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index dbdabecafae3a..da9b3beb552f0 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -81,7 +81,7 @@ def _maybe_match_name(a, b):
See Also
--------
- pandas.core.common.consensus_name_attr
+ pandas.core.common.consensus_name_attr: Get name of attribute.
"""
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
@@ -471,7 +471,7 @@ def _get_op_name(op, special):
See Also
--------
-Series.{reverse}
+Series.{reverse}: Equivalent operation but in reverse.
Examples
--------
@@ -578,14 +578,14 @@ def _get_op_name(op, special):
See Also
--------
-DataFrame.add : Add DataFrames.
-DataFrame.sub : Subtract DataFrames.
-DataFrame.mul : Multiply DataFrames.
-DataFrame.div : Divide DataFrames (float division).
-DataFrame.truediv : Divide DataFrames (float division).
-DataFrame.floordiv : Divide DataFrames (integer division).
-DataFrame.mod : Calculate modulo (remainder after division).
-DataFrame.pow : Calculate exponential power.
+DataFrame.add: Add DataFrames.
+DataFrame.sub: Subtract DataFrames.
+DataFrame.mul: Multiply DataFrames.
+DataFrame.div: Divide DataFrames (float division).
+DataFrame.truediv: Divide DataFrames (float division).
+DataFrame.floordiv: Divide DataFrames (integer division).
+DataFrame.mod: Calculate modulo (remainder after division).
+DataFrame.pow: Calculate exponential power.
Notes
-----
@@ -727,15 +727,15 @@ def _get_op_name(op, special):
See Also
--------
-DataFrame.eq : Compare DataFrames for equality elementwise.
-DataFrame.ne : Compare DataFrames for inequality elementwise.
-DataFrame.le : Compare DataFrames for less than inequality
+DataFrame.eq: Compare DataFrames for equality elementwise.
+DataFrame.ne: Compare DataFrames for inequality elementwise.
+DataFrame.le: Compare DataFrames for less than inequality
or equality elementwise.
-DataFrame.lt : Compare DataFrames for strictly less than
+DataFrame.lt: Compare DataFrames for strictly less than
inequality elementwise.
-DataFrame.ge : Compare DataFrames for greater than inequality
+DataFrame.ge: Compare DataFrames for greater than inequality
or equality elementwise.
-DataFrame.gt : Compare DataFrames for strictly greater than
+DataFrame.gt: Compare DataFrames for strictly greater than
inequality elementwise.
Notes
@@ -862,7 +862,7 @@ def _get_op_name(op, special):
See Also
--------
-Panel.{reverse}
+Panel.{reverse}: Equivalent operation but in reverse.
"""
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 1555542079d80..aa3f727d2ea0c 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -681,7 +681,7 @@ def round(self, decimals=0, *args, **kwargs):
See Also
--------
- numpy.around
+ numpy.around: Evenly round to the given number of decimals.
"""
nv.validate_round(args, kwargs)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index b3b28d7772713..952a9b7acf3a6 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -110,7 +110,7 @@ def __iter__(self):
See Also
--------
- GroupBy.__iter__
+ GroupBy.__iter__: Groupby iterator.
"""
self._set_binner()
return super(Resampler, self).__iter__()
@@ -213,9 +213,12 @@ def pipe(self, func, *args, **kwargs):
_agg_see_also_doc = dedent("""
See Also
--------
- DataFrame.groupby.aggregate
- DataFrame.resample.transform
- DataFrame.aggregate
+ DataFrame.groupby.aggregate: Aggregate using callable, string, dict, or
+ list of string/callables.
+ DataFrame.resample.transform: Call function producing a like-indexed
+ Series oneach group and return a Series with the transformed values.
+ DataFrame.aggregate: Aggregate using one or more operations over the
+ specified axis.
""")
_agg_examples_doc = dedent("""
@@ -421,8 +424,8 @@ def pad(self, limit=None):
See Also
--------
- Series.fillna
- DataFrame.fillna
+ Series.fillna: Fill NA/NaN values using the specified method.
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._upsample('pad', limit=limit)
ffill = pad
@@ -455,8 +458,8 @@ def nearest(self, limit=None):
See Also
--------
- backfill : Backward fill the new missing values in the resampled data.
- pad : Forward fill ``NaN`` values.
+ backfill: Backward fill the new missing values in the resampled data.
+ pad: Forward fill ``NaN`` values.
Examples
--------
@@ -512,14 +515,14 @@ def backfill(self, limit=None):
See Also
--------
- bfill : Alias of backfill.
- fillna : Fill NaN values using the specified method, which can be
+ bfill: Alias of backfill.
+ fillna: Fill NaN values using the specified method, which can be
'backfill'.
- nearest : Fill NaN values with nearest neighbor starting from center.
- pad : Forward fill NaN values.
- Series.fillna : Fill NaN values in the Series using the
+ nearest: Fill NaN values with nearest neighbor starting from center.
+ pad: Forward fill NaN values.
+ Series.fillna: Fill NaN values in the Series using the
specified method, which can be 'backfill'.
- DataFrame.fillna : Fill NaN values in the DataFrame using the
+ DataFrame.fillna: Fill NaN values in the DataFrame using the
specified method, which can be 'backfill'.
References
@@ -625,14 +628,14 @@ def fillna(self, method, limit=None):
See Also
--------
- backfill : Backward fill NaN values in the resampled data.
- pad : Forward fill NaN values in the resampled data.
- nearest : Fill NaN values in the resampled data
+ backfill: Backward fill NaN values in the resampled data.
+ pad: Forward fill NaN values in the resampled data.
+ nearest: Fill NaN values in the resampled data
with nearest neighbor starting from center.
- interpolate : Fill NaN values using interpolation.
- Series.fillna : Fill NaN values in the Series using the
+ interpolate: Fill NaN values using interpolation.
+ Series.fillna: Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
- DataFrame.fillna : Fill NaN values in the DataFrame using the
+ DataFrame.fillna: Fill NaN values in the DataFrame using the
specified method, which can be 'bfill' and 'ffill'.
References
@@ -783,8 +786,8 @@ def asfreq(self, fill_value=None):
See Also
--------
- Series.asfreq
- DataFrame.asfreq
+ Series.asfreq: Convert TimeSeries to specified frequency.
+ DataFrame.asfreq: Convert TimeSeries to specified frequency.
"""
return self._upsample('asfreq', fill_value=fill_value)
@@ -833,9 +836,11 @@ def quantile(self, q=0.5, **kwargs):
See Also
--------
- Series.quantile
- DataFrame.quantile
- DataFrameGroupBy.quantile
+ Series.quantile: Return value at the given quantile.
+ DataFrame.quantile: Return values at the given quantile over
+ requested axis.
+ DataFrameGroupBy.quantile Return values at the given quantile
+ over requested axis.
"""
return self._downsample('quantile', q=q, **kwargs)
@@ -1041,7 +1046,7 @@ def _upsample(self, method, limit=None, fill_value=None):
See Also
--------
- .fillna
+ .fillna: Fill NA/NaN values using the specified method.
"""
self._set_binner()
@@ -1172,7 +1177,7 @@ def _upsample(self, method, limit=None, fill_value=None):
See Also
--------
- .fillna
+ .fillna: Fill NA/NaN values using the specified method.
"""
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index fb50a3c60f705..e5390589f9890 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -184,8 +184,9 @@ def merge_ordered(left, right, on=None,
See Also
--------
- merge
- merge_asof
+ merge: Merge DataFrame or named Series objects with a database-style join.
+ merge_asof: Perform an asof merge. This is similar to a left-join except
+ that we match on nearest key rather than equal keys.
Examples
--------
@@ -324,8 +325,9 @@ def merge_asof(left, right, on=None,
See Also
--------
- merge
- merge_ordered
+ merge: Merge DataFrame or named Series objects with a database-style join.
+ merge_ordered: Perform merge with optional filling/interpolation designed
+ for ordered data like time series data.
Examples
--------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cada6663ce651..16f567fd8993e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -449,8 +449,8 @@ def values(self):
See Also
--------
- Series.array : Reference to the underlying data.
- Series.to_numpy : A NumPy array representing the underlying data.
+ Series.array: Reference to the underlying data.
+ Series.to_numpy: A NumPy array representing the underlying data.
Examples
--------
@@ -521,7 +521,8 @@ def ravel(self, order='C'):
See Also
--------
- numpy.ndarray.ravel
+ numpy.ndarray.ravel: Return a 1-D array containing the elements
+ of the input.
"""
return self._values.ravel(order=order)
@@ -533,7 +534,8 @@ def compress(self, condition, *args, **kwargs):
See Also
--------
- numpy.ndarray.compress
+ numpy.ndarray.compress: Return selected slices of this array
+ along given axis.
"""
msg = ("Series.compress(condition) is deprecated. "
"Use 'Series[condition]' or "
@@ -557,7 +559,7 @@ def nonzero(self):
See Also
--------
- numpy.nonzero
+ numpy.nonzero: Return the indices of the elements that are non-zero.
Examples
--------
@@ -590,7 +592,8 @@ def put(self, *args, **kwargs):
See Also
--------
- numpy.ndarray.put
+ numpy.ndarray.put: Replace specified elements of an array with
+ given values.
"""
self._values.put(*args, **kwargs)
@@ -621,7 +624,7 @@ def view(self, dtype=None):
See Also
--------
- numpy.ndarray.view : Equivalent numpy function to create a new view of
+ numpy.ndarray.view: Equivalent numpy function to create a new view of
the same data in memory.
Notes
@@ -693,9 +696,9 @@ def __array__(self, dtype=None):
See Also
--------
- array : Create a new array from data.
- Series.array : Zero-copy view to the array backing the Series.
- Series.to_numpy : Series method for similar behavior.
+ array: Create a new array from data.
+ Series.array: Zero-copy view to the array backing the Series.
+ Series.to_numpy: Series method for similar behavior.
Examples
--------
@@ -1131,8 +1134,8 @@ def repeat(self, repeats, axis=None):
See Also
--------
- Index.repeat : Equivalent function for Index.
- numpy.repeat : Similar method for :class:`numpy.ndarray`.
+ Index.repeat: Equivalent function for Index.
+ numpy.repeat: Similar method for :class:`numpy.ndarray`.
Examples
--------
@@ -1652,8 +1655,8 @@ def unique(self):
See Also
--------
- unique : Top-level unique method for any 1-d array-like object.
- Index.unique : Return Index with unique values from an Index object.
+ unique: Top-level unique method for any 1-d array-like object.
+ Index.unique: Return Index with unique values from an Index object.
Notes
-----
@@ -1722,9 +1725,9 @@ def drop_duplicates(self, keep='first', inplace=False):
See Also
--------
- Index.drop_duplicates : Equivalent method on Index.
- DataFrame.drop_duplicates : Equivalent method on DataFrame.
- Series.duplicated : Related method on Series, indicating duplicate
+ Index.drop_duplicates: Equivalent method on Index.
+ DataFrame.drop_duplicates: Equivalent method on DataFrame.
+ Series.duplicated: Related method on Series, indicating duplicate
Series values.
Examples
@@ -1801,9 +1804,9 @@ def duplicated(self, keep='first'):
See Also
--------
- Index.duplicated : Equivalent method on pandas.Index.
- DataFrame.duplicated : Equivalent method on pandas.DataFrame.
- Series.drop_duplicates : Remove duplicate values from Series.
+ Index.duplicated: Equivalent method on pandas.Index.
+ DataFrame.duplicated: Equivalent method on pandas.DataFrame.
+ Series.drop_duplicates: Remove duplicate values from Series.
Examples
--------
@@ -1883,11 +1886,11 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs):
See Also
--------
- numpy.argmin : Return indices of the minimum values
+ numpy.argmin: Return indices of the minimum values
along the given axis.
- DataFrame.idxmin : Return index of first occurrence of minimum
+ DataFrame.idxmin: Return index of first occurrence of minimum
over requested axis.
- Series.idxmax : Return index *label* of the first occurrence
+ Series.idxmax: Return index *label* of the first occurrence
of maximum of values.
Notes
@@ -1953,11 +1956,11 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs):
See Also
--------
- numpy.argmax : Return indices of the maximum values
+ numpy.argmax: Return indices of the maximum values
along the given axis.
- DataFrame.idxmax : Return index of first occurrence of maximum
+ DataFrame.idxmax: Return index of first occurrence of maximum
over requested axis.
- Series.idxmin : Return index *label* of the first occurrence
+ Series.idxmin: Return index *label* of the first occurrence
of minimum of values.
Notes
@@ -2033,8 +2036,8 @@ def round(self, decimals=0, *args, **kwargs):
See Also
--------
- numpy.around : Round values of an np.array.
- DataFrame.round : Round values of a DataFrame.
+ numpy.around: Round values of an np.array.
+ DataFrame.round: Round values of a DataFrame.
Examples
--------
@@ -2081,8 +2084,9 @@ def quantile(self, q=0.5, interpolation='linear'):
See Also
--------
- core.window.Rolling.quantile
- numpy.percentile
+ core.window.Rolling.quantile: Calculate the rolling quantile.
+ numpy.percentile: Compute the n-th percentile of the data
+ along the specified axis.
Examples
--------
@@ -2275,10 +2279,10 @@ def autocorr(self, lag=1):
See Also
--------
- Series.corr : Compute the correlation between two Series.
- Series.shift : Shift index by desired number of periods.
- DataFrame.corr : Compute pairwise correlation of columns.
- DataFrame.corrwith : Compute pairwise correlation between rows or
+ Series.corr: Compute the correlation between two Series.
+ Series.shift: Shift index by desired number of periods.
+ DataFrame.corr: Compute pairwise correlation of columns.
+ DataFrame.corrwith: Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
@@ -2423,7 +2427,7 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
See Also
--------
- concat : General function to concatenate DataFrame, Series
+ concat: General function to concatenate DataFrame, Series
or Panel objects.
Notes
@@ -2553,7 +2557,7 @@ def combine(self, other, func, fill_value=None):
See Also
--------
- Series.combine_first : Combine Series values, choosing the calling
+ Series.combine_first: Combine Series values, choosing the calling
Series' values first.
Examples
@@ -2647,7 +2651,7 @@ def combine_first(self, other):
See Also
--------
- Series.combine : Perform elementwise operation on two Series
+ Series.combine: Perform elementwise operation on two Series
using a given function.
Notes
@@ -2757,9 +2761,9 @@ def sort_values(self, axis=0, ascending=True, inplace=False,
See Also
--------
- Series.sort_index : Sort by the Series indices.
- DataFrame.sort_values : Sort DataFrame by the values along either axis.
- DataFrame.sort_index : Sort DataFrame by indices.
+ Series.sort_index: Sort by the Series indices.
+ DataFrame.sort_values: Sort DataFrame by the values along either axis.
+ DataFrame.sort_index: Sort DataFrame by indices.
Examples
--------
@@ -2930,7 +2934,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
--------
DataFrame.sort_index: Sort DataFrame by the index.
DataFrame.sort_values: Sort DataFrame by the value.
- Series.sort_values : Sort Series by the value.
+ Series.sort_values: Sort Series by the value.
Examples
--------
@@ -3069,7 +3073,7 @@ def argsort(self, axis=0, kind='quicksort', order=None):
See Also
--------
- numpy.ndarray.argsort
+ numpy.ndarray.argsort: Return the indices that would sort an array.
"""
values = self._values
mask = isna(values)
@@ -3392,9 +3396,9 @@ def map(self, arg, na_action=None):
See Also
--------
- Series.apply : For applying more complex functions on a Series.
- DataFrame.apply : Apply a function row-/column-wise.
- DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
+ Series.apply: For applying more complex functions on a Series.
+ DataFrame.apply: Apply a function row-/column-wise.
+ DataFrame.applymap: Apply a function elementwise on a whole DataFrame.
Notes
-----
@@ -3466,8 +3470,8 @@ def _gotitem(self, key, ndim, subset=None):
_agg_see_also_doc = dedent("""
See Also
--------
- Series.apply : Invoke function on a Series.
- Series.transform : Transform function producing a Series with like indexes.
+ Series.apply: Invoke function on a Series.
+ Series.transform: Transform function producing a Series with like indexes.
""")
_agg_examples_doc = dedent("""
@@ -3764,7 +3768,7 @@ def rename(self, index=None, **kwargs):
See Also
--------
- Series.rename_axis : Set the name of the axis.
+ Series.rename_axis: Set the name of the axis.
Examples
--------
@@ -3843,10 +3847,10 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
See Also
--------
- Series.reindex : Return only specified index labels of Series.
- Series.dropna : Return series without null values.
- Series.drop_duplicates : Return Series with duplicate values removed.
- DataFrame.drop : Drop specified labels from rows or columns.
+ Series.reindex: Return only specified index labels of Series.
+ Series.dropna: Return series without null values.
+ Series.drop_duplicates: Return Series with duplicate values removed.
+ DataFrame.drop: Drop specified labels from rows or columns.
Examples
--------
@@ -3956,9 +3960,9 @@ def memory_usage(self, index=True, deep=False):
See Also
--------
- numpy.ndarray.nbytes : Total bytes consumed by the elements of the
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of the
array.
- DataFrame.memory_usage : Bytes consumed by a DataFrame.
+ DataFrame.memory_usage: Bytes consumed by a DataFrame.
Examples
--------
@@ -4042,7 +4046,7 @@ def isin(self, values):
See Also
--------
- DataFrame.isin : Equivalent method on DataFrame.
+ DataFrame.isin: Equivalent method on DataFrame.
Examples
--------
@@ -4097,8 +4101,8 @@ def between(self, left, right, inclusive=True):
See Also
--------
- Series.gt : Greater than of series and other.
- Series.lt : Less than of series and other.
+ Series.gt: Greater than of series and other.
+ Series.lt: Less than of series and other.
Notes
-----
@@ -4199,7 +4203,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None,
See Also
--------
- read_csv
+ read_csv: Read a comma-separated values (csv) file into DataFrame.
"""
# We're calling `DataFrame.from_csv` in the implementation,
@@ -4312,10 +4316,10 @@ def dropna(self, axis=0, inplace=False, **kwargs):
See Also
--------
Series.isna: Indicate missing values.
- Series.notna : Indicate existing (non-missing) values.
- Series.fillna : Replace missing values.
- DataFrame.dropna : Drop rows or columns which contain NA values.
- Index.dropna : Drop missing indices.
+ Series.notna: Indicate existing (non-missing) values.
+ Series.fillna: Replace missing values.
+ DataFrame.dropna: Drop rows or columns which contain NA values.
+ Index.dropna: Drop missing indices.
Examples
--------
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 9e29fdb94c1e0..ac0fc818bf8c1 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -333,8 +333,8 @@ def aggregate(self, arg, *args, **kwargs):
See Also
--------
- Series.sum : Reducing sum for Series.
- DataFrame.sum : Reducing sum for DataFrame.
+ Series.sum: Reducing sum for Series.
+ DataFrame.sum: Reducing sum for DataFrame.
Examples
--------
@@ -481,8 +481,8 @@ class Window(_Window):
See Also
--------
- expanding : Provides expanding transformations.
- ewm : Provides exponential weighted functions.
+ expanding : Provide expanding transformations.
+ ewm : Provide exponential weighted functions.
Notes
-----
@@ -697,8 +697,10 @@ def f(arg, *args, **kwargs):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.DataFrame.rolling.aggregate
- pandas.DataFrame.aggregate
+ pandas.DataFrame.rolling.aggregate: Aggregate using one or more
+ operations over the specified axis.
+ pandas.DataFrame.aggregate: Aggregate using one or more operations over
+ the specified axis.
""")
_agg_examples_doc = dedent("""
@@ -1322,9 +1324,9 @@ def kurt(self, **kwargs):
See Also
--------
- Series.quantile : Computes value at the given quantile over all data
+ Series.quantile : Compute value at the given quantile over all data
in Series.
- DataFrame.quantile : Computes values at the given quantile over
+ DataFrame.quantile : Compute values at the given quantile over
requested axis in DataFrame.
Examples
@@ -1626,8 +1628,8 @@ def _validate_freq(self):
_agg_see_also_doc = dedent("""
See Also
--------
- Series.rolling
- DataFrame.rolling
+ Series.rolling: Provide rolling window calculations.
+ DataFrame.rolling: Provide rolling window calculations.
""")
_agg_examples_doc = dedent("""
@@ -1853,8 +1855,8 @@ class Expanding(_Rolling_and_Expanding):
See Also
--------
- rolling : Provides rolling window calculations.
- ewm : Provides exponential weighted functions.
+ rolling : Provide rolling window calculations.
+ ewm : Provide exponential weighted functions.
Notes
-----
@@ -1916,9 +1918,12 @@ def _get_window(self, other=None):
_agg_see_also_doc = dedent("""
See Also
--------
- DataFrame.expanding.aggregate
- DataFrame.rolling.aggregate
- DataFrame.aggregate
+ DataFrame.expanding.aggregate: Aggregate using one or more operations
+ over the specified axis.
+ DataFrame.rolling.aggregate: Aggregate using one or more operations over
+ the specified axis.
+ DataFrame.aggregate: Aggregate using one or more operations over the
+ specified axis.
""")
_agg_examples_doc = dedent("""
@@ -2160,8 +2165,8 @@ class EWM(_Rolling):
See Also
--------
- rolling : Provides rolling window calculations.
- expanding : Provides expanding transformations.
+ rolling : Provide rolling window calculations.
+ expanding : Provide expanding transformations.
Notes
-----
@@ -2230,7 +2235,8 @@ def _constructor(self):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.DataFrame.rolling.aggregate
+ pandas.DataFrame.rolling.aggregate: Aggregate using one or more operations
+ over the specified axis.
""")
_agg_examples_doc = dedent("""
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index c8b5dc6b9b7c0..33df9151a99bb 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -81,7 +81,8 @@ class Styler(object):
See Also
--------
- DataFrame.style
+ DataFrame.style: Property returning a Styler object containing methods
+ for building a styled HTML representation fo the DataFrame.
Notes
-----
@@ -643,7 +644,9 @@ def applymap(self, func, subset=None, **kwargs):
See Also
--------
- Styler.where
+ Styler.where: Apply a function elementwise, updating the HTML
+ representation with a style which is selected in accordance with
+ the return value of a function.
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
@@ -677,7 +680,8 @@ def where(self, cond, value, other=None, subset=None, **kwargs):
See Also
--------
- Styler.applymap
+ Styler.applymap: Apply a function elementwise, updating the HTML
+ representation with the result.
"""
if other is None:
@@ -737,7 +741,8 @@ def export(self):
See Also
--------
- Styler.use
+ Styler.use: Set the styles on the current Styler, possibly
+ using styles from Styler.export.
"""
return self._todo
@@ -757,7 +762,8 @@ def use(self, styles):
See Also
--------
- Styler.export
+ Styler.export: Export the styles to applied to the
+ current Styler.
"""
self._todo.extend(styles)
return self
@@ -1260,8 +1266,8 @@ def pipe(self, func, *args, **kwargs):
See Also
--------
- DataFrame.pipe : Analogous method for DataFrame.
- Styler.apply : Apply a function row-wise, column-wise, or table-wise to
+ DataFrame.pipe: Analogous method for DataFrame.
+ Styler.apply: Apply a function row-wise, column-wise, or table-wise to
modify the dataframe's styling.
Notes
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 347bb3eec54af..2f74cc43c25ef 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -528,8 +528,10 @@ class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
See Also
--------
- pandas.io.html._HtmlFrameParser
- pandas.io.html._LxmlFrameParser
+ pandas.io.html._HtmlFrameParser: Base class for parsers that parse
+ HTML into DataFrames.
+ pandas.io.html._LxmlFrameParser: Base class for parsers that parse
+ LXML into DataFrames.
Notes
-----
@@ -639,8 +641,9 @@ class _LxmlFrameParser(_HtmlFrameParser):
See Also
--------
- _HtmlFrameParser
- _BeautifulSoupLxmlFrameParser
+ _HtmlFrameParser: Base class for parsers that parse HTML into DataFrames.
+ _BeautifulSoupLxmlFrameParser: Base class for parsers that parse LXML
+ into DataFrames.
Notes
-----
@@ -707,7 +710,8 @@ def _build_doc(self):
See Also
--------
- pandas.io.html._HtmlFrameParser._build_doc
+ pandas.io.html._HtmlFrameParser._build_doc: Return a tree-like object
+ that can be used to iterate over the DOM.
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
@@ -1043,7 +1047,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None,
See Also
--------
- read_csv
+ read_csv: Read a comma-separated values (csv) file into DataFrame.
Notes
-----
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 4bae067ee5196..ff8516abeb1de 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -360,7 +360,7 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
See Also
--------
- DataFrame.to_json
+ DataFrame.to_json: Writes a Dataframe Object to JSON.
Notes
-----
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 02fba52eac7f7..2db12859805db 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -227,8 +227,8 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
See Also
--------
- read_sql_query : Read SQL query into a DataFrame.
- read_sql : Read SQL query or database table into a DataFrame.
+ read_sql_query: Read SQL query into a DataFrame.
+ read_sql: Read SQL query or database table into a DataFrame.
Notes
-----
@@ -310,7 +310,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
- read_sql
+ read_sql: Read SQL query or database table into a DataFrame.
Notes
-----
@@ -1038,8 +1038,8 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
See Also
--------
- pandas.read_sql_table
- SQLDatabase.read_query
+ pandas.read_sql_table: Read SQL database table into a DataFrame
+ SQLDatabase.read_query : Read SQL query into a DataFrame.
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
@@ -1100,7 +1100,7 @@ def read_query(self, sql, index_col=None, coerce_float=True,
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
- read_sql
+ read_sql: Read SQL query or database table into a DataFrame.
"""
args = _convert_params(sql, params)
diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py
index aaa7aa04acf48..4d12fef255071 100644
--- a/pandas/plotting/_converter.py
+++ b/pandas/plotting/_converter.py
@@ -68,7 +68,7 @@ def register(explicit=True):
See Also
--------
- deregister_matplotlib_converter
+ register_matplotlib_converter: Alias of register.
"""
# Renamed in pandas.plotting.__init__
global _WARN
@@ -98,7 +98,7 @@ def deregister():
See Also
--------
- deregister_matplotlib_converters
+ deregister_matplotlib_converters: Alias of deregister.
"""
# Renamed in pandas.plotting.__init__
for type_, cls in get_pairs():
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 1b782b430a1a7..30d587f46ea57 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -77,7 +77,7 @@ def to_offset(freq):
See Also
--------
- DateOffset
+ DateOffset: Standard kind of date increment used for a date range.
Examples
--------
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index f208ce37a3b14..8cdf7534cf1fa 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -186,7 +186,10 @@ def __add__(date):
See Also
--------
- dateutil.relativedelta.relativedelta
+ dateutil.relativedelta.relativedelta: The relativedelta type is
+ designed to be applied to an existing datetime and can replace
+ specific components of that datetime, or represents an interval
+ of time.
Examples
--------
| - [x] closes #25337
- [x] Updated code_checks.sh
Went through every file and included "See Also" reference, PEP8 compliant. According to the "validate_docstring.py" script the were SA04 errors but having checked every file I have been unable to locate them. Assistance may be required.
Implemented feedback from original pull request.
Updated code_checks.sh to reflect changes. | https://api.github.com/repos/pandas-dev/pandas/pulls/25354 | 2019-02-17T15:08:34Z | 2019-03-19T04:18:16Z | null | 2019-03-19T04:18:16Z |
TST: xfail excel styler tests, xref GH25351 | diff --git a/pandas/_libs/src/compat_helper.h b/pandas/_libs/src/compat_helper.h
index 462f53392adee..078069fb48af2 100644
--- a/pandas/_libs/src/compat_helper.h
+++ b/pandas/_libs/src/compat_helper.h
@@ -29,8 +29,8 @@ the macro, which restores compat.
#ifndef PYPY_VERSION
# if PY_VERSION_HEX < 0x03070000 && defined(PySlice_GetIndicesEx)
# undef PySlice_GetIndicesEx
-# endif
-#endif
+# endif // PY_VERSION_HEX
+#endif // PYPY_VERSION
PANDAS_INLINE int slice_get_indices(PyObject *s,
Py_ssize_t length,
@@ -44,7 +44,7 @@ PANDAS_INLINE int slice_get_indices(PyObject *s,
#else
return PySlice_GetIndicesEx((PySliceObject *)s, length, start,
stop, step, slicelength);
-#endif
+#endif // PY_VERSION_HEX
}
#endif // PANDAS__LIBS_SRC_COMPAT_HELPER_H_
diff --git a/pandas/_libs/src/inline_helper.h b/pandas/_libs/src/inline_helper.h
index 397ec8e7b2cb8..e203a05d2eb56 100644
--- a/pandas/_libs/src/inline_helper.h
+++ b/pandas/_libs/src/inline_helper.h
@@ -19,7 +19,7 @@ The full license is in the LICENSE file, distributed with this software.
#define PANDAS_INLINE static inline
#else
#define PANDAS_INLINE
- #endif
-#endif
+ #endif // __GNUC__
+#endif // PANDAS_INLINE
#endif // PANDAS__LIBS_SRC_INLINE_HELPER_H_
diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h
index b71131bee7008..6fcd2ed0a9ea0 100644
--- a/pandas/_libs/src/parse_helper.h
+++ b/pandas/_libs/src/parse_helper.h
@@ -30,7 +30,7 @@ int to_double(char *item, double *p_value, char sci, char decimal,
#if PY_VERSION_HEX < 0x02060000
#define PyBytes_Check PyString_Check
#define PyBytes_AS_STRING PyString_AS_STRING
-#endif
+#endif // PY_VERSION_HEX
int floatify(PyObject *str, double *result, int *maybe_int) {
int status;
diff --git a/pandas/_libs/src/parser/io.c b/pandas/_libs/src/parser/io.c
index 19271c78501ba..f578ce138e274 100644
--- a/pandas/_libs/src/parser/io.c
+++ b/pandas/_libs/src/parser/io.c
@@ -15,7 +15,7 @@ The full license is in the LICENSE file, distributed with this software.
#ifndef O_BINARY
#define O_BINARY 0
-#endif /* O_BINARY */
+#endif // O_BINARY
/*
On-disk FILE, uncompressed
@@ -277,4 +277,4 @@ void *buffer_mmap_bytes(void *source, size_t nbytes, size_t *bytes_read,
return NULL;
}
-#endif
+#endif // HAVE_MMAP
diff --git a/pandas/_libs/src/parser/io.h b/pandas/_libs/src/parser/io.h
index d22e8ddaea88d..074322c7bdf78 100644
--- a/pandas/_libs/src/parser/io.h
+++ b/pandas/_libs/src/parser/io.h
@@ -25,7 +25,7 @@ typedef struct _file_source {
#if !defined(_WIN32) && !defined(HAVE_MMAP)
#define HAVE_MMAP
-#endif
+#endif // HAVE_MMAP
typedef struct _memory_map {
int fd;
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index a86af7c5416de..6acf3c3de0c91 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -1480,7 +1480,7 @@ int main(int argc, char *argv[]) {
return 0;
}
-#endif
+#endif // TEST
// ---------------------------------------------------------------------------
// Implementation of xstrtod
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index c32c061c7fa89..ce9dd39b16222 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -42,7 +42,7 @@ See LICENSE for the license
#if defined(_MSC_VER)
#define strtoll _strtoi64
-#endif
+#endif // _MSC_VER
/*
@@ -75,7 +75,7 @@ See LICENSE for the license
#define TRACE(X) printf X;
#else
#define TRACE(X)
-#endif
+#endif // VERBOSE
#define PARSER_OUT_OF_MEMORY -1
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c
index 866c9ca9d3ac7..87866d804503e 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c
@@ -30,7 +30,7 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#if PY_MAJOR_VERSION >= 3
#define PyInt_AsLong PyLong_AsLong
-#endif
+#endif // PyInt_AsLong
const npy_datetimestruct _NS_MIN_DTS = {
1677, 9, 21, 0, 12, 43, 145225, 0, 0};
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
index 05ccdd13598fb..207da4b8f8340 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c
@@ -609,7 +609,7 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen,
tmplen = _snprintf(substr, sublen, "%04" NPY_INT64_FMT, dts->year);
#else
tmplen = snprintf(substr, sublen, "%04" NPY_INT64_FMT, dts->year);
-#endif
+#endif // _WIN32
/* If it ran out of space or there isn't space for the NULL terminator */
if (tmplen < 0 || tmplen > sublen) {
goto string_too_short;
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 8c92db734168b..09b2d86bde3d3 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2413,7 +2413,10 @@ def style(df):
['', '', '']],
index=df.index, columns=df.columns)
- def assert_equal_style(cell1, cell2):
+ def assert_equal_style(cell1, cell2, engine):
+ if engine in ['xlsxwriter', 'openpyxl']:
+ pytest.xfail(reason=("GH25351: failing on some attribute "
+ "comparisons in {}".format(engine)))
# XXX: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
@@ -2457,7 +2460,7 @@ def custom_converter(css):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
assert cell1.value == cell2.value
- assert_equal_style(cell1, cell2)
+ assert_equal_style(cell1, cell2, engine)
n_cells += 1
# ensure iteration actually happened:
@@ -2515,7 +2518,7 @@ def custom_converter(css):
assert cell1.number_format == 'General'
assert cell2.number_format == '0%'
else:
- assert_equal_style(cell1, cell2)
+ assert_equal_style(cell1, cell2, engine)
assert cell1.value == cell2.value
n_cells += 1
@@ -2533,7 +2536,7 @@ def custom_converter(css):
assert not cell1.font.bold
assert cell2.font.bold
else:
- assert_equal_style(cell1, cell2)
+ assert_equal_style(cell1, cell2, engine)
assert cell1.value == cell2.value
n_cells += 1
diff --git a/pandas/util/move.c b/pandas/util/move.c
index 9bb662d50cb3f..188d7b79b35d2 100644
--- a/pandas/util/move.c
+++ b/pandas/util/move.c
@@ -19,15 +19,15 @@ The full license is in the LICENSE file, distributed with this software.
/* in python 3, we cannot intern bytes objects so this is always false */
#define PyString_CHECK_INTERNED(cs) 0
-#endif /* !COMPILING_IN_PY2 */
+#endif // !COMPILING_IN_PY2
#ifndef Py_TPFLAGS_HAVE_GETCHARBUFFER
#define Py_TPFLAGS_HAVE_GETCHARBUFFER 0
-#endif
+#endif // Py_TPFLAGS_HAVE_GETCHARBUFFER
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
-#endif
+#endif // Py_TPFLAGS_HAVE_NEWBUFFER
static PyObject *badmove; /* bad move exception class */
@@ -85,14 +85,14 @@ static PyBufferProcs stolenbuf_as_buffer = {
(getbufferproc) stolenbuf_getbuffer,
};
-#else /* Python 3 */
+#else // Python 3
static PyBufferProcs stolenbuf_as_buffer = {
(getbufferproc) stolenbuf_getbuffer,
NULL,
};
-#endif /* COMPILING_IN_PY2 */
+#endif // COMPILING_IN_PY2
PyDoc_STRVAR(stolenbuf_doc,
"A buffer that is wrapping a stolen bytes object's buffer.");
@@ -208,7 +208,7 @@ static PyModuleDef move_module = {
-1,
methods,
};
-#endif /* !COMPILING_IN_PY2 */
+#endif // !COMPILING_IN_PY2
PyDoc_STRVAR(
badmove_doc,
@@ -231,7 +231,7 @@ PyInit__move(void)
#else
#define ERROR_RETURN
init_move(void)
-#endif /* !COMPILING_IN_PY2 */
+#endif // !COMPILING_IN_PY2
{
PyObject *m;
@@ -250,7 +250,7 @@ init_move(void)
if (!(m = PyModule_Create(&move_module)))
#else
if (!(m = Py_InitModule(MODULE_NAME, methods)))
-#endif /* !COMPILING_IN_PY2 */
+#endif // !COMPILING_IN_PY2
{
return ERROR_RETURN;
}
@@ -269,5 +269,5 @@ init_move(void)
#if !COMPILING_IN_PY2
return m;
-#endif /* !COMPILING_IN_PY2 */
+#endif // !COMPILING_IN_PY2
}
| xref #25351 | https://api.github.com/repos/pandas-dev/pandas/pulls/25352 | 2019-02-17T15:02:02Z | 2019-02-17T17:27:07Z | 2019-02-17T17:27:07Z | 2019-02-17T17:29:52Z |
DOC: Fixing SA04 errors as per #25337 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index ac6aade106ce6..ec0f97441dad9 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -242,7 +242,7 @@ fi
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
MSG='Validate docstrings (GL06, GL07, GL09, SS04, PR03, PR05, PR10, EX04, RT04, RT05, SS05, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR04,PR05,EX04,RT04,RT05,SS05,SA05
+ $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR04,PR05,EX04,RT04,RT05,SS05,SA05,SA04
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c5c8f47ad6dba..e18dac07e3fcf 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -295,8 +295,8 @@ def unique(values):
See Also
--------
- Index.unique
- Series.unique
+ Index.unique: Return unique values in the index.
+ Series.unique: Return unique values of Series object.
Examples
--------
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 7aaefef3d03e5..8fd9c31d3b5b5 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -171,8 +171,10 @@ def _from_factorized(cls, values, original):
See Also
--------
- pandas.factorize
- ExtensionArray.factorize
+ pandas.factorize: Encode the object as an enumerated type or
+ categorical variable.
+ ExtensionArray.factorize: Encode the extension array as
+ an enumerated type.
"""
raise AbstractMethodError(cls)
@@ -377,7 +379,7 @@ def _values_for_argsort(self):
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort: Return the indices that would sort this array.
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
@@ -776,8 +778,8 @@ def take(self, indices, allow_fill=False, fill_value=None):
See Also
--------
- numpy.take
- pandas.api.extensions.take
+ numpy.take: Take elements from an array along an axis.
+ pandas.api.extensions.take: Take elements from an array.
Examples
--------
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index d7d0882bbcc94..11e10ef664527 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -421,12 +421,12 @@ def categories(self):
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
+ rename_categories: Renames categories .
+ reorder_categories: Reorders categories into specified new categories .
+ add_categories: Adds new categories .
+ remove_categories: Removes specified categories .
+ remove_unused_categories: Removes unused categories .
+ set_categories: Sets new categories inplace .
"""
return self.dtype.categories
@@ -836,11 +836,12 @@ def set_categories(self, new_categories, ordered=None, rename=False,
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
+ rename_categories: Renames categories .
+ reorder_categories: Reorders categories into specified new categories .
+ add_categories: Adds new categories .
+ remove_categories: Removes specified categories .
+ remove_unused_categories: Removes unused categories .
+
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
@@ -908,11 +909,13 @@ def rename_categories(self, new_categories, inplace=False):
See Also
--------
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
+
+ reorder_categories: Reorders categories into specified new categories .
+ add_categories: Adds new categories .
+ remove_categories: Removes specified categories .
+ remove_unused_categories: Removes unused categories .
+ set_categories: Sets new categories inplace .
+
Examples
--------
@@ -986,11 +989,12 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
See Also
--------
- rename_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
+ rename_categories: Renames categories .
+ add_categories: Adds new categories .
+ remove_categories: Removes specified categories .
+ remove_unused_categories: Removes unused categories .
+ set_categories: Sets new categories inplace .
+
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
@@ -1026,11 +1030,12 @@ def add_categories(self, new_categories, inplace=False):
See Also
--------
- rename_categories
- reorder_categories
- remove_categories
- remove_unused_categories
- set_categories
+ rename_categories: Renames categories .
+ reorder_categories: Reorders categories into specified new categories .
+ remove_categories: Removes specified categories .
+ remove_unused_categories: Removes unused categories .
+ set_categories: Sets new categories inplace .
+
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
@@ -1075,11 +1080,12 @@ def remove_categories(self, removals, inplace=False):
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_unused_categories
- set_categories
+ rename_categories: Renames categories .
+ reorder_categories: Reorders categories into specified new categories .
+ add_categories: Adds new categories .
+ remove_unused_categories: Removes unused categories .
+ set_categories: Sets new categories inplace .
+
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
@@ -1118,11 +1124,12 @@ def remove_unused_categories(self, inplace=False):
See Also
--------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- set_categories
+ rename_categories: Renames categories .
+ reorder_categories: Reorders categories into specified new categories .
+ add_categories: Adds new categories .
+ remove_categories: Removes specified categories .
+ set_categories: Sets new categories inplace .
+
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
@@ -1366,7 +1373,7 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of the array.
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@@ -1469,7 +1476,7 @@ def value_counts(self, dropna=True):
See Also
--------
- Series.value_counts
+ Series.value_counts: Return a Series containing counts of unique values.
"""
from numpy import bincount
@@ -1544,7 +1551,7 @@ def argsort(self, *args, **kwargs):
See Also
--------
- numpy.ndarray.argsort
+ numpy.ndarray.argsort: Returns the indices that would sort this array.
Notes
-----
@@ -1597,8 +1604,9 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
See Also
--------
- Categorical.sort
- Series.sort_values
+ Categorical.sort: Sorts the Category inplace by category value.
+ Series.sort_values: Sort a Series in ascending or descending
+ order by some criterion.
Examples
--------
@@ -2294,9 +2302,10 @@ def unique(self):
See Also
--------
- unique
+ unique: Hash table-based unique. Uniques are returned in order of
+ appearance. This does NOT sort.
CategoricalIndex.unique
- Series.unique
+ Series.unique: Return Index of unique values in the object.
"""
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 84536ac72a455..a59ad1000abb8 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -670,7 +670,7 @@ def repeat(self, repeats, *args, **kwargs):
See Also
--------
- numpy.ndarray.repeat
+ numpy.ndarray.repeat: Repeat elements of an array.
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
@@ -1390,7 +1390,8 @@ def min(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.min
+ numpy.ndarray.min: Return the minimum of the Array along
+ a given axis.
Index.min : Return the minimum value in an Index.
Series.min : Return the minimum value in a Series.
"""
@@ -1410,7 +1411,8 @@ def max(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.max
+ numpy.ndarray.max: Return the maximum of an Array
+ along a given axis.
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index fd90aec3b5e8c..666b56c9958ab 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -480,7 +480,7 @@ def value_counts(self, dropna=True):
See Also
--------
- Series.value_counts
+ Series.value_counts: Return a Series containing counts of unique values.
"""
@@ -521,7 +521,7 @@ def _values_for_argsort(self):
See Also
--------
- ExtensionArray.argsort
+ ExtensionArray.argsort: Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = data.min() - 1
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 1e671c7bd956a..9dc1e86d16487 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -795,7 +795,8 @@ def value_counts(self, dropna=True):
See Also
--------
- Series.value_counts
+ Series.value_counts: Return a Series containing counts of
+ unique values.
"""
# TODO: implement this is a non-naive way!
from pandas.core.algorithms import value_counts
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 3ddceb8c2839d..3fdd86e088d6a 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -732,8 +732,10 @@ def period_array(data, freq=None, copy=False):
See Also
--------
- PeriodArray
- pandas.PeriodIndex
+ PeriodArray:Immutable ndarray holding ordinal values indicating regular
+ periods in time such as particular years, quarters, months, etc.
+ pandas.PeriodIndex: Immutable ndarray holding ordinal values indicating
+ regular periods in time such as particular years, quarters, months, etc.
Examples
--------
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 6114e578dc90f..470fc61662e40 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -1450,7 +1450,8 @@ def all(self, axis=None, *args, **kwargs):
See Also
--------
- numpy.all
+ numpy.all: Test whether all array elements along a given axis
+ evaluate to True.
"""
nv.validate_all(args, kwargs)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 5a98e83c65884..5695f57d8eda6 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -690,8 +690,8 @@ def _is_homogeneous_type(self):
See Also
--------
- DataFrame._is_homogeneous_type
- MultiIndex._is_homogeneous_type
+ DataFrame._is_homogeneous_type: Whether all the columns of a dataframe have the same dtype.
+ MultiIndex._is_homogeneous_type: Whether the levels of a MultiIndex all have the same dtype.
"""
return True
@@ -1032,7 +1032,7 @@ def argmax(self, axis=None, skipna=True):
See Also
--------
- numpy.ndarray.argmax
+ numpy.ndarray.argmax: Returns indices of the maximum value along a given axis.
"""
nv.validate_minmax_axis(axis)
return nanops.nanargmax(self._values, skipna=skipna)
@@ -1089,7 +1089,7 @@ def argmin(self, axis=None, skipna=True):
See Also
--------
- numpy.ndarray.argmin
+ numpy.ndarray.argmin: Returns indices of the minimum value along a given axis.
"""
nv.validate_minmax_axis(axis)
return nanops.nanargmin(self._values, skipna=skipna)
@@ -1104,7 +1104,7 @@ def tolist(self):
See Also
--------
- numpy.ndarray.tolist
+ numpy.ndarray.tolist: Returns an array as a list
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
@@ -1415,7 +1415,7 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes: Total bytes consumed by the elements of the array.
Notes
-----
@@ -1474,7 +1474,7 @@ def factorize(self, sort=False, na_sentinel=-1):
See Also
--------
- numpy.searchsorted
+ numpy.searchsorted:Find indices where elements should be inserted to maintain order.
Notes
-----
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 23c3e0eaace81..2d97595d63126 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -248,8 +248,8 @@ def eval(expr, parser='pandas', engine=None, truediv=True,
See Also
--------
- DataFrame.query
- DataFrame.eval
+ DataFrame.query: Queries the columns of a Dataframe with a boolean expression.
+ DataFrame.eval: Evaluate a string describing operations on DataFrame columns.
Notes
-----
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 88bbdcf342d66..6eb31bb8c6a33 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -153,8 +153,8 @@ class ExtensionDtype(_DtypeOpsMixin):
See Also
--------
- extensions.register_extension_dtype
- extensions.ExtensionArray
+ extensions.register_extension_dtype: Class decorator to register an ExtensionType with pandas.
+ extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 640d43f3b0e03..54922c8e8a3d4 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -196,7 +196,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
See Also
--------
- Categorical
+ Categorical: Represents a categorical variable .
Notes
-----
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cf97c94f6d129..c8072bcea65df 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -495,7 +495,7 @@ def shape(self):
See Also
--------
- ndarray.shape
+ ndarray.shape: Returns tuple of array dimensions.
Examples
--------
@@ -718,7 +718,7 @@ def style(self):
See Also
--------
- io.formats.style.Styler
+ io.formats.style.Styler: Class to help style a DataFrame or Series according to the data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
@@ -1870,7 +1870,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
See Also
--------
- read_csv
+ read_csv: Read a comma-separated values (csv) file into DataFrame.
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
@@ -3940,7 +3940,7 @@ def rename(self, *args, **kwargs):
See Also
--------
- DataFrame.rename_axis
+ DataFrame.rename_axis: Set the name of the axis for the index or columns.
Examples
--------
@@ -5978,8 +5978,9 @@ def unstack(self, level=-1, fill_value=None):
See Also
--------
%(other)s
- pivot_table
- DataFrame.pivot
+ pivot_table: Create a spreadsheet-style pivot table as a DataFrame.
+ DataFrame.pivot: Return reshaped DataFrame organized by given index
+ / column values.
Examples
--------
@@ -6966,8 +6967,10 @@ def corr(self, method='pearson', min_periods=1):
See Also
--------
- DataFrame.corrwith
- Series.corr
+ DataFrame.corrwith: Compute pairwise correlation between rows or
+ columns of DataFrame with rows or columns of Series or DataFrame.
+ Series.corr: Compute correlation with other Series, excluding
+ missing values.
Examples
--------
@@ -7163,7 +7166,8 @@ def corrwith(self, other, axis=0, drop=False, method='pearson'):
See Also
-------
- DataFrame.corr
+ DataFrame.corr: Compute pairwise correlation of columns,
+ excluding NA/null values.
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
@@ -7537,7 +7541,7 @@ def idxmin(self, axis=0, skipna=True):
See Also
--------
- Series.idxmin
+ Series.idxmin Returns the row label of the minimum value.
Notes
-----
@@ -7574,7 +7578,7 @@ def idxmax(self, axis=0, skipna=True):
See Also
--------
- Series.idxmax
+ Series.idxmax: Returns the row label of the maximum value.
Notes
-----
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b1fcbba7bd7ec..839bab6e8d5ae 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -988,7 +988,8 @@ def rename(self, *args, **kwargs):
See Also
--------
- NDFrame.rename_axis
+ NDFrame.rename_axis: Set the name of the axis for the
+ index or columns.
Examples
--------
@@ -1860,8 +1861,8 @@ def empty(self):
See Also
--------
- Series.dropna
- DataFrame.dropna
+ Series.dropna: Return a new Series with missing values removed.
+ DataFrame.dropna: Drop rows or columns which contain missing values.
Notes
-----
@@ -2213,7 +2214,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,
See Also
--------
- read_json
+ read_json: Convert a JSON string to pandas object.
Examples
--------
@@ -3348,8 +3349,9 @@ def _take(self, indices, axis=0, is_copy=True):
See Also
--------
- numpy.ndarray.take
- numpy.take
+ numpy.ndarray.take: Return an array formed from the elements of
+ a at the given indices.
+ numpy.take: Take elements from an array along an axis.
"""
self._consolidate_inplace()
@@ -4524,7 +4526,8 @@ def filter(self, items=None, like=None, regex=None, axis=None):
See Also
--------
- DataFrame.loc
+ DataFrame.loc: Access a group of rows and columns by
+ label(s) or a boolean array.
Notes
-----
@@ -4891,9 +4894,9 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
See Also
--------
- DataFrame.apply
- DataFrame.applymap
- Series.map
+ DataFrame.apply: Apply a function along an axis of the DataFrame.
+ DataFrame.applymap: Apply a function to a Dataframe elementwise.
+ Series.map: Map values of Series according to input correspondence.
Notes
-----
@@ -5226,7 +5229,7 @@ def as_matrix(self, columns=None):
See Also
--------
- DataFrame.values
+ DataFrame.values: Return a Numpy representation of the DataFrame.
Notes
-----
@@ -5353,7 +5356,7 @@ def get_values(self):
See Also
--------
- values : Numpy representation of DataFrame.
+ Dataframe.values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
Examples
@@ -5990,7 +5993,11 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
See Also
--------
interpolate : Fill NaN values using interpolation.
- reindex, asfreq
+ reindex: Conform DataFrame to new index with optional filling
+ logic, placing NA/NaN in locations having no value in the
+ previous index.
+ asfreq: Convert TimeSeries to specified frequency. Uses method arg
+ to fill in missing values
Examples
--------
@@ -7672,7 +7679,8 @@ def asfreq(self, freq, method=None, how=None, normalize=False,
See Also
--------
- reindex
+ Dataframe.reindex:Conform DataFrame to new index with optional filling logic,
+ placing NA/NaN in locations having no value in the previous index.
Notes
-----
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index ebba4a0a9395d..3b0aba14acacb 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -1,5 +1,5 @@
"""
-Provide basic components for groupby. These defintiions
+Provide basic components for groupby. These definitions
hold the whitelist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 27e13e86a6e9e..0329c32c926e5 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -708,9 +708,12 @@ def _selection_name(self):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.Series.groupby.apply
- pandas.Series.groupby.transform
- pandas.Series.aggregate
+ pandas.Series.groupby.apply: Apply function func group-wise and combine
+ the results together.
+ pandas.Series.groupby.transform: Call func on self producing a DataFrame
+ with transformed values and that has the same axis length as self.
+ pandas.Series.aggregate: Aggregate using one or more operations over
+ the specified axis.
""")
_agg_examples_doc = dedent("""
@@ -1251,9 +1254,12 @@ class DataFrameGroupBy(NDFrameGroupBy):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.DataFrame.groupby.apply
- pandas.DataFrame.groupby.transform
- pandas.DataFrame.aggregate
+ pandas.Series.groupby.apply: Apply function func group-wise and combine
+ the results together.
+ pandas.Series.groupby.transform: Call func on self producing a DataFrame
+ with transformed values and that has the same axis length as self.
+ pandas.Series.aggregate: Aggregate using one or more operations over
+ the specified axis.
""")
_agg_examples_doc = dedent("""
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c63bc5164e25b..879734356d2af 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -237,7 +237,9 @@ class providing the base-class of operations.
See Also
--------
-aggregate, transform
+aggregate: Aggregate using callable, string, dict, or list of string/callables.
+transform: Call func on self producing a DataFrame with transformed values
+ and that has the same axis length as self.
Notes
-----
@@ -1474,8 +1476,8 @@ def _fill(self, direction, limit=None):
See Also
--------
- pad
- backfill
+ pad: Pad strings in the Series/Index up to width.
+ backfill: Fill NA/NaN values using the specified method.
"""
# Need int value for Cython
if limit is None:
@@ -1499,10 +1501,14 @@ def pad(self, limit=None):
See Also
--------
- Series.pad
- DataFrame.pad
- Series.fillna
- DataFrame.fillna
+ Series.pad: Pad strings in the Series/Index with an
+ additional character to specified side.
+ DataFrame.pad: Conform DataFrame to new index with optional filling logic,
+ placing NA/NaN in locations having no value in the previous index.
+ Series.fillna: Fill NA/NaN values using the specified method
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
+
+
"""
return self._fill('ffill', limit=limit)
ffill = pad
@@ -1519,10 +1525,10 @@ def backfill(self, limit=None):
See Also
--------
- Series.backfill
- DataFrame.backfill
- Series.fillna
- DataFrame.fillna
+ Series.backfill: Backward fill values of Series/Index
+ DataFrame.backfill: Backfill values of Dataframe
+ Series.fillna: Fill NA/NaN values using the specified method.
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._fill('bfill', limit=limit)
bfill = backfill
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 602e11a08b4ed..8096583cc5784 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -212,7 +212,8 @@ def to_pytimedelta(self):
See Also
--------
- datetime.timedelta
+ datetime.timedelta: A duration expressing the difference between two date,
+ time, or datetime instances to microsecond resolution.
Examples
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f2c8ac6e9b413..cb09452e6d3d9 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -692,7 +692,8 @@ def ravel(self, order='C'):
See Also
--------
- numpy.ndarray.ravel
+ numpy.ndarray.ravel: A 1-D array, containing the elements of the input,
+ is returned.
"""
return self._ndarray_values.ravel(order=order)
@@ -777,7 +778,7 @@ def astype(self, dtype, copy=True):
See Also
--------
- numpy.ndarray.take
+ numpy.ndarray.take: Take elements from an array along an axis.
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
@@ -1990,8 +1991,10 @@ def dropna(self, how='any'):
See Also
--------
- unique
- Series.unique
+ unique: Hash table-based unique.
+ Uniques are returned in order of appearance. This does NOT sort.
+
+ Series.unique:Return unique values of Series object.
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
@@ -3654,8 +3657,8 @@ def _values(self):
See Also
--------
- values
- _ndarray_values
+ values: Only the values in the DataFrame will be returned, the axes labels will be removed.
+ _ndarray_values: The data as a ndarray
"""
return self._data
@@ -4034,7 +4037,8 @@ def putmask(self, mask, value):
See Also
--------
- numpy.ndarray.putmask
+ numpy.ndarray.putmask: Changes elements of an array based on conditional
+ and input values.
"""
values = self.values.copy()
try:
@@ -5302,7 +5306,7 @@ def ensure_index_from_sequences(sequences, names=None):
See Also
--------
- ensure_index
+ ensure_index: Ensures that an index exists
"""
from .multi import MultiIndex
@@ -5342,7 +5346,7 @@ def ensure_index(index_like, copy=False):
See Also
--------
- ensure_index_from_sequences
+ ensure_index_from_sequences: Construct an index from sequences of data.
"""
if isinstance(index_like, Index):
if copy:
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index c6d31339f950d..c0ff6224c95b4 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -77,7 +77,8 @@ class CategoricalIndex(Index, accessor.PandasDelegate):
See Also
--------
- Categorical, Index
+ Categorical: Represents a categorical variable
+ Index: Immutable ndarray implementing an ordered, sliceable set.
"""
_typ = 'categoricalindex'
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index aa7332472fc07..b1cf5800a3511 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -319,7 +319,8 @@ def min(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.min
+ numpy.ndarray.min: Return the minimum for the Array along a
+ given axis.
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
@@ -355,7 +356,8 @@ def argmin(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.argmin
+ numpy.ndarray.argmin: Return indices of the minimum values along
+ the given axis of a.
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
@@ -376,7 +378,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.max
+ numpy.ndarray.max: Return the maximum along a given axis.
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
@@ -412,7 +414,8 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.argmax
+ numpy.ndarray.argmax: Return indices of the maximum values along
+ the given axis.
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index df91c71cfe238..ce38439806f98 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1300,7 +1300,8 @@ def indexer_at_time(self, time, asof=False):
See Also
--------
- indexer_between_time, DataFrame.at_time
+ indexer_between_time: Return index locations of values between particular times of day .
+ DataFrame.at_time: Select values at particular time of day .
"""
from dateutil.parser import parse
@@ -1340,7 +1341,8 @@ def indexer_between_time(self, start_time, end_time, include_start=True,
See Also
--------
- indexer_at_time, DataFrame.between_time
+ indexer_between_time: Return index locations of values between particular times of day .
+ DataFrame.at_time: Select values at particular time of day .
"""
start_time = tools.to_time(start_time)
end_time = tools.to_time(end_time)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index efb77b5d155a1..2e74936cba942 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -543,8 +543,8 @@ def _is_homogeneous_type(self):
See Also
--------
- Index._is_homogeneous_type
- DataFrame._is_homogeneous_type
+ Index._is_homogeneous_type: Whether the Index has a single dtype.
+ DataFrame._is_homogeneous_type: Whether the Dataframe has a single dtype.
Examples
--------
@@ -1449,7 +1449,8 @@ def to_frame(self, index=True, name=None):
See Also
--------
- DataFrame
+ DataFrame: Two-dimensional size-mutable, potentially heterogeneous tabular
+ data structure with labeled axes
"""
from pandas import DataFrame
@@ -2823,7 +2824,8 @@ def equals(self, other):
See Also
--------
- equal_levels
+ equal_levels: Return True if the levels of both MultiIndex objects
+ are the same
"""
if self.is_(other):
return True
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 5aafe9734b6a0..0a8238794e091 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -241,7 +241,7 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes: otal bytes consumed by the elements of the array.
"""
return self.nbytes
@@ -318,7 +318,7 @@ def argsort(self, *args, **kwargs):
See Also
--------
- numpy.ndarray.argsort
+ numpy.ndarray.argsort: Returns the indices that would sort this array.
"""
nv.validate_argsort(args, kwargs)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 4e2c04dba8b04..e63d2be86e38f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -3167,7 +3167,8 @@ def _putmask_smart(v, m, n):
See Also
--------
- ndarray.putmask
+ ndarray.putmask: Changes elements of an array based on conditional
+ and input values.
"""
# we cannot use np.asarray() here as we cannot have conversions
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 1555542079d80..aa3f727d2ea0c 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -681,7 +681,7 @@ def round(self, decimals=0, *args, **kwargs):
See Also
--------
- numpy.around
+ numpy.around: Evenly round to the given number of decimals.
"""
nv.validate_round(args, kwargs)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index b3b28d7772713..3ebf594b7eaf1 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -110,7 +110,7 @@ def __iter__(self):
See Also
--------
- GroupBy.__iter__
+ GroupBy.__iter__: Groupby iterator
"""
self._set_binner()
return super(Resampler, self).__iter__()
@@ -213,9 +213,11 @@ def pipe(self, func, *args, **kwargs):
_agg_see_also_doc = dedent("""
See Also
--------
- DataFrame.groupby.aggregate
- DataFrame.resample.transform
- DataFrame.aggregate
+ DataFrame.groupby.aggregate: Aggregate using callable, string, dict, or list of
+ string/callables
+ DataFrame.resample.transform: Call function producing a like-indexed Series on
+ each group and return a Series with the transformed values
+ DataFrame.aggregate: Aggregate using one or more operations over the specified axis.
""")
_agg_examples_doc = dedent("""
@@ -421,8 +423,8 @@ def pad(self, limit=None):
See Also
--------
- Series.fillna
- DataFrame.fillna
+ Series.fillna: Fill NA/NaN values using the specified method.
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
"""
return self._upsample('pad', limit=limit)
ffill = pad
@@ -783,8 +785,8 @@ def asfreq(self, fill_value=None):
See Also
--------
- Series.asfreq
- DataFrame.asfreq
+ Series.asfreq: Convert TimeSeries to specified frequency.
+ DataFrame.asfreq: Convert TimeSeries to specified frequency.
"""
return self._upsample('asfreq', fill_value=fill_value)
@@ -833,9 +835,9 @@ def quantile(self, q=0.5, **kwargs):
See Also
--------
- Series.quantile
- DataFrame.quantile
- DataFrameGroupBy.quantile
+ Series.quantile: Return value at the given quantile.
+ DataFrame.quantile: Return values at the given quantile over requested axis.
+ DataFrameGroupBy.quantile Return values at the given quantile over requested axis.
"""
return self._downsample('quantile', q=q, **kwargs)
@@ -1041,7 +1043,7 @@ def _upsample(self, method, limit=None, fill_value=None):
See Also
--------
- .fillna
+ .fillna: Fill NA/NaN values using the specified method.
"""
self._set_binner()
@@ -1172,7 +1174,7 @@ def _upsample(self, method, limit=None, fill_value=None):
See Also
--------
- .fillna
+ .fillna: Fill NA/NaN values using the specified method.
"""
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 53671e00e88b4..1c98eb4c18c33 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -89,10 +89,12 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
See Also
--------
- Series.append
- DataFrame.append
- DataFrame.join
- DataFrame.merge
+ Series.append: Concatenate two or more Series.
+ DataFrame.append: Append rows of other to the end of caller, returning a new
+ object.
+ DataFrame.join: Join columns of another DataFrame.
+ DataFrame.merge: Merge DataFrame or named Series objects with a database-style
+ join.
Notes
-----
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index fb50a3c60f705..b7cdf54000a8f 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -184,8 +184,9 @@ def merge_ordered(left, right, on=None,
See Also
--------
- merge
- merge_asof
+ merge: Merge DataFrame or named Series objects with a database-style join.
+ merge_asof: Perform an asof merge. This is similar to a left-join except that
+ we match on nearest key rather than equal keys.
Examples
--------
@@ -324,8 +325,9 @@ def merge_asof(left, right, on=None,
See Also
--------
- merge
- merge_ordered
+ merge: Merge DataFrame or named Series objects with a database-style join.
+ merge_ordered: Perform merge with optional filling/interpolation designed
+ for ordered data like time series data.
Examples
--------
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index f436b3b92a359..05c3b2be1fd4b 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -740,7 +740,8 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
See Also
--------
- Series.str.get_dummies
+ Series.str.get_dummies: Split each string in the Series by sep and
+ return a frame of dummy/indicator variables.
Examples
--------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 31c6247436418..305559731467d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -521,7 +521,8 @@ def ravel(self, order='C'):
See Also
--------
- numpy.ndarray.ravel
+ numpy.ndarray.ravel: Returns a 1-D array containing the elements
+ of the input.
"""
return self._values.ravel(order=order)
@@ -533,7 +534,8 @@ def compress(self, condition, *args, **kwargs):
See Also
--------
- numpy.ndarray.compress
+ numpy.ndarray.compress: Return selected slices of this array
+ along given axis.
"""
msg = ("Series.compress(condition) is deprecated. "
"Use 'Series[condition]' or "
@@ -557,7 +559,7 @@ def nonzero(self):
See Also
--------
- numpy.nonzero
+ numpy.nonzero: Return the indices of the elements that are non-zero.
Examples
--------
@@ -590,7 +592,8 @@ def put(self, *args, **kwargs):
See Also
--------
- numpy.ndarray.put
+ numpy.ndarray.put: Replaces specified elements of an array with
+ given values.
"""
self._values.put(*args, **kwargs)
@@ -2079,8 +2082,9 @@ def quantile(self, q=0.5, interpolation='linear'):
See Also
--------
- core.window.Rolling.quantile
- numpy.percentile
+ core.window.Rolling.quantile: Calculate the rolling quantile.
+ numpy.percentile: Compute the n-th percentile of the data
+ along the specified axis.
Examples
--------
@@ -3071,7 +3075,7 @@ def argsort(self, axis=0, kind='quicksort', order=None):
See Also
--------
- numpy.ndarray.argsort
+ numpy.ndarray.argsort: Returns the indices that would sort an array.
"""
values = self._values
mask = isna(values)
@@ -4197,7 +4201,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None,
See Also
--------
- read_csv
+ read_csv: Read a comma-separated values (csv) file into DataFrame.
"""
# We're calling `DataFrame.from_csv` in the implementation,
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 183a91c952140..0c7e551d12172 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -980,7 +980,8 @@ def str_get_dummies(arr, sep='|'):
See Also
--------
- get_dummies
+ get_dummies: Convert categorical variable into dummy/indicator
+ variables
Examples
--------
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 9e29fdb94c1e0..13a78d96204be 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -697,8 +697,10 @@ def f(arg, *args, **kwargs):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.DataFrame.rolling.aggregate
- pandas.DataFrame.aggregate
+ pandas.DataFrame.rolling.aggregate: Aggregate using one or more
+ operations over the specified axis.
+ pandas.DataFrame.aggregate: Aggregate using one or more operations over
+ the specified axis.
""")
_agg_examples_doc = dedent("""
@@ -1626,8 +1628,8 @@ def _validate_freq(self):
_agg_see_also_doc = dedent("""
See Also
--------
- Series.rolling
- DataFrame.rolling
+ Series.rolling: Provides rolling window calculations.
+ DataFrame.rolling: Provides rolling window calculations.
""")
_agg_examples_doc = dedent("""
@@ -1916,9 +1918,12 @@ def _get_window(self, other=None):
_agg_see_also_doc = dedent("""
See Also
--------
- DataFrame.expanding.aggregate
- DataFrame.rolling.aggregate
- DataFrame.aggregate
+ DataFrame.expanding.aggregate: Aggregate using one or more operations
+ over the specified axis.
+ DataFrame.rolling.aggregate: Aggregate using one or more operations over
+ the specified axis.
+ DataFrame.aggregate: Aggregate using one or more operations over the
+ specified axis.
""")
_agg_examples_doc = dedent("""
@@ -2230,7 +2235,8 @@ def _constructor(self):
_agg_see_also_doc = dedent("""
See Also
--------
- pandas.DataFrame.rolling.aggregate
+ pandas.DataFrame.rolling.aggregate: Aggregate using one or more operations
+ over the specified axis.
""")
_agg_examples_doc = dedent("""
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index c8b5dc6b9b7c0..48b412839a9c2 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -81,7 +81,8 @@ class Styler(object):
See Also
--------
- DataFrame.style
+ DataFrame.style: Property returning a Styler object containing methods
+ for building a styled HTML representation fo the DataFrame
Notes
-----
@@ -643,7 +644,9 @@ def applymap(self, func, subset=None, **kwargs):
See Also
--------
- Styler.where
+ Styler.where: Apply a function elementwise, updating the HTML
+ representation with a style which is selected in accordance with
+ the return value of a function.
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
@@ -677,7 +680,8 @@ def where(self, cond, value, other=None, subset=None, **kwargs):
See Also
--------
- Styler.applymap
+ Styler.applymap: Apply a function elementwise, updating the HTML
+ representation with the result.
"""
if other is None:
@@ -737,7 +741,8 @@ def export(self):
See Also
--------
- Styler.use
+ Styler.use: Set the styles on the current Styler, possibly
+ using styles from Styler.export.
"""
return self._todo
@@ -757,7 +762,8 @@ def use(self, styles):
See Also
--------
- Styler.export
+ Styler.export: Export the styles to applied to the
+ current Styler.
"""
self._todo.extend(styles)
return self
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 347bb3eec54af..e017d3364c4be 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -528,8 +528,10 @@ class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
See Also
--------
- pandas.io.html._HtmlFrameParser
- pandas.io.html._LxmlFrameParser
+ pandas.io.html._HtmlFrameParser: Base class for parsers that parse
+ HTML into DataFrames.
+ pandas.io.html._LxmlFrameParser: Base class for parsers that parse
+ LXML into DataFrames.
Notes
-----
@@ -639,8 +641,9 @@ class _LxmlFrameParser(_HtmlFrameParser):
See Also
--------
- _HtmlFrameParser
- _BeautifulSoupLxmlFrameParser
+ _HtmlFrameParser: Base class for parsers that parse HTML into DataFrames.
+ _BeautifulSoupLxmlFrameParser: Base class for parsers that parse LXML
+ into DataFrames.
Notes
-----
@@ -707,7 +710,8 @@ def _build_doc(self):
See Also
--------
- pandas.io.html._HtmlFrameParser._build_doc
+ pandas.io.html._HtmlFrameParser._build_doc: Return a tree-like object
+ that can be used to iterate over the DOM.
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
@@ -1043,7 +1047,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None,
See Also
--------
- read_csv
+ read_csv: Read a comma-separated values (csv) file into DataFrame.
Notes
-----
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 4bbccc8339d7c..019c20a91daf6 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -346,7 +346,7 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
See Also
--------
- DataFrame.to_json
+ DataFrame.to_json: Writes a Dataframe Object to JSON
Notes
-----
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index aaface5415384..f08cb9b002d2f 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -223,7 +223,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
- read_sql
+ read_sql: Read SQL query or database table into a DataFrame.
Notes
-----
@@ -301,7 +301,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
- read_sql
+ read_sql: Read SQL query or database table into a DataFrame.
Notes
-----
@@ -1029,8 +1029,8 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
See Also
--------
- pandas.read_sql_table
- SQLDatabase.read_query
+ pandas.read_sql_table: Read SQL database table into a DataFrame
+ SQLDatabase.read_query : Read SQL query into a DataFrame.
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
@@ -1091,7 +1091,7 @@ def read_query(self, sql, index_col=None, coerce_float=True,
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
- read_sql
+ read_sql: Read SQL query or database table into a DataFrame.
"""
args = _convert_params(sql, params)
diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py
index aaa7aa04acf48..5908cdf0a16e3 100644
--- a/pandas/plotting/_converter.py
+++ b/pandas/plotting/_converter.py
@@ -68,7 +68,7 @@ def register(explicit=True):
See Also
--------
- deregister_matplotlib_converter
+ register_matplotlib_converter: Alias of register .
"""
# Renamed in pandas.plotting.__init__
global _WARN
@@ -98,7 +98,7 @@ def deregister():
See Also
--------
- deregister_matplotlib_converters
+ deregister_matplotlib_converters: alias of deregister.
"""
# Renamed in pandas.plotting.__init__
for type_, cls in get_pairs():
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 4802447cbc99d..170b0b78092a5 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -77,7 +77,7 @@ def to_offset(freq):
See Also
--------
- DateOffset
+ DateOffset: Standard kind of date increment used for a date range.
Examples
--------
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index f208ce37a3b14..7a239a2592a13 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -186,7 +186,10 @@ def __add__(date):
See Also
--------
- dateutil.relativedelta.relativedelta
+ dateutil.relativedelta.relativedelta: The relativedelta type is
+ designed to be applied to an existing datetime and can replace
+ specific components of that datetime, or represents an interval
+ of time.
Examples
--------
diff --git a/pandas/validation_errors.json b/pandas/validation_errors.json
new file mode 100644
index 0000000000000..e69de29bb2d1d
| - [ ] Went through every file and filled in missing references in See Also section
- [ ] Updated code_check.sh to reflect it.
To do this I went through every file and corrected the SA04 errors.
When executing "validation_docstring.py" as per the "Pandas documentation validation errors" page, SA04 errors were still appearing but I haven't been able to locate them. Assistance may be required. | https://api.github.com/repos/pandas-dev/pandas/pulls/25350 | 2019-02-17T14:18:11Z | 2019-02-17T15:04:30Z | null | 2019-02-17T15:04:30Z |
TST: use a fixed seed to have the same uniques across python versions | diff --git a/ci/deps/azure-27-compat.yaml b/ci/deps/azure-27-compat.yaml
index 8899e22bdf6cf..986855c464852 100644
--- a/ci/deps/azure-27-compat.yaml
+++ b/ci/deps/azure-27-compat.yaml
@@ -20,6 +20,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- pip:
- html5lib==1.0b2
- beautifulsoup4==4.2.1
diff --git a/ci/deps/azure-27-locale.yaml b/ci/deps/azure-27-locale.yaml
index 0846ef5e8264e..f73079ecbe3d2 100644
--- a/ci/deps/azure-27-locale.yaml
+++ b/ci/deps/azure-27-locale.yaml
@@ -22,6 +22,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- hypothesis>=3.58.0
- pip:
- html5lib==1.0b2
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index c7d2334623501..6b8d38fd25082 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -28,6 +28,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- moto
- pip:
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index b5a05c49b8083..569b71dae003b 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -27,6 +27,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- pip:
- hypothesis>=3.58.0
- moto # latest moto in conda-forge fails with 3.7, move to conda dependencies when this is fixed
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index 99ae228f25de3..a37be124cc546 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -8,6 +8,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- hypothesis>=3.58.0
- pip:
- "git+git://github.com/dateutil/dateutil.git"
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml
index 58abbabce3d86..d1fe926744ecd 100644
--- a/ci/deps/azure-macos-35.yaml
+++ b/ci/deps/azure-macos-35.yaml
@@ -24,6 +24,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- pip:
- python-dateutil==2.5.3
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-windows-27.yaml b/ci/deps/azure-windows-27.yaml
index b1533b071fa74..74faeed83c387 100644
--- a/ci/deps/azure-windows-27.yaml
+++ b/ci/deps/azure-windows-27.yaml
@@ -27,5 +27,6 @@ dependencies:
- cython>=0.28.2
- pytest
- pytest-xdist
+ - pytest-mock
- moto
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 7b132a134c44e..94d67b3d37788 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -25,4 +25,5 @@ dependencies:
- cython>=0.28.2
- pytest
- pytest-xdist
+ - pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml
index 2624797b24fa1..4915c003bce4e 100644
--- a/ci/deps/travis-27.yaml
+++ b/ci/deps/travis-27.yaml
@@ -41,6 +41,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- moto==1.3.4
- hypothesis>=3.58.0
- pip:
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index 2b38465c04512..2a7692f10752c 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -30,6 +30,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- moto
- pip:
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index a6ffdb95e5e7c..7934d179c8618 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -27,5 +27,6 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - pytest-mock
- moto
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml
index 74db888d588f4..857c3fadfdaeb 100644
--- a/ci/deps/travis-36.yaml
+++ b/ci/deps/travis-36.yaml
@@ -36,6 +36,7 @@ dependencies:
- pytest
- pytest-xdist
- pytest-cov
+ - pytest-mock
- hypothesis>=3.58.0
- pip:
- brotlipy
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index c503124d8cd26..125750191de7d 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -14,6 +14,7 @@ dependencies:
- pytz
- pytest
- pytest-xdist
+ - pytest-mock
- hypothesis>=3.58.0
- s3fs
- pip:
diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py
index 45c5e982c1c48..a3e0e195f4864 100644
--- a/pandas/tests/io/formats/test_console.py
+++ b/pandas/tests/io/formats/test_console.py
@@ -78,13 +78,12 @@ def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
@pytest.mark.parametrize("size", ['', ['']])
-def test_terminal_unknown_dimensions(monkeypatch, size):
- mock = pytest.importorskip("unittest.mock")
+def test_terminal_unknown_dimensions(monkeypatch, size, mocker):
def communicate(*args, **kwargs):
return size
- monkeypatch.setattr('subprocess.Popen', mock.Mock())
+ monkeypatch.setattr('subprocess.Popen', mocker.Mock())
monkeypatch.setattr('subprocess.Popen.return_value.returncode', None)
monkeypatch.setattr(
'subprocess.Popen.return_value.communicate', communicate)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index ceccb48194f85..71b100401ec21 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1160,9 +1160,13 @@ def test_resample_nunique_with_date_gap():
@pytest.mark.parametrize('k', [10, 100, 1000])
def test_resample_group_info(n, k):
# GH10914
+
+ # use a fixed seed to always have the same uniques
+ prng = np.random.RandomState(1234)
+
dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
- ts = Series(np.random.randint(0, n // k, n).astype('int64'),
- index=np.random.choice(dr, n))
+ ts = Series(prng.randint(0, n // k, n).astype('int64'),
+ index=prng.choice(dr, n))
left = ts.resample('30T').nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(),
| https://api.github.com/repos/pandas-dev/pandas/pulls/25346 | 2019-02-16T19:24:34Z | 2019-02-16T20:41:05Z | 2019-02-16T20:41:05Z | 2019-03-11T16:50:51Z | |
Backport PR #25304 on branch 0.24.x (BUG: Fix passing of numeric_only argument for categorical reduce) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 0c66df3129b2d..8e59c2300e7ca 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -25,6 +25,7 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
+- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 35b662eaae9a5..3f38785e6619e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2173,7 +2173,7 @@ def _reverse_indexer(self):
return result
# reduction ops #
- def _reduce(self, name, axis=0, skipna=True, **kwargs):
+ def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0c8e697c572e8..3ed4e2e12ed73 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3610,8 +3610,12 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
if axis is not None:
self._get_axis_number(axis)
- # dispatch to ExtensionArray interface
- if isinstance(delegate, ExtensionArray):
+ if isinstance(delegate, Categorical):
+ # TODO deprecate numeric_only argument for Categorical and use
+ # skipna as well, see GH25303
+ return delegate._reduce(name, numeric_only=numeric_only, **kwds)
+ elif isinstance(delegate, ExtensionArray):
+ # dispatch to ExtensionArray interface
return delegate._reduce(name, skipna=skipna, **kwds)
elif is_datetime64_dtype(delegate):
# use DatetimeIndex implementation to handle skipna correctly
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 173f719edd465..8520855d14918 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -960,6 +960,27 @@ def test_min_max(self):
assert np.isnan(_min)
assert _max == 1
+ def test_min_max_numeric_only(self):
+ # TODO deprecate numeric_only argument for Categorical and use
+ # skipna as well, see GH25303
+ cat = Series(Categorical(
+ ["a", "b", np.nan, "a"], categories=['b', 'a'], ordered=True))
+
+ _min = cat.min()
+ _max = cat.max()
+ assert np.isnan(_min)
+ assert _max == "a"
+
+ _min = cat.min(numeric_only=True)
+ _max = cat.max(numeric_only=True)
+ assert _min == "b"
+ assert _max == "a"
+
+ _min = cat.min(numeric_only=False)
+ _max = cat.max(numeric_only=False)
+ assert np.isnan(_min)
+ assert _max == "a"
+
class TestSeriesMode(object):
# Note: the name TestSeriesMode indicates these tests
| Backport PR #25304: BUG: Fix passing of numeric_only argument for categorical reduce | https://api.github.com/repos/pandas-dev/pandas/pulls/25344 | 2019-02-16T18:50:01Z | 2019-02-16T19:52:37Z | 2019-02-16T19:52:37Z | 2019-02-16T19:52:38Z |
Backport PR #25088 on branch 0.24.x (Fixes Formatting Exception) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f17c4974cd450..d0308c82daffc 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -53,6 +53,7 @@ Bug Fixes
**I/O**
+- Better handling of terminal printing when the terminal dimensions are not known (:issue:`25080`);
- Bug in reading a HDF5 table-format ``DataFrame`` created in Python 2, in Python 3 (:issue:`24925`)
- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`)
- Bug where float indexes could have misaligned values when printing (:issue:`25061`)
diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py
index bb34259d710c7..cf2383955d593 100644
--- a/pandas/io/formats/terminal.py
+++ b/pandas/io/formats/terminal.py
@@ -15,6 +15,7 @@
import os
import shutil
+import subprocess
from pandas.compat import PY3
@@ -94,22 +95,29 @@ def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width
# -height-of-a-terminal-window
+
try:
- import subprocess
proc = subprocess.Popen(["tput", "cols"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
- output = proc.communicate(input=None)
- cols = int(output[0])
+ output_cols = proc.communicate(input=None)
proc = subprocess.Popen(["tput", "lines"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
- output = proc.communicate(input=None)
- rows = int(output[0])
- return (cols, rows)
+ output_rows = proc.communicate(input=None)
except OSError:
return None
+ try:
+ # Some terminals (e.g. spyder) may report a terminal size of '',
+ # making the `int` fail.
+
+ cols = int(output_cols[0])
+ rows = int(output_rows[0])
+ return cols, rows
+ except (ValueError, IndexError):
+ return None
+
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py
index 055763bf62d6e..45c5e982c1c48 100644
--- a/pandas/tests/io/formats/test_console.py
+++ b/pandas/tests/io/formats/test_console.py
@@ -1,6 +1,9 @@
+import subprocess # noqa: F401
+
import pytest
from pandas.io.formats.console import detect_console_encoding
+from pandas.io.formats.terminal import _get_terminal_size_tput
class MockEncoding(object): # TODO(py27): replace with mock
@@ -72,3 +75,19 @@ def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
context.setattr('sys.stdout', MockEncoding(std))
context.setattr('sys.getdefaultencoding', lambda: 'sysDefaultEncoding')
assert detect_console_encoding() == 'sysDefaultEncoding'
+
+
+@pytest.mark.parametrize("size", ['', ['']])
+def test_terminal_unknown_dimensions(monkeypatch, size):
+ mock = pytest.importorskip("unittest.mock")
+
+ def communicate(*args, **kwargs):
+ return size
+
+ monkeypatch.setattr('subprocess.Popen', mock.Mock())
+ monkeypatch.setattr('subprocess.Popen.return_value.returncode', None)
+ monkeypatch.setattr(
+ 'subprocess.Popen.return_value.communicate', communicate)
+ result = _get_terminal_size_tput()
+
+ assert result is None
| Backport PR #25088: Fixes Formatting Exception | https://api.github.com/repos/pandas-dev/pandas/pulls/25343 | 2019-02-16T16:34:59Z | 2019-02-16T17:57:13Z | 2019-02-16T17:57:13Z | 2019-02-16T17:59:38Z |
Backport PR #25264 on branch 0.24.x (BUG: groupby.transform retains timezone information) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f17c4974cd450..0c78cf01ad300 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -78,7 +78,7 @@ Bug Fixes
**Reshaping**
--
+- Bug in :meth:`pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`)
-
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index c5142a4ee98cc..5be98b013384d 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -965,7 +965,7 @@ def _transform_fast(self, func, func_nm):
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
- out = algorithms.take_1d(func().values, ids)
+ out = algorithms.take_1d(func()._values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index f120402e6e8ca..b645073fcf72a 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -834,3 +834,14 @@ def demean_rename(x):
tm.assert_frame_equal(result, expected)
result_single = df.groupby('group').value.transform(demean_rename)
tm.assert_series_equal(result_single, expected['value'])
+
+
+@pytest.mark.parametrize('func', [min, max, np.min, np.max, 'first', 'last'])
+def test_groupby_transform_timezone_column(func):
+ # GH 24198
+ ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore')
+ result = pd.DataFrame({'end_time': [ts], 'id': [1]})
+ result['max_end_time'] = result.groupby('id').end_time.transform(func)
+ expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id',
+ 'max_end_time'])
+ tm.assert_frame_equal(result, expected)
| Backport PR #25264: BUG: groupby.transform retains timezone information | https://api.github.com/repos/pandas-dev/pandas/pulls/25342 | 2019-02-16T16:32:35Z | 2019-02-16T17:28:04Z | 2019-02-16T17:28:04Z | 2019-02-16T17:28:38Z |
CLN: Excel Module Cleanups pandas-dev #25271 | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index ed5943e9a1698..8f7bf8e0466f9 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -590,9 +590,8 @@ def __new__(cls, path, engine=None, **kwargs):
if engine == 'auto':
engine = _get_default_writer(ext)
except KeyError:
- error = ValueError("No engine for filetype: '{ext}'"
- .format(ext=ext))
- raise error
+ raise ValueError("No engine for filetype: '{ext}'"
+ .format(ext=ext))
cls = get_writer(engine)
return object.__new__(cls)
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index 1aeaf70f0832e..630503765b34a 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -7,6 +7,7 @@
from pandas.core import config
+# Default extentions to use default Excel writers classes.
_writer_extensions = ["xlsx", "xls", "xlsm"]
@@ -31,6 +32,8 @@ def register_writer(klass):
def _get_default_writer(ext):
+ """Get default an instantiated writer class using extention.
+ """
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
@@ -230,8 +233,6 @@ def _fill_mi_header(row, control_row):
return _maybe_convert_to_string(row), control_row
-# fill blank if index_col not None
-
def _pop_header_name(row, index_col):
"""
| - [X] closes #25271
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25340 | 2019-02-16T11:39:27Z | 2019-02-16T16:38:44Z | null | 2019-02-16T16:38:49Z |
ENH: .to_latex(longtable=True) latex caption and label support | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d271081aeaa51..c48944d85354d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2740,9 +2740,9 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
- column_format=None, longtable=None, escape=None,
- encoding=None, decimal='.', multicolumn=None,
- multicolumn_format=None, multirow=None):
+ column_format=None, longtable=None, lt_caption=None,
+ lt_label=None, escape=None, encoding=None, decimal='.',
+ multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
@@ -2790,6 +2790,10 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
+ lt_caption : str, optional
+ The caption to use when longtable is True
+ lt_label : str, optional
+ The label to use with \ref{} when longtable is True
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
@@ -2863,6 +2867,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
+ lt_caption=lt_caption, lt_label=lt_label,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index f8ee9c273fd59..b3c1ceb30f4ef 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -679,8 +679,9 @@ def _join_multiline(self, *strcols):
st = ed
return '\n\n'.join(str_lst)
- def to_latex(self, column_format=None, longtable=False, encoding=None,
- multicolumn=False, multicolumn_format=None, multirow=False):
+ def to_latex(self, column_format=None, longtable=False, lt_caption=None,
+ lt_label=None, encoding=None, multicolumn=False,
+ multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
@@ -688,6 +689,8 @@ def to_latex(self, column_format=None, longtable=False, encoding=None,
from pandas.io.formats.latex import LatexFormatter
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable,
+ lt_caption=lt_caption,
+ lt_label=lt_label,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 90be3364932a2..f2ebcb5d4a19a 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -4,6 +4,8 @@
"""
from __future__ import print_function
+from warnings import warn
+
import numpy as np
from pandas.compat import map, range, u, zip
@@ -34,12 +36,15 @@ class LatexFormatter(TableFormatter):
"""
def __init__(self, formatter, column_format=None, longtable=False,
- multicolumn=False, multicolumn_format=None, multirow=False):
+ lt_caption=None, lt_label=None, multicolumn=False,
+ multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.column_format = column_format
self.longtable = longtable
+ self.lt_caption = lt_caption
+ self.lt_label = lt_label
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
@@ -113,6 +118,26 @@ def pad_empties(x):
else:
buf.write('\\begin{{longtable}}{{{fmt}}}\n'
.format(fmt=column_format))
+
+ if self.lt_caption is None and self.lt_label is None:
+ pass
+ else:
+ if self.lt_caption is not None:
+ buf.write('\\caption{{{}}}'.format(self.lt_caption))
+ else:
+ pass
+
+ if self.lt_label is not None:
+ buf.write('\\label{{{}}}'.format(self.lt_label))
+ else:
+ warn('no LaTeX label has been provided; '
+ 'referencing with \\ref{} will not work')
+
+ # a double-backslash is required at the end of the line
+ # as discussed here:
+ # https://tex.stackexchange.com/questions/219138
+ buf.write('\\\\\n')
+
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
When creating a latex table with `DataFrame.to_latex(longtable=False)` the output is written inside a latex `tabular` environment and stored in some file like `pandas_tabular.tex`; the user can conveniently typeset the table in a main `report.tex` file complete with caption and label as follows:
```tex
\begin{table}
\caption{the caption}
\label{the label}
\input{pandas_tabular.tex}
\end{table}
```
This is good because the `pandas_tabular.tex` file can be re-created and the main `report.tex` simply needs to be recompiled to get the updated output.
The problem when creating a latex [longtable](https://ctan.org/pkg/longtable) with `DataFrame.to_latex(longtable=True)` is the caption and label need to go inside the latex `longtable` environment which is stored in a some file like `pandas_longtable.tex`. The latex `longtable` environment does not go inside a `table` environment like the `tabular` environment does; this means that setting the caption and label requires the user to edit the `pandas_longtable.tex` file after its creation. This does not support an automated workflow like we have with the `tabular` environment.
**This PR adds caption and label support to `DataFrame.to_latex(longtable=True)` with the arguments `lt_caption` and `lt_label`. Example usage is described below.**
The following python code creates some data in a `DataFrame` and writes it to disk in `tabular` and `longtable` latex environments:
```python
import numpy as np
import pandas as pd
# create some example data with more rows than would fit on a single page
df = pd.DataFrame(np.random.randn(60,3))
# write the first 5 rows to regular table in a latex tabular environment
df.head().to_latex(
'pandas_tabular.tex',
)
# write the whole table in the latex longtable environment c/w caption and label
df.to_latex(
'pandas_longtable.tex',
longtable=True,
lt_caption='table in \\texttt{longtable} environment',
lt_label='tab:longtable',
)
```
The following latex code is contained in a main `report.tex` and is used to typset both tables:
```tex
\documentclass{article}
\usepackage{longtable}
\usepackage{booktabs}
\begin{document}
% typeset the table in the tabular environment
Table \ref{tab:tabular} is a \texttt{tabular} and has 5 rows:
\begin{table}[h]
\centering
\caption{table in \texttt{tabular} environment}
\label{tab:tabular}
\input{pandas_tabular.tex}
\end{table}
% typeset the table in the longtable environment
Table \ref{tab:longtable} is a \texttt{longtable} and has 60 rows:
\input{pandas_longtable.tex}
\end{document}
```
Using `DataFrame.to_latex(longtable=True)` with the new arguments `lt_caption` and `lt_label` means we don't have to edit `pandas_longtable.tex` after its creation to get the caption and label working. This functionality also works with `Series.to_latex(longtable=True)`.
PDF output is shown below:

| https://api.github.com/repos/pandas-dev/pandas/pulls/25339 | 2019-02-16T04:50:20Z | 2019-02-25T04:35:48Z | null | 2019-02-25T05:23:38Z |
Interval dtype fix | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 8e59c2300e7ca..f528c058d2868 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -26,6 +26,7 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
+- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 640d43f3b0e03..11a132c4d14ee 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -931,13 +931,18 @@ def construct_from_string(cls, string):
attempt to construct this type from a string, raise a TypeError
if its not possible
"""
- if (isinstance(string, compat.string_types) and
- (string.startswith('interval') or
- string.startswith('Interval'))):
- return cls(string)
+ if not isinstance(string, compat.string_types):
+ msg = "a string needs to be passed, got type {typ}"
+ raise TypeError(msg.format(typ=type(string)))
+
+ if (string.lower() == 'interval' or
+ cls._match.search(string) is not None):
+ return cls(string)
- msg = "a string needs to be passed, got type {typ}"
- raise TypeError(msg.format(typ=type(string)))
+ msg = ('Incorrectly formatted string passed to constructor. '
+ 'Valid formats include Interval or Interval[dtype] '
+ 'where dtype is numeric, datetime, or timedelta')
+ raise TypeError(msg)
@property
def type(self):
@@ -978,7 +983,7 @@ def is_dtype(cls, dtype):
return True
else:
return False
- except ValueError:
+ except (ValueError, TypeError):
return False
else:
return False
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 710f215686eab..1c1442d6f2f23 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -511,10 +511,11 @@ def test_construction_not_supported(self, subtype):
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
- def test_construction_errors(self):
+ @pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
+ def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
- IntervalDtype('xx')
+ IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
@@ -523,7 +524,7 @@ def test_construction_from_string(self):
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
- 'foo', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
+ 0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
@@ -532,10 +533,12 @@ def test_construction_from_string_errors(self, string):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
- 'interval[foo]'])
+ 'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
- msg = 'could not construct IntervalDtype'
+ msg = ("Incorrectly formatted string passed to constructor. "
+ r"Valid formats include Interval or Interval\[dtype\] "
+ "where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@@ -559,6 +562,7 @@ def test_is_dtype(self):
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
+ assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 4d3c9926fc5ae..b2aac441db195 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -563,6 +563,13 @@ def test_comp_ops_df_compat(self):
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
+ def test_compare_series_interval_keyword(self):
+ # GH 25338
+ s = Series(['IntervalA', 'IntervalB', 'IntervalC'])
+ result = s == 'IntervalA'
+ expected = Series([True, False, False])
+ assert_series_equal(result, expected)
+
class TestSeriesFlexComparisonOps(object):
| - [x] closes #25283
- [x] test added to ensure pd.api.types.IntervalDtype('IntervalA') throws TypeError
- [x] test added to ensure pd.api.types.IntervalDtype.construct_from_string('IntervalA') throws TypeError
- [x] test added to ensure pd.api.types.IntervalDtype.is_dtype('IntervalA') returns false
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] test added to ensure * `Series(['IntervalA', 'IntervalB', 'IntervalC']) == 'IntervalA'` returns the expected boolean series
- [x] what's new note added to doc/source/whatsnew/v0.24.2.rst documenting regression fix | https://api.github.com/repos/pandas-dev/pandas/pulls/25338 | 2019-02-15T23:53:58Z | 2019-02-20T03:51:05Z | 2019-02-20T03:51:05Z | 2019-02-20T04:00:57Z |
TST: Split test_excel.py into sub test files #24472 | diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/io/excel/reader/test_base.py b/pandas/tests/io/excel/reader/test_base.py
new file mode 100644
index 0000000000000..66b5aa9758454
--- /dev/null
+++ b/pandas/tests/io/excel/reader/test_base.py
@@ -0,0 +1,1089 @@
+from collections import OrderedDict
+import contextlib
+from datetime import datetime, time
+from functools import partial
+import os
+import warnings
+
+import numpy as np
+import pytest
+
+from pandas.compat import iteritems, range
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame, Index, MultiIndex, Series
+import pandas.util.testing as tm
+from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
+
+from pandas.io.common import URLError
+from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
+from pandas.tests.io.excel.test_base import SharedItems
+
+
+@contextlib.contextmanager
+def ignore_xlrd_time_clock_warning():
+ """
+ Context manager to ignore warnings raised by the xlrd library,
+ regarding the deprecation of `time.clock` in Python 3.7.
+ """
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ action='ignore',
+ message='time.clock has been deprecated',
+ category=DeprecationWarning)
+ yield
+
+
+class ReadingTestsBase(SharedItems):
+ # This is based on ExcelWriterBase
+
+ @pytest.fixture(autouse=True, params=['xlrd', None])
+ def set_engine(self, request):
+ func_name = "get_exceldf"
+ old_func = getattr(self, func_name)
+ new_func = partial(old_func, engine=request.param)
+ setattr(self, func_name, new_func)
+ yield
+ setattr(self, func_name, old_func)
+
+ @td.skip_if_no("xlrd", "1.0.1") # see gh-22682
+ def test_usecols_int(self, ext):
+
+ df_ref = self.get_csv_refdf("test1")
+ df_ref = df_ref.reindex(columns=["A", "B", "C"])
+
+ # usecols as int
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ with ignore_xlrd_time_clock_warning():
+ df1 = self.get_exceldf("test1", ext, "Sheet1",
+ index_col=0, usecols=3)
+
+ # usecols as int
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ with ignore_xlrd_time_clock_warning():
+ df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
+ index_col=0, usecols=3)
+
+ # parse_cols instead of usecols, usecols as int
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ with ignore_xlrd_time_clock_warning():
+ df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
+ index_col=0, parse_cols=3)
+
+ # TODO add index to xls file)
+ tm.assert_frame_equal(df1, df_ref, check_names=False)
+ tm.assert_frame_equal(df2, df_ref, check_names=False)
+ tm.assert_frame_equal(df3, df_ref, check_names=False)
+
+ @td.skip_if_no('xlrd', '1.0.1') # GH-22682
+ def test_usecols_list(self, ext):
+
+ dfref = self.get_csv_refdf('test1')
+ dfref = dfref.reindex(columns=['B', 'C'])
+ df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
+ usecols=[0, 2, 3])
+ df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0, usecols=[0, 2, 3])
+
+ with tm.assert_produces_warning(FutureWarning):
+ with ignore_xlrd_time_clock_warning():
+ df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0, parse_cols=[0, 2, 3])
+
+ # TODO add index to xls file)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+ tm.assert_frame_equal(df3, dfref, check_names=False)
+
+ @td.skip_if_no('xlrd', '1.0.1') # GH-22682
+ def test_usecols_str(self, ext):
+
+ dfref = self.get_csv_refdf('test1')
+
+ df1 = dfref.reindex(columns=['A', 'B', 'C'])
+ df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
+ usecols='A:D')
+ df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0, usecols='A:D')
+
+ with tm.assert_produces_warning(FutureWarning):
+ with ignore_xlrd_time_clock_warning():
+ df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0, parse_cols='A:D')
+
+ # TODO add index to xls, read xls ignores index name ?
+ tm.assert_frame_equal(df2, df1, check_names=False)
+ tm.assert_frame_equal(df3, df1, check_names=False)
+ tm.assert_frame_equal(df4, df1, check_names=False)
+
+ df1 = dfref.reindex(columns=['B', 'C'])
+ df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
+ usecols='A,C,D')
+ df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0, usecols='A,C,D')
+ # TODO add index to xls file
+ tm.assert_frame_equal(df2, df1, check_names=False)
+ tm.assert_frame_equal(df3, df1, check_names=False)
+
+ df1 = dfref.reindex(columns=['B', 'C'])
+ df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
+ usecols='A,C:D')
+ df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0, usecols='A,C:D')
+ tm.assert_frame_equal(df2, df1, check_names=False)
+ tm.assert_frame_equal(df3, df1, check_names=False)
+
+ @pytest.mark.parametrize("usecols", [
+ [0, 1, 3], [0, 3, 1],
+ [1, 0, 3], [1, 3, 0],
+ [3, 0, 1], [3, 1, 0],
+ ])
+ def test_usecols_diff_positional_int_columns_order(self, ext, usecols):
+ expected = self.get_csv_refdf("test1")[["A", "C"]]
+ result = self.get_exceldf("test1", ext, "Sheet1",
+ index_col=0, usecols=usecols)
+ tm.assert_frame_equal(result, expected, check_names=False)
+
+ @pytest.mark.parametrize("usecols", [
+ ["B", "D"], ["D", "B"]
+ ])
+ def test_usecols_diff_positional_str_columns_order(self, ext, usecols):
+ expected = self.get_csv_refdf("test1")[["B", "D"]]
+ expected.index = range(len(expected))
+
+ result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols)
+ tm.assert_frame_equal(result, expected, check_names=False)
+
+ def test_read_excel_without_slicing(self, ext):
+ expected = self.get_csv_refdf("test1")
+ result = self.get_exceldf("test1", ext, "Sheet1", index_col=0)
+ tm.assert_frame_equal(result, expected, check_names=False)
+
+ def test_usecols_excel_range_str(self, ext):
+ expected = self.get_csv_refdf("test1")[["C", "D"]]
+ result = self.get_exceldf("test1", ext, "Sheet1",
+ index_col=0, usecols="A,D:E")
+ tm.assert_frame_equal(result, expected, check_names=False)
+
+ def test_usecols_excel_range_str_invalid(self, ext):
+ msg = "Invalid column name: E1"
+
+ with pytest.raises(ValueError, match=msg):
+ self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1")
+
+ def test_index_col_label_error(self, ext):
+ msg = "list indices must be integers.*, not str"
+
+ with pytest.raises(TypeError, match=msg):
+ self.get_exceldf("test1", ext, "Sheet1", index_col=["A"],
+ usecols=["A", "C"])
+
+ def test_index_col_empty(self, ext):
+ # see gh-9208
+ result = self.get_exceldf("test1", ext, "Sheet3",
+ index_col=["A", "B", "C"])
+ expected = DataFrame(columns=["D", "E", "F"],
+ index=MultiIndex(levels=[[]] * 3,
+ codes=[[]] * 3,
+ names=["A", "B", "C"]))
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("index_col", [None, 2])
+ def test_index_col_with_unnamed(self, ext, index_col):
+ # see gh-18792
+ result = self.get_exceldf("test1", ext, "Sheet4",
+ index_col=index_col)
+ expected = DataFrame([["i1", "a", "x"], ["i2", "b", "y"]],
+ columns=["Unnamed: 0", "col1", "col2"])
+ if index_col:
+ expected = expected.set_index(expected.columns[index_col])
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_usecols_pass_non_existent_column(self, ext):
+ msg = ("Usecols do not match columns, "
+ "columns expected but not found: " + r"\['E'\]")
+
+ with pytest.raises(ValueError, match=msg):
+ self.get_exceldf("test1", ext, usecols=["E"])
+
+ def test_usecols_wrong_type(self, ext):
+ msg = ("'usecols' must either be list-like of "
+ "all strings, all unicode, all integers or a callable.")
+
+ with pytest.raises(ValueError, match=msg):
+ self.get_exceldf("test1", ext, usecols=["E1", 0])
+
+ def test_excel_stop_iterator(self, ext):
+
+ parsed = self.get_exceldf('test2', ext, 'Sheet1')
+ expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
+ tm.assert_frame_equal(parsed, expected)
+
+ def test_excel_cell_error_na(self, ext):
+
+ parsed = self.get_exceldf('test3', ext, 'Sheet1')
+ expected = DataFrame([[np.nan]], columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ def test_excel_passes_na(self, ext):
+
+ excel = self.get_excelfile('test4', ext)
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
+ na_values=['apple'])
+ expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
+ na_values=['apple'])
+ expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ # 13967
+ excel = self.get_excelfile('test5', ext)
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
+ na_values=['apple'])
+ expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
+ na_values=['apple'])
+ expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ @td.skip_if_no('xlrd', '1.0.1') # GH-22682
+ def test_deprecated_sheetname(self, ext):
+ # gh-17964
+ excel = self.get_excelfile('test1', ext)
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ read_excel(excel, sheetname='Sheet1')
+
+ with pytest.raises(TypeError):
+ read_excel(excel, sheet='Sheet1')
+
+ @td.skip_if_no('xlrd', '1.0.1') # GH-22682
+ def test_excel_table_sheet_by_index(self, ext):
+
+ excel = self.get_excelfile('test1', ext)
+ dfref = self.get_csv_refdf('test1')
+
+ df1 = read_excel(excel, 0, index_col=0)
+ df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ df1 = excel.parse(0, index_col=0)
+ df2 = excel.parse(1, skiprows=[1], index_col=0)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
+ tm.assert_frame_equal(df3, df1.iloc[:-1])
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
+ tm.assert_frame_equal(df3, df4)
+
+ df3 = excel.parse(0, index_col=0, skipfooter=1)
+ tm.assert_frame_equal(df3, df1.iloc[:-1])
+
+ import xlrd
+ with pytest.raises(xlrd.XLRDError):
+ read_excel(excel, 'asdf')
+
+ def test_excel_table(self, ext):
+
+ dfref = self.get_csv_refdf('test1')
+
+ df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
+ df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
+ index_col=0)
+ # TODO add index to file
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
+ skipfooter=1)
+ tm.assert_frame_equal(df3, df1.iloc[:-1])
+
+ def test_reader_special_dtypes(self, ext):
+
+ expected = DataFrame.from_dict(OrderedDict([
+ ("IntCol", [1, 2, -3, 4, 0]),
+ ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
+ ("BoolCol", [True, False, True, True, False]),
+ ("StrCol", [1, 2, 3, 4, 5]),
+ # GH5394 - this is why convert_float isn't vectorized
+ ("Str2Col", ["a", 3, "c", "d", "e"]),
+ ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
+ datetime(1905, 1, 1), datetime(2013, 12, 14),
+ datetime(2015, 3, 14)])
+ ]))
+ basename = 'test_types'
+
+ # should read in correctly and infer types
+ actual = self.get_exceldf(basename, ext, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ # if not coercing number, then int comes in as float
+ float_expected = expected.copy()
+ float_expected["IntCol"] = float_expected["IntCol"].astype(float)
+ float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
+ actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
+ tm.assert_frame_equal(actual, float_expected)
+
+ # check setting Index (assuming xls and xlsx are the same here)
+ for icol, name in enumerate(expected.columns):
+ actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
+ exp = expected.set_index(name)
+ tm.assert_frame_equal(actual, exp)
+
+ # convert_float and converters should be different but both accepted
+ expected["StrCol"] = expected["StrCol"].apply(str)
+ actual = self.get_exceldf(
+ basename, ext, 'Sheet1', converters={"StrCol": str})
+ tm.assert_frame_equal(actual, expected)
+
+ no_convert_float = float_expected.copy()
+ no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
+ actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
+ converters={"StrCol": str})
+ tm.assert_frame_equal(actual, no_convert_float)
+
+ # GH8212 - support for converters and missing values
+ def test_reader_converters(self, ext):
+
+ basename = 'test_converters'
+
+ expected = DataFrame.from_dict(OrderedDict([
+ ("IntCol", [1, 2, -3, -1000, 0]),
+ ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
+ ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
+ ("StrCol", ['1', np.nan, '3', '4', '5']),
+ ]))
+
+ converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
+ 'FloatCol': lambda x: 10 * x if x else np.nan,
+ 2: lambda x: 'Found' if x != '' else 'Not found',
+ 3: lambda x: str(x) if x else '',
+ }
+
+ # should read in correctly and set types of single cells (not array
+ # dtypes)
+ actual = self.get_exceldf(basename, ext, 'Sheet1',
+ converters=converters)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_reader_dtype(self, ext):
+ # GH 8212
+ basename = 'testdtype'
+ actual = self.get_exceldf(basename, ext)
+
+ expected = DataFrame({
+ 'a': [1, 2, 3, 4],
+ 'b': [2.5, 3.5, 4.5, 5.5],
+ 'c': [1, 2, 3, 4],
+ 'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
+ columns=['a', 'b', 'c', 'd'])
+
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.get_exceldf(basename, ext,
+ dtype={'a': 'float64',
+ 'b': 'float32',
+ 'c': str})
+
+ expected['a'] = expected['a'].astype('float64')
+ expected['b'] = expected['b'].astype('float32')
+ expected['c'] = ['001', '002', '003', '004']
+ tm.assert_frame_equal(actual, expected)
+
+ with pytest.raises(ValueError):
+ self.get_exceldf(basename, ext, dtype={'d': 'int64'})
+
+ @pytest.mark.parametrize("dtype,expected", [
+ (None,
+ DataFrame({
+ "a": [1, 2, 3, 4],
+ "b": [2.5, 3.5, 4.5, 5.5],
+ "c": [1, 2, 3, 4],
+ "d": [1.0, 2.0, np.nan, 4.0]
+ })),
+ ({"a": "float64",
+ "b": "float32",
+ "c": str,
+ "d": str
+ },
+ DataFrame({
+ "a": Series([1, 2, 3, 4], dtype="float64"),
+ "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
+ "c": ["001", "002", "003", "004"],
+ "d": ["1", "2", np.nan, "4"]
+ })),
+ ])
+ def test_reader_dtype_str(self, ext, dtype, expected):
+ # see gh-20377
+ basename = "testdtype"
+
+ actual = self.get_exceldf(basename, ext, dtype=dtype)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_reading_all_sheets(self, ext):
+ # Test reading all sheetnames by setting sheetname to None,
+ # Ensure a dict is returned.
+ # See PR #9450
+ basename = 'test_multisheet'
+ dfs = self.get_exceldf(basename, ext, sheet_name=None)
+ # ensure this is not alphabetical to test order preservation
+ expected_keys = ['Charlie', 'Alpha', 'Beta']
+ tm.assert_contains_all(expected_keys, dfs.keys())
+ # Issue 9930
+ # Ensure sheet order is preserved
+ assert expected_keys == list(dfs.keys())
+
+ def test_reading_multiple_specific_sheets(self, ext):
+ # Test reading specific sheetnames by specifying a mixed list
+ # of integers and strings, and confirm that duplicated sheet
+ # references (positions/names) are removed properly.
+ # Ensure a dict is returned
+ # See PR #9450
+ basename = 'test_multisheet'
+ # Explicitly request duplicates. Only the set should be returned.
+ expected_keys = [2, 'Charlie', 'Charlie']
+ dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
+ expected_keys = list(set(expected_keys))
+ tm.assert_contains_all(expected_keys, dfs.keys())
+ assert len(expected_keys) == len(dfs.keys())
+
+ def test_reading_all_sheets_with_blank(self, ext):
+ # Test reading all sheetnames by setting sheetname to None,
+ # In the case where some sheets are blank.
+ # Issue #11711
+ basename = 'blank_with_header'
+ dfs = self.get_exceldf(basename, ext, sheet_name=None)
+ expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
+ tm.assert_contains_all(expected_keys, dfs.keys())
+
+ # GH6403
+ def test_read_excel_blank(self, ext):
+ actual = self.get_exceldf('blank', ext, 'Sheet1')
+ tm.assert_frame_equal(actual, DataFrame())
+
+ def test_read_excel_blank_with_header(self, ext):
+ expected = DataFrame(columns=['col_1', 'col_2'])
+ actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ @pytest.mark.parametrize("header,expected", [
+ (None, DataFrame([np.nan] * 4)),
+ (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
+ ])
+ def test_read_one_empty_col_no_header(self, ext, header, expected):
+ # xref gh-12292
+ filename = "no_header"
+ df = pd.DataFrame(
+ [["", 1, 100],
+ ["", 2, 200],
+ ["", 3, 300],
+ ["", 4, 400]]
+ )
+
+ with ensure_clean(ext) as path:
+ df.to_excel(path, filename, index=False, header=False)
+ result = read_excel(path, filename, usecols=[0], header=header)
+
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ @pytest.mark.parametrize("header,expected", [
+ (None, DataFrame([0] + [np.nan] * 4)),
+ (0, DataFrame([np.nan] * 4))
+ ])
+ def test_read_one_empty_col_with_header(self, ext, header, expected):
+ filename = "with_header"
+ df = pd.DataFrame(
+ [["", 1, 100],
+ ["", 2, 200],
+ ["", 3, 300],
+ ["", 4, 400]]
+ )
+
+ with ensure_clean(ext) as path:
+ df.to_excel(path, 'with_header', index=False, header=True)
+ result = read_excel(path, filename, usecols=[0], header=header)
+
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no('openpyxl')
+ @td.skip_if_no('xlwt')
+ def test_set_column_names_in_parameter(self, ext):
+ # GH 12870 : pass down column names associated with
+ # keyword argument names
+ refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
+ [3, 'baz']], columns=['a', 'b'])
+
+ with ensure_clean(ext) as pth:
+ with ExcelWriter(pth) as writer:
+ refdf.to_excel(writer, 'Data_no_head',
+ header=False, index=False)
+ refdf.to_excel(writer, 'Data_with_head', index=False)
+
+ refdf.columns = ['A', 'B']
+
+ with ExcelFile(pth) as reader:
+ xlsdf_no_head = read_excel(reader, 'Data_no_head',
+ header=None, names=['A', 'B'])
+ xlsdf_with_head = read_excel(reader, 'Data_with_head',
+ index_col=None, names=['A', 'B'])
+
+ tm.assert_frame_equal(xlsdf_no_head, refdf)
+ tm.assert_frame_equal(xlsdf_with_head, refdf)
+
+ def test_date_conversion_overflow(self, ext):
+ # GH 10001 : pandas.ExcelFile ignore parse_dates=False
+ expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
+ [pd.Timestamp('2016-03-16'), 'Jack Black'],
+ [1e+20, 'Timothy Brown']],
+ columns=['DateColWithBigInt', 'StringCol'])
+
+ result = self.get_exceldf('testdateoverflow', ext)
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no("xlrd", "1.0.1") # see gh-22682
+ def test_sheet_name_and_sheetname(self, ext):
+ # gh-10559: Minor improvement: Change "sheet_name" to "sheetname"
+ # gh-10969: DOC: Consistent var names (sheetname vs sheet_name)
+ # gh-12604: CLN GH10559 Rename sheetname variable to sheet_name
+ # gh-20920: ExcelFile.parse() and pd.read_xlsx() have different
+ # behavior for "sheetname" argument
+ filename = "test1"
+ sheet_name = "Sheet1"
+
+ df_ref = self.get_csv_refdf(filename)
+ df1 = self.get_exceldf(filename, ext,
+ sheet_name=sheet_name, index_col=0) # doc
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with ignore_xlrd_time_clock_warning():
+ df2 = self.get_exceldf(filename, ext, index_col=0,
+ sheetname=sheet_name) # backward compat
+
+ excel = self.get_excelfile(filename, ext)
+ df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ df2_parse = excel.parse(index_col=0,
+ sheetname=sheet_name) # backward compat
+
+ tm.assert_frame_equal(df1, df_ref, check_names=False)
+ tm.assert_frame_equal(df2, df_ref, check_names=False)
+ tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
+ tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
+
+ def test_sheet_name_both_raises(self, ext):
+ with pytest.raises(TypeError, match="Cannot specify both"):
+ self.get_exceldf('test1', ext, sheetname='Sheet1',
+ sheet_name='Sheet1')
+
+ excel = self.get_excelfile('test1', ext)
+ with pytest.raises(TypeError, match="Cannot specify both"):
+ excel.parse(sheetname='Sheet1',
+ sheet_name='Sheet1')
+
+ def test_excel_read_buffer(self, ext):
+
+ pth = os.path.join(self.dirpath, 'test1' + ext)
+ expected = read_excel(pth, 'Sheet1', index_col=0)
+ with open(pth, 'rb') as f:
+ actual = read_excel(f, 'Sheet1', index_col=0)
+ tm.assert_frame_equal(expected, actual)
+
+ with open(pth, 'rb') as f:
+ xls = ExcelFile(f)
+ actual = read_excel(xls, 'Sheet1', index_col=0)
+ tm.assert_frame_equal(expected, actual)
+
+ def test_bad_engine_raises(self, ext):
+ bad_engine = 'foo'
+ with pytest.raises(ValueError, match="Unknown engine: foo"):
+ read_excel('', engine=bad_engine)
+
+ @tm.network
+ def test_read_from_http_url(self, ext):
+ url = ('https://raw.github.com/pandas-dev/pandas/master/'
+ 'pandas/tests/io/data/test1' + ext)
+ url_table = read_excel(url)
+ local_table = self.get_exceldf('test1', ext)
+ tm.assert_frame_equal(url_table, local_table)
+
+ @td.skip_if_not_us_locale
+ def test_read_from_s3_url(self, ext, s3_resource):
+ # Bucket "pandas-test" created in tests/io/conftest.py
+ file_name = os.path.join(self.dirpath, 'test1' + ext)
+
+ with open(file_name, "rb") as f:
+ s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext,
+ Body=f)
+
+ url = ('s3://pandas-test/test1' + ext)
+ url_table = read_excel(url)
+ local_table = self.get_exceldf('test1', ext)
+ tm.assert_frame_equal(url_table, local_table)
+
+ @pytest.mark.slow
+ # ignore warning from old xlrd
+ @pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
+ def test_read_from_file_url(self, ext):
+
+ # FILE
+ localtable = os.path.join(self.dirpath, 'test1' + ext)
+ local_table = read_excel(localtable)
+
+ try:
+ url_table = read_excel('file://localhost/' + localtable)
+ except URLError:
+ # fails on some systems
+ import platform
+ pytest.skip("failing on %s" %
+ ' '.join(platform.uname()).strip())
+
+ tm.assert_frame_equal(url_table, local_table)
+
+ @td.skip_if_no('pathlib')
+ def test_read_from_pathlib_path(self, ext):
+
+ # GH12655
+ from pathlib import Path
+
+ str_path = os.path.join(self.dirpath, 'test1' + ext)
+ expected = read_excel(str_path, 'Sheet1', index_col=0)
+
+ path_obj = Path(self.dirpath, 'test1' + ext)
+ actual = read_excel(path_obj, 'Sheet1', index_col=0)
+
+ tm.assert_frame_equal(expected, actual)
+
+ @td.skip_if_no('py.path')
+ def test_read_from_py_localpath(self, ext):
+
+ # GH12655
+ from py.path import local as LocalPath
+
+ str_path = os.path.join(self.dirpath, 'test1' + ext)
+ expected = read_excel(str_path, 'Sheet1', index_col=0)
+
+ abs_dir = os.path.abspath(self.dirpath)
+ path_obj = LocalPath(abs_dir).join('test1' + ext)
+ actual = read_excel(path_obj, 'Sheet1', index_col=0)
+
+ tm.assert_frame_equal(expected, actual)
+
+ def test_reader_closes_file(self, ext):
+
+ pth = os.path.join(self.dirpath, 'test1' + ext)
+ f = open(pth, 'rb')
+ with ExcelFile(f) as xlsx:
+ # parses okay
+ read_excel(xlsx, 'Sheet1', index_col=0)
+
+ assert f.closed
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ def test_creating_and_reading_multiple_sheets(self, ext):
+ # see gh-9450
+ #
+ # Test reading multiple sheets, from a runtime
+ # created Excel file with multiple sheets.
+ def tdf(col_sheet_name):
+ d, i = [11, 22, 33], [1, 2, 3]
+ return DataFrame(d, i, columns=[col_sheet_name])
+
+ sheets = ["AAA", "BBB", "CCC"]
+
+ dfs = [tdf(s) for s in sheets]
+ dfs = dict(zip(sheets, dfs))
+
+ with ensure_clean(ext) as pth:
+ with ExcelWriter(pth) as ew:
+ for sheetname, df in iteritems(dfs):
+ df.to_excel(ew, sheetname)
+
+ dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
+
+ for s in sheets:
+ tm.assert_frame_equal(dfs[s], dfs_returned[s])
+
+ def test_reader_seconds(self, ext):
+
+ # Test reading times with and without milliseconds. GH5945.
+ expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
+ time(2, 45, 56, 100000),
+ time(4, 29, 49, 200000),
+ time(6, 13, 42, 300000),
+ time(7, 57, 35, 400000),
+ time(9, 41, 28, 500000),
+ time(11, 25, 21, 600000),
+ time(13, 9, 14, 700000),
+ time(14, 53, 7, 800000),
+ time(16, 37, 0, 900000),
+ time(18, 20, 54)]})
+
+ actual = self.get_exceldf('times_1900', ext, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.get_exceldf('times_1904', ext, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_multiindex(self, ext):
+ # see gh-4679
+ mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
+ mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
+
+ # "mi_column" sheet
+ expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True],
+ [2, 3.5, pd.Timestamp("2015-01-02"), False],
+ [3, 4.5, pd.Timestamp("2015-01-03"), False],
+ [4, 5.5, pd.Timestamp("2015-01-04"), True]],
+ columns=mi)
+
+ actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ # "mi_index" sheet
+ expected.index = mi
+ expected.columns = ["a", "b", "c", "d"]
+
+ actual = read_excel(mi_file, "mi_index", index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ # "both" sheet
+ expected.columns = mi
+
+ actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ # "mi_index_name" sheet
+ expected.columns = ["a", "b", "c", "d"]
+ expected.index = mi.set_names(["ilvl1", "ilvl2"])
+
+ actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ # "mi_column_name" sheet
+ expected.index = list(range(4))
+ expected.columns = mi.set_names(["c1", "c2"])
+ actual = read_excel(mi_file, "mi_column_name",
+ header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ # see gh-11317
+ # "name_with_int" sheet
+ expected.columns = mi.set_levels(
+ [1, 2], level=1).set_names(["c1", "c2"])
+
+ actual = read_excel(mi_file, "name_with_int",
+ index_col=0, header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ # "both_name" sheet
+ expected.columns = mi.set_names(["c1", "c2"])
+ expected.index = mi.set_names(["ilvl1", "ilvl2"])
+
+ actual = read_excel(mi_file, "both_name",
+ index_col=[0, 1], header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ # "both_skiprows" sheet
+ actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1],
+ header=[0, 1], skiprows=2)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_multiindex_header_only(self, ext):
+ # see gh-11733.
+ #
+ # Don't try to parse a header name if there isn't one.
+ mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
+ result = read_excel(mi_file, "index_col_none", header=[0, 1])
+
+ exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
+ expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no("xlsxwriter")
+ def test_read_excel_multiindex_empty_level(self, ext):
+ # see gh-12453
+ with ensure_clean(ext) as path:
+ df = DataFrame({
+ ("One", "x"): {0: 1},
+ ("Two", "X"): {0: 3},
+ ("Two", "Y"): {0: 7},
+ ("Zero", ""): {0: 0}
+ })
+
+ expected = DataFrame({
+ ("One", "x"): {0: 1},
+ ("Two", "X"): {0: 3},
+ ("Two", "Y"): {0: 7},
+ ("Zero", "Unnamed: 4_level_1"): {0: 0}
+ })
+
+ df.to_excel(path)
+ actual = pd.read_excel(path, header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ df = pd.DataFrame({
+ ("Beg", ""): {0: 0},
+ ("Middle", "x"): {0: 1},
+ ("Tail", "X"): {0: 3},
+ ("Tail", "Y"): {0: 7}
+ })
+
+ expected = pd.DataFrame({
+ ("Beg", "Unnamed: 1_level_1"): {0: 0},
+ ("Middle", "x"): {0: 1},
+ ("Tail", "X"): {0: 3},
+ ("Tail", "Y"): {0: 7}
+ })
+
+ df.to_excel(path)
+ actual = pd.read_excel(path, header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ @td.skip_if_no("xlsxwriter")
+ @pytest.mark.parametrize("c_idx_names", [True, False])
+ @pytest.mark.parametrize("r_idx_names", [True, False])
+ @pytest.mark.parametrize("c_idx_levels", [1, 3])
+ @pytest.mark.parametrize("r_idx_levels", [1, 3])
+ def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
+ c_idx_levels, r_idx_levels):
+ # see gh-4679
+ with ensure_clean(ext) as pth:
+ if c_idx_levels == 1 and c_idx_names:
+ pytest.skip("Column index name cannot be "
+ "serialized unless it's a MultiIndex")
+
+ # Empty name case current read in as
+ # unnamed levels, not Nones.
+ check_names = r_idx_names or r_idx_levels <= 1
+
+ df = mkdf(5, 5, c_idx_names, r_idx_names,
+ c_idx_levels, r_idx_levels)
+ df.to_excel(pth)
+
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ df.iloc[0, :] = np.nan
+ df.to_excel(pth)
+
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ df.iloc[-1, :] = np.nan
+ df.to_excel(pth)
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ def test_excel_old_index_format(self, ext):
+ # see gh-4679
+ filename = "test_index_name_pre17" + ext
+ in_file = os.path.join(self.dirpath, filename)
+
+ # We detect headers to determine if index names exist, so
+ # that "index" name in the "names" version of the data will
+ # now be interpreted as rows that include null data.
+ data = np.array([[None, None, None, None, None],
+ ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
+ ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
+ ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
+ ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
+ ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
+ columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
+ mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1",
+ "R_l0_g2", "R_l0_g3", "R_l0_g4"],
+ ["R1", "R_l1_g0", "R_l1_g1",
+ "R_l1_g2", "R_l1_g3", "R_l1_g4"]],
+ codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
+ names=[None, None])
+ si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2",
+ "R_l0_g3", "R_l0_g4"], name=None)
+
+ expected = pd.DataFrame(data, index=si, columns=columns)
+
+ actual = pd.read_excel(in_file, "single_names", index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = mi
+
+ actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ # The analogous versions of the "names" version data
+ # where there are explicitly no names for the indices.
+ data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
+ ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
+ ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
+ ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
+ ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
+ columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
+ mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2",
+ "R_l0_g3", "R_l0_g4"],
+ ["R_l1_g0", "R_l1_g1", "R_l1_g2",
+ "R_l1_g3", "R_l1_g4"]],
+ codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
+ names=[None, None])
+ si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2",
+ "R_l0_g3", "R_l0_g4"], name=None)
+
+ expected = pd.DataFrame(data, index=si, columns=columns)
+
+ actual = pd.read_excel(in_file, "single_no_names", index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = mi
+
+ actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ def test_read_excel_bool_header_arg(self, ext):
+ # GH 6114
+ for arg in [True, False]:
+ with pytest.raises(TypeError):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
+ header=arg)
+
+ def test_read_excel_chunksize(self, ext):
+ # GH 8011
+ with pytest.raises(NotImplementedError):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
+ chunksize=100)
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ def test_read_excel_parse_dates(self, ext):
+ # see gh-11544, gh-12051
+ df = DataFrame(
+ {"col": [1, 2, 3],
+ "date_strings": pd.date_range("2012-01-01", periods=3)})
+ df2 = df.copy()
+ df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
+
+ with ensure_clean(ext) as pth:
+ df2.to_excel(pth)
+
+ res = read_excel(pth, index_col=0)
+ tm.assert_frame_equal(df2, res)
+
+ res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
+ tm.assert_frame_equal(df, res)
+
+ date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
+ res = read_excel(pth, parse_dates=["date_strings"],
+ date_parser=date_parser, index_col=0)
+ tm.assert_frame_equal(df, res)
+
+ def test_read_excel_skiprows_list(self, ext):
+ # GH 4903
+ actual = pd.read_excel(os.path.join(self.dirpath,
+ 'testskiprows' + ext),
+ 'skiprows_list', skiprows=[0, 2])
+ expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
+ [2, 3.5, pd.Timestamp('2015-01-02'), False],
+ [3, 4.5, pd.Timestamp('2015-01-03'), False],
+ [4, 5.5, pd.Timestamp('2015-01-04'), True]],
+ columns=['a', 'b', 'c', 'd'])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = pd.read_excel(os.path.join(self.dirpath,
+ 'testskiprows' + ext),
+ 'skiprows_list', skiprows=np.array([0, 2]))
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_nrows(self, ext):
+ # GH 16645
+ num_rows_to_pull = 5
+ actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
+ nrows=num_rows_to_pull)
+ expected = pd.read_excel(os.path.join(self.dirpath,
+ 'test1' + ext))
+ expected = expected[:num_rows_to_pull]
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
+ # GH 16645
+ expected = pd.read_excel(os.path.join(self.dirpath,
+ 'test1' + ext))
+ num_records_in_file = len(expected)
+ num_rows_to_pull = num_records_in_file + 10
+ actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
+ nrows=num_rows_to_pull)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_nrows_non_integer_parameter(self, ext):
+ # GH 16645
+ msg = "'nrows' must be an integer >=0"
+ with pytest.raises(ValueError, match=msg):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
+ nrows='5')
+
+ def test_read_excel_squeeze(self, ext):
+ # GH 12157
+ f = os.path.join(self.dirpath, 'test_squeeze' + ext)
+
+ actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
+ expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
+ expected.index.name = 'a'
+ tm.assert_series_equal(actual, expected)
+
+ actual = pd.read_excel(f, 'two_columns', squeeze=True)
+ expected = pd.DataFrame({'a': [4, 5, 6],
+ 'b': [2, 3, 4]})
+ tm.assert_frame_equal(actual, expected)
+
+ actual = pd.read_excel(f, 'one_column', squeeze=True)
+ expected = pd.Series([1, 2, 3], name='a')
+ tm.assert_series_equal(actual, expected)
+
+
+@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
+class TestXlrdReader(ReadingTestsBase):
+ """
+ This is the base class for the xlrd tests, and 3 different file formats
+ are supported: xls, xlsx, xlsm
+ """
+
+ @td.skip_if_no("xlwt")
+ def test_read_xlrd_book(self, ext):
+ import xlrd
+ df = self.frame
+
+ engine = "xlrd"
+ sheet_name = "SheetA"
+
+ with ensure_clean(ext) as pth:
+ df.to_excel(pth, sheet_name)
+ book = xlrd.open_workbook(pth)
+
+ with ExcelFile(book, engine=engine) as xl:
+ result = read_excel(xl, sheet_name, index_col=0)
+ tm.assert_frame_equal(df, result)
+
+ result = read_excel(book, sheet_name=sheet_name,
+ engine=engine, index_col=0)
+ tm.assert_frame_equal(df, result)
diff --git a/pandas/tests/io/excel/test_base.py b/pandas/tests/io/excel/test_base.py
new file mode 100644
index 0000000000000..711fcf607fd5f
--- /dev/null
+++ b/pandas/tests/io/excel/test_base.py
@@ -0,0 +1,85 @@
+import os
+
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+import pandas.util.testing as tm
+
+from pandas.io.excel import ExcelFile, read_excel
+from pandas.io.parsers import read_csv
+
+_seriesd = tm.getSeriesData()
+_tsd = tm.getTimeSeriesData()
+_frame = DataFrame(_seriesd)[:10]
+_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
+_tsframe = tm.makeTimeDataFrame()[:5]
+_mixed_frame = _frame.copy()
+_mixed_frame['foo'] = 'bar'
+
+
+@td.skip_if_no('xlrd', '1.0.0')
+class SharedItems(object):
+
+ @pytest.fixture(autouse=True)
+ def setup_method(self, datapath):
+ self.dirpath = datapath("io", "data")
+ self.frame = _frame.copy()
+ self.frame2 = _frame2.copy()
+ self.tsframe = _tsframe.copy()
+ self.mixed_frame = _mixed_frame.copy()
+
+ def get_csv_refdf(self, basename):
+ """
+ Obtain the reference data from read_csv with the Python engine.
+
+ Parameters
+ ----------
+
+ basename : str
+ File base name, excluding file extension.
+
+ Returns
+ -------
+
+ dfref : DataFrame
+ """
+ pref = os.path.join(self.dirpath, basename + '.csv')
+ dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python')
+ return dfref
+
+ def get_excelfile(self, basename, ext):
+ """
+ Return test data ExcelFile instance.
+
+ Parameters
+ ----------
+
+ basename : str
+ File base name, excluding file extension.
+
+ Returns
+ -------
+
+ excel : io.excel.ExcelFile
+ """
+ return ExcelFile(os.path.join(self.dirpath, basename + ext))
+
+ def get_exceldf(self, basename, ext, *args, **kwds):
+ """
+ Return test data DataFrame.
+
+ Parameters
+ ----------
+
+ basename : str
+ File base name, excluding file extension.
+
+ Returns
+ -------
+
+ df : DataFrame
+ """
+ pth = os.path.join(self.dirpath, basename + ext)
+ return read_excel(pth, *args, **kwds)
diff --git a/pandas/tests/io/excel/writer/__init__.py b/pandas/tests/io/excel/writer/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/io/excel/writer/test_base.py b/pandas/tests/io/excel/writer/test_base.py
new file mode 100644
index 0000000000000..0c726a28d1f21
--- /dev/null
+++ b/pandas/tests/io/excel/writer/test_base.py
@@ -0,0 +1,940 @@
+from datetime import date, datetime, timedelta
+from functools import partial
+
+import numpy as np
+from numpy import nan
+import pytest
+
+from pandas.compat import BytesIO, map, range, u
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame, Index, MultiIndex
+from pandas.core.config import get_option, set_option
+import pandas.util.testing as tm
+from pandas.util.testing import ensure_clean
+
+from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
+from pandas.tests.io.excel.test_base import SharedItems
+
+
+class _WriterBase(SharedItems):
+
+ @pytest.fixture(autouse=True)
+ def set_engine_and_path(self, request, merge_cells, engine, ext):
+ """Fixture to set engine and open file for use in each test case
+
+ Rather than requiring `engine=...` to be provided explicitly as an
+ argument in each test, this fixture sets a global option to dictate
+ which engine should be used to write Excel files. After executing
+ the test it rolls back said change to the global option.
+
+ It also uses a context manager to open a temporary excel file for
+ the function to write to, accessible via `self.path`
+
+ Notes
+ -----
+ This fixture will run as part of each test method defined in the
+ class and any subclasses, on account of the `autouse=True`
+ argument
+ """
+ option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
+ prev_engine = get_option(option_name)
+ set_option(option_name, engine)
+ with ensure_clean(ext) as path:
+ self.path = path
+ yield
+ set_option(option_name, prev_engine) # Roll back option change
+
+
+@pytest.mark.parametrize("merge_cells", [True, False])
+@pytest.mark.parametrize("engine,ext", [
+ pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
+ not td.safe_import('openpyxl'), reason='No openpyxl')),
+ pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
+ not td.safe_import('openpyxl'), reason='No openpyxl')),
+ pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
+ not td.safe_import('xlwt'), reason='No xlwt')),
+ pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
+ not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
+])
+class TestExcelWriter(_WriterBase):
+ # Base class for test cases to run with different Excel writers.
+
+ def test_excel_sheet_by_name_raise(self, *_):
+ import xlrd
+
+ gt = DataFrame(np.random.randn(10, 2))
+ gt.to_excel(self.path)
+
+ xl = ExcelFile(self.path)
+ df = read_excel(xl, 0, index_col=0)
+
+ tm.assert_frame_equal(gt, df)
+
+ with pytest.raises(xlrd.XLRDError):
+ read_excel(xl, "0")
+
+ def test_excel_writer_context_manager(self, *_):
+ with ExcelWriter(self.path) as writer:
+ self.frame.to_excel(writer, "Data1")
+ self.frame2.to_excel(writer, "Data2")
+
+ with ExcelFile(self.path) as reader:
+ found_df = read_excel(reader, "Data1", index_col=0)
+ found_df2 = read_excel(reader, "Data2", index_col=0)
+
+ tm.assert_frame_equal(found_df, self.frame)
+ tm.assert_frame_equal(found_df2, self.frame2)
+
+ def test_roundtrip(self, merge_cells, engine, ext):
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(self.path, 'test1')
+ self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(self.path, 'test1', header=False)
+ self.frame.to_excel(self.path, 'test1', index=False)
+
+ # test roundtrip
+ self.frame.to_excel(self.path, 'test1')
+ recons = read_excel(self.path, 'test1', index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(self.path, 'test1', index=False)
+ recons = read_excel(self.path, 'test1', index_col=None)
+ recons.index = self.frame.index
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(self.path, 'test1', na_rep='NA')
+ recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
+ tm.assert_frame_equal(self.frame, recons)
+
+ # GH 3611
+ self.frame.to_excel(self.path, 'test1', na_rep='88')
+ recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(self.path, 'test1', na_rep='88')
+ recons = read_excel(self.path, 'test1', index_col=0,
+ na_values=[88, 88.0])
+ tm.assert_frame_equal(self.frame, recons)
+
+ # GH 6573
+ self.frame.to_excel(self.path, 'Sheet1')
+ recons = read_excel(self.path, index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(self.path, '0')
+ recons = read_excel(self.path, index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ # GH 8825 Pandas Series should provide to_excel method
+ s = self.frame["A"]
+ s.to_excel(self.path)
+ recons = read_excel(self.path, index_col=0)
+ tm.assert_frame_equal(s.to_frame(), recons)
+
+ def test_mixed(self, merge_cells, engine, ext):
+ self.mixed_frame.to_excel(self.path, 'test1')
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1', index_col=0)
+ tm.assert_frame_equal(self.mixed_frame, recons)
+
+ def test_ts_frame(self, *_):
+ df = tm.makeTimeDataFrame()[:5]
+
+ df.to_excel(self.path, "test1")
+ reader = ExcelFile(self.path)
+
+ recons = read_excel(reader, "test1", index_col=0)
+ tm.assert_frame_equal(df, recons)
+
+ def test_basics_with_nan(self, merge_cells, engine, ext):
+ self.frame['A'][:5] = nan
+ self.frame.to_excel(self.path, 'test1')
+ self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(self.path, 'test1', header=False)
+ self.frame.to_excel(self.path, 'test1', index=False)
+
+ @pytest.mark.parametrize("np_type", [
+ np.int8, np.int16, np.int32, np.int64])
+ def test_int_types(self, merge_cells, engine, ext, np_type):
+ # Test np.int values read come back as int
+ # (rather than float which is Excel's format).
+ frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
+ dtype=np_type)
+ frame.to_excel(self.path, "test1")
+
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, "test1", index_col=0)
+
+ int_frame = frame.astype(np.int64)
+ tm.assert_frame_equal(int_frame, recons)
+
+ recons2 = read_excel(self.path, "test1", index_col=0)
+ tm.assert_frame_equal(int_frame, recons2)
+
+ # Test with convert_float=False comes back as float.
+ float_frame = frame.astype(float)
+ recons = read_excel(self.path, "test1",
+ convert_float=False, index_col=0)
+ tm.assert_frame_equal(recons, float_frame,
+ check_index_type=False,
+ check_column_type=False)
+
+ @pytest.mark.parametrize("np_type", [
+ np.float16, np.float32, np.float64])
+ def test_float_types(self, merge_cells, engine, ext, np_type):
+ # Test np.float values read come back as float.
+ frame = DataFrame(np.random.random_sample(10), dtype=np_type)
+ frame.to_excel(self.path, "test1")
+
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, "test1", index_col=0).astype(np_type)
+
+ tm.assert_frame_equal(frame, recons, check_dtype=False)
+
+ @pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
+ def test_bool_types(self, merge_cells, engine, ext, np_type):
+ # Test np.bool values read come back as float.
+ frame = (DataFrame([1, 0, True, False], dtype=np_type))
+ frame.to_excel(self.path, "test1")
+
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, "test1", index_col=0).astype(np_type)
+
+ tm.assert_frame_equal(frame, recons)
+
+ def test_inf_roundtrip(self, *_):
+ frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
+ frame.to_excel(self.path, "test1")
+
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, "test1", index_col=0)
+
+ tm.assert_frame_equal(frame, recons)
+
+ def test_sheets(self, merge_cells, engine, ext):
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(self.path, 'test1')
+ self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(self.path, 'test1', header=False)
+ self.frame.to_excel(self.path, 'test1', index=False)
+
+ # Test writing to separate sheets
+ writer = ExcelWriter(self.path)
+ self.frame.to_excel(writer, 'test1')
+ self.tsframe.to_excel(writer, 'test2')
+ writer.save()
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1', index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+ recons = read_excel(reader, 'test2', index_col=0)
+ tm.assert_frame_equal(self.tsframe, recons)
+ assert 2 == len(reader.sheet_names)
+ assert 'test1' == reader.sheet_names[0]
+ assert 'test2' == reader.sheet_names[1]
+
+ def test_colaliases(self, merge_cells, engine, ext):
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(self.path, 'test1')
+ self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(self.path, 'test1', header=False)
+ self.frame.to_excel(self.path, 'test1', index=False)
+
+ # column aliases
+ col_aliases = Index(['AA', 'X', 'Y', 'Z'])
+ self.frame2.to_excel(self.path, 'test1', header=col_aliases)
+ reader = ExcelFile(self.path)
+ rs = read_excel(reader, 'test1', index_col=0)
+ xp = self.frame2.copy()
+ xp.columns = col_aliases
+ tm.assert_frame_equal(xp, rs)
+
+ def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(self.path, 'test1')
+ self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(self.path, 'test1', header=False)
+ self.frame.to_excel(self.path, 'test1', index=False)
+
+ # test index_label
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(self.path, 'test1',
+ index_label=['test'],
+ merge_cells=merge_cells)
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1',
+ index_col=0,
+ ).astype(np.int64)
+ frame.index.names = ['test']
+ assert frame.index.names == recons.index.names
+
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(self.path,
+ 'test1',
+ index_label=['test', 'dummy', 'dummy2'],
+ merge_cells=merge_cells)
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1',
+ index_col=0,
+ ).astype(np.int64)
+ frame.index.names = ['test']
+ assert frame.index.names == recons.index.names
+
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(self.path,
+ 'test1',
+ index_label='test',
+ merge_cells=merge_cells)
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1',
+ index_col=0,
+ ).astype(np.int64)
+ frame.index.names = ['test']
+ tm.assert_frame_equal(frame, recons.astype(bool))
+
+ self.frame.to_excel(self.path,
+ 'test1',
+ columns=['A', 'B', 'C', 'D'],
+ index=False, merge_cells=merge_cells)
+ # take 'A' and 'B' as indexes (same row as cols 'C', 'D')
+ df = self.frame.copy()
+ df = df.set_index(['A', 'B'])
+
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1', index_col=[0, 1])
+ tm.assert_frame_equal(df, recons, check_less_precise=True)
+
+ def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
+ df = DataFrame(np.random.randn(10, 4))
+ df.index.name = 'foo'
+
+ df.to_excel(self.path, merge_cells=merge_cells)
+
+ xf = ExcelFile(self.path)
+ result = read_excel(xf, xf.sheet_names[0],
+ index_col=0)
+
+ tm.assert_frame_equal(result, df)
+ assert result.index.name == 'foo'
+
+ def test_excel_roundtrip_datetime(self, merge_cells, *_):
+ # datetime.date, not sure what to test here exactly
+ tsf = self.tsframe.copy()
+
+ tsf.index = [x.date() for x in self.tsframe.index]
+ tsf.to_excel(self.path, "test1", merge_cells=merge_cells)
+
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, "test1", index_col=0)
+
+ tm.assert_frame_equal(self.tsframe, recons)
+
+ def test_excel_date_datetime_format(self, merge_cells, engine, ext):
+ # see gh-4133
+ #
+ # Excel output format strings
+ df = DataFrame([[date(2014, 1, 31),
+ date(1999, 9, 24)],
+ [datetime(1998, 5, 26, 23, 33, 4),
+ datetime(2014, 2, 28, 13, 5, 13)]],
+ index=["DATE", "DATETIME"], columns=["X", "Y"])
+ df_expected = DataFrame([[datetime(2014, 1, 31),
+ datetime(1999, 9, 24)],
+ [datetime(1998, 5, 26, 23, 33, 4),
+ datetime(2014, 2, 28, 13, 5, 13)]],
+ index=["DATE", "DATETIME"], columns=["X", "Y"])
+
+ with ensure_clean(ext) as filename2:
+ writer1 = ExcelWriter(self.path)
+ writer2 = ExcelWriter(filename2,
+ date_format="DD.MM.YYYY",
+ datetime_format="DD.MM.YYYY HH-MM-SS")
+
+ df.to_excel(writer1, "test1")
+ df.to_excel(writer2, "test1")
+
+ writer1.close()
+ writer2.close()
+
+ reader1 = ExcelFile(self.path)
+ reader2 = ExcelFile(filename2)
+
+ rs1 = read_excel(reader1, "test1", index_col=0)
+ rs2 = read_excel(reader2, "test1", index_col=0)
+
+ tm.assert_frame_equal(rs1, rs2)
+
+ # Since the reader returns a datetime object for dates,
+ # we need to use df_expected to check the result.
+ tm.assert_frame_equal(rs2, df_expected)
+
+ def test_to_excel_interval_no_labels(self, *_):
+ # see gh-19242
+ #
+ # Test writing Interval without labels.
+ frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
+ dtype=np.int64)
+ expected = frame.copy()
+
+ frame["new"] = pd.cut(frame[0], 10)
+ expected["new"] = pd.cut(expected[0], 10).astype(str)
+
+ frame.to_excel(self.path, "test1")
+ reader = ExcelFile(self.path)
+
+ recons = read_excel(reader, "test1", index_col=0)
+ tm.assert_frame_equal(expected, recons)
+
+ def test_to_excel_interval_labels(self, *_):
+ # see gh-19242
+ #
+ # Test writing Interval with labels.
+ frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
+ dtype=np.int64)
+ expected = frame.copy()
+ intervals = pd.cut(frame[0], 10, labels=["A", "B", "C", "D", "E",
+ "F", "G", "H", "I", "J"])
+ frame["new"] = intervals
+ expected["new"] = pd.Series(list(intervals))
+
+ frame.to_excel(self.path, "test1")
+ reader = ExcelFile(self.path)
+
+ recons = read_excel(reader, "test1", index_col=0)
+ tm.assert_frame_equal(expected, recons)
+
+ def test_to_excel_timedelta(self, *_):
+ # see gh-19242, gh-9155
+ #
+ # Test writing timedelta to xls.
+ frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
+ columns=["A"], dtype=np.int64)
+ expected = frame.copy()
+
+ frame["new"] = frame["A"].apply(lambda x: timedelta(seconds=x))
+ expected["new"] = expected["A"].apply(
+ lambda x: timedelta(seconds=x).total_seconds() / float(86400))
+
+ frame.to_excel(self.path, "test1")
+ reader = ExcelFile(self.path)
+
+ recons = read_excel(reader, "test1", index_col=0)
+ tm.assert_frame_equal(expected, recons)
+
+ def test_to_excel_periodindex(self, merge_cells, engine, ext):
+ frame = self.tsframe
+ xp = frame.resample('M', kind='period').mean()
+
+ xp.to_excel(self.path, 'sht1')
+
+ reader = ExcelFile(self.path)
+ rs = read_excel(reader, 'sht1', index_col=0)
+ tm.assert_frame_equal(xp, rs.to_period('M'))
+
+ def test_to_excel_multiindex(self, merge_cells, engine, ext):
+ frame = self.frame
+ arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
+ new_index = MultiIndex.from_arrays(arrays,
+ names=['first', 'second'])
+ frame.index = new_index
+
+ frame.to_excel(self.path, 'test1', header=False)
+ frame.to_excel(self.path, 'test1', columns=['A', 'B'])
+
+ # round trip
+ frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
+ reader = ExcelFile(self.path)
+ df = read_excel(reader, 'test1', index_col=[0, 1])
+ tm.assert_frame_equal(frame, df)
+
+ # GH13511
+ def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
+ frame = pd.DataFrame({'A': [None, 2, 3],
+ 'B': [10, 20, 30],
+ 'C': np.random.sample(3)})
+ frame = frame.set_index(['A', 'B'])
+
+ frame.to_excel(self.path, merge_cells=merge_cells)
+ df = read_excel(self.path, index_col=[0, 1])
+ tm.assert_frame_equal(frame, df)
+
+ # Test for Issue 11328. If column indices are integers, make
+ # sure they are handled correctly for either setting of
+ # merge_cells
+ def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
+ frame = self.frame
+ arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
+ new_index = MultiIndex.from_arrays(arrays,
+ names=['first', 'second'])
+ frame.index = new_index
+
+ new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
+ (50, 1), (50, 2)])
+ frame.columns = new_cols_index
+ header = [0, 1]
+ if not merge_cells:
+ header = 0
+
+ # round trip
+ frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
+ reader = ExcelFile(self.path)
+ df = read_excel(reader, 'test1', header=header,
+ index_col=[0, 1])
+ if not merge_cells:
+ fm = frame.columns.format(sparsify=False,
+ adjoin=False, names=False)
+ frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
+ tm.assert_frame_equal(frame, df)
+
+ def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
+ # try multiindex with dates
+ tsframe = self.tsframe.copy()
+ new_index = [tsframe.index, np.arange(len(tsframe.index))]
+ tsframe.index = MultiIndex.from_arrays(new_index)
+
+ tsframe.index.names = ['time', 'foo']
+ tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
+ reader = ExcelFile(self.path)
+ recons = read_excel(reader, 'test1',
+ index_col=[0, 1])
+
+ tm.assert_frame_equal(tsframe, recons)
+ assert recons.index.names == ('time', 'foo')
+
+ def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
+ ext):
+ # Test writing and re-reading a MI witout the index. GH 5616.
+
+ # Initial non-MI frame.
+ frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
+
+ # Add a MI.
+ frame2 = frame1.copy()
+ multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
+ frame2.index = multi_index
+
+ # Write out to Excel without the index.
+ frame2.to_excel(self.path, 'test1', index=False)
+
+ # Read it back in.
+ reader = ExcelFile(self.path)
+ frame3 = read_excel(reader, 'test1')
+
+ # Test that it is the same as the initial frame.
+ tm.assert_frame_equal(frame1, frame3)
+
+ def test_to_excel_float_format(self, *_):
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=["A", "B"], columns=["X", "Y", "Z"])
+ df.to_excel(self.path, "test1", float_format="%.2f")
+
+ reader = ExcelFile(self.path)
+ result = read_excel(reader, "test1", index_col=0)
+
+ expected = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=["A", "B"], columns=["X", "Y", "Z"])
+ tm.assert_frame_equal(result, expected)
+
+ def test_to_excel_output_encoding(self, merge_cells, engine, ext):
+ # Avoid mixed inferred_type.
+ df = DataFrame([[u"\u0192", u"\u0193", u"\u0194"],
+ [u"\u0195", u"\u0196", u"\u0197"]],
+ index=[u"A\u0192", u"B"],
+ columns=[u"X\u0193", u"Y", u"Z"])
+
+ with ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
+ df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
+ result = read_excel(filename, "TestSheet",
+ encoding="utf8", index_col=0)
+ tm.assert_frame_equal(result, df)
+
+ def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
+ with ensure_clean(u("\u0192u.") + ext) as filename:
+ try:
+ f = open(filename, "wb")
+ except UnicodeEncodeError:
+ pytest.skip("No unicode file names on this system")
+ else:
+ f.close()
+
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=["A", "B"], columns=["X", "Y", "Z"])
+ df.to_excel(filename, "test1", float_format="%.2f")
+
+ reader = ExcelFile(filename)
+ result = read_excel(reader, "test1", index_col=0)
+
+ expected = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=["A", "B"], columns=["X", "Y", "Z"])
+ tm.assert_frame_equal(result, expected)
+
+ # def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
+
+ # import StringIO
+ # s = StringIO(
+ # """Date,ticker,type,value
+ # 2001-01-01,x,close,12.2
+ # 2001-01-01,x,open ,12.1
+ # 2001-01-01,y,close,12.2
+ # 2001-01-01,y,open ,12.1
+ # 2001-02-01,x,close,12.2
+ # 2001-02-01,x,open ,12.1
+ # 2001-02-01,y,close,12.2
+ # 2001-02-01,y,open ,12.1
+ # 2001-03-01,x,close,12.2
+ # 2001-03-01,x,open ,12.1
+ # 2001-03-01,y,close,12.2
+ # 2001-03-01,y,open ,12.1""")
+ # df = read_csv(s, parse_dates=["Date"])
+ # pdf = df.pivot_table(values="value", rows=["ticker"],
+ # cols=["Date", "type"])
+
+ # try:
+ # import xlwt
+ # import xlrd
+ # except ImportError:
+ # pytest.skip
+
+ # filename = '__tmp_to_excel_header_styling_xls__.xls'
+ # pdf.to_excel(filename, 'test1')
+
+ # wbk = xlrd.open_workbook(filename,
+ # formatting_info=True)
+ # assert ["test1"] == wbk.sheet_names()
+ # ws = wbk.sheet_by_name('test1')
+ # assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
+ # for i in range(0, 2):
+ # for j in range(0, 7):
+ # xfx = ws.cell_xf_index(0, 0)
+ # cell_xf = wbk.xf_list[xfx]
+ # font = wbk.font_list
+ # assert 1 == font[cell_xf.font_index].bold
+ # assert 1 == cell_xf.border.top_line_style
+ # assert 1 == cell_xf.border.right_line_style
+ # assert 1 == cell_xf.border.bottom_line_style
+ # assert 1 == cell_xf.border.left_line_style
+ # assert 2 == cell_xf.alignment.hor_align
+ # os.remove(filename)
+ # def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
+ # import StringIO
+ # s = StringIO(
+ # """Date,ticker,type,value
+ # 2001-01-01,x,close,12.2
+ # 2001-01-01,x,open ,12.1
+ # 2001-01-01,y,close,12.2
+ # 2001-01-01,y,open ,12.1
+ # 2001-02-01,x,close,12.2
+ # 2001-02-01,x,open ,12.1
+ # 2001-02-01,y,close,12.2
+ # 2001-02-01,y,open ,12.1
+ # 2001-03-01,x,close,12.2
+ # 2001-03-01,x,open ,12.1
+ # 2001-03-01,y,close,12.2
+ # 2001-03-01,y,open ,12.1""")
+ # df = read_csv(s, parse_dates=["Date"])
+ # pdf = df.pivot_table(values="value", rows=["ticker"],
+ # cols=["Date", "type"])
+ # try:
+ # import openpyxl
+ # from openpyxl.cell import get_column_letter
+ # except ImportError:
+ # pytest.skip
+ # if openpyxl.__version__ < '1.6.1':
+ # pytest.skip
+ # # test xlsx_styling
+ # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
+ # pdf.to_excel(filename, 'test1')
+ # wbk = openpyxl.load_workbook(filename)
+ # assert ["test1"] == wbk.get_sheet_names()
+ # ws = wbk.get_sheet_by_name('test1')
+ # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
+ # xlsaddrs += ["A%s" % i for i in range(1, 6)]
+ # xlsaddrs += ["B1", "D1", "F1"]
+ # for xlsaddr in xlsaddrs:
+ # cell = ws.cell(xlsaddr)
+ # assert cell.style.font.bold
+ # assert (openpyxl.style.Border.BORDER_THIN ==
+ # cell.style.borders.top.border_style)
+ # assert (openpyxl.style.Border.BORDER_THIN ==
+ # cell.style.borders.right.border_style)
+ # assert (openpyxl.style.Border.BORDER_THIN ==
+ # cell.style.borders.bottom.border_style)
+ # assert (openpyxl.style.Border.BORDER_THIN ==
+ # cell.style.borders.left.border_style)
+ # assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
+ # cell.style.alignment.horizontal)
+ # mergedcells_addrs = ["C1", "E1", "G1"]
+ # for maddr in mergedcells_addrs:
+ # assert ws.cell(maddr).merged
+ # os.remove(filename)
+
+ @pytest.mark.parametrize("use_headers", [True, False])
+ @pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
+ @pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
+ def test_excel_010_hemstring(self, merge_cells, engine, ext,
+ c_idx_nlevels, r_idx_nlevels, use_headers):
+
+ def roundtrip(data, header=True, parser_hdr=0, index=True):
+ data.to_excel(self.path, header=header,
+ merge_cells=merge_cells, index=index)
+
+ xf = ExcelFile(self.path)
+ return read_excel(xf, xf.sheet_names[0], header=parser_hdr)
+
+ # Basic test.
+ parser_header = 0 if use_headers else None
+ res = roundtrip(DataFrame([0]), use_headers, parser_header)
+
+ assert res.shape == (1, 2)
+ assert res.iloc[0, 0] is not np.nan
+
+ # More complex tests with multi-index.
+ nrows = 5
+ ncols = 3
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ # ensure limited functionality in 0.10
+ # override of gh-2370 until sorted out in 0.11
+
+ df = mkdf(nrows, ncols, r_idx_nlevels=r_idx_nlevels,
+ c_idx_nlevels=c_idx_nlevels)
+
+ # This if will be removed once multi-column Excel writing
+ # is implemented. For now fixing gh-9794.
+ if c_idx_nlevels > 1:
+ with pytest.raises(NotImplementedError):
+ roundtrip(df, use_headers, index=False)
+ else:
+ res = roundtrip(df, use_headers)
+
+ if use_headers:
+ assert res.shape == (nrows, ncols + r_idx_nlevels)
+ else:
+ # First row taken as columns.
+ assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
+
+ # No NaNs.
+ for r in range(len(res.index)):
+ for c in range(len(res.columns)):
+ assert res.iloc[r, c] is not np.nan
+
+ def test_duplicated_columns(self, *_):
+ # see gh-5235
+ df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
+ columns=["A", "B", "B"])
+ df.to_excel(self.path, "test1")
+ expected = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
+ columns=["A", "B", "B.1"])
+
+ # By default, we mangle.
+ result = read_excel(self.path, "test1", index_col=0)
+ tm.assert_frame_equal(result, expected)
+
+ # Explicitly, we pass in the parameter.
+ result = read_excel(self.path, "test1", index_col=0,
+ mangle_dupe_cols=True)
+ tm.assert_frame_equal(result, expected)
+
+ # see gh-11007, gh-10970
+ df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
+ columns=["A", "B", "A", "B"])
+ df.to_excel(self.path, "test1")
+
+ result = read_excel(self.path, "test1", index_col=0)
+ expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
+ columns=["A", "B", "A.1", "B.1"])
+ tm.assert_frame_equal(result, expected)
+
+ # see gh-10982
+ df.to_excel(self.path, "test1", index=False, header=False)
+ result = read_excel(self.path, "test1", header=None)
+
+ expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
+ tm.assert_frame_equal(result, expected)
+
+ msg = "Setting mangle_dupe_cols=False is not supported yet"
+ with pytest.raises(ValueError, match=msg):
+ read_excel(self.path, "test1", header=None, mangle_dupe_cols=False)
+
+ def test_swapped_columns(self, merge_cells, engine, ext):
+ # Test for issue #5427.
+ write_frame = DataFrame({'A': [1, 1, 1],
+ 'B': [2, 2, 2]})
+ write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
+
+ read_frame = read_excel(self.path, 'test1', header=0)
+
+ tm.assert_series_equal(write_frame['A'], read_frame['A'])
+ tm.assert_series_equal(write_frame['B'], read_frame['B'])
+
+ def test_invalid_columns(self, *_):
+ # see gh-10982
+ write_frame = DataFrame({"A": [1, 1, 1],
+ "B": [2, 2, 2]})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ write_frame.to_excel(self.path, "test1", columns=["B", "C"])
+
+ expected = write_frame.reindex(columns=["B", "C"])
+ read_frame = read_excel(self.path, "test1", index_col=0)
+ tm.assert_frame_equal(expected, read_frame)
+
+ with pytest.raises(KeyError):
+ write_frame.to_excel(self.path, "test1", columns=["C", "D"])
+
+ def test_comment_arg(self, *_):
+ # see gh-18735
+ #
+ # Test the comment argument functionality to read_excel.
+
+ # Create file to read in.
+ df = DataFrame({"A": ["one", "#one", "one"],
+ "B": ["two", "two", "#two"]})
+ df.to_excel(self.path, "test_c")
+
+ # Read file without comment arg.
+ result1 = read_excel(self.path, "test_c", index_col=0)
+
+ result1.iloc[1, 0] = None
+ result1.iloc[1, 1] = None
+ result1.iloc[2, 1] = None
+
+ result2 = read_excel(self.path, "test_c", comment="#", index_col=0)
+ tm.assert_frame_equal(result1, result2)
+
+ def test_comment_default(self, merge_cells, engine, ext):
+ # Re issue #18735
+ # Test the comment argument default to read_excel
+
+ # Create file to read in
+ df = DataFrame({'A': ['one', '#one', 'one'],
+ 'B': ['two', 'two', '#two']})
+ df.to_excel(self.path, 'test_c')
+
+ # Read file with default and explicit comment=None
+ result1 = read_excel(self.path, 'test_c')
+ result2 = read_excel(self.path, 'test_c', comment=None)
+ tm.assert_frame_equal(result1, result2)
+
+ def test_comment_used(self, *_):
+ # see gh-18735
+ #
+ # Test the comment argument is working as expected when used.
+
+ # Create file to read in.
+ df = DataFrame({"A": ["one", "#one", "one"],
+ "B": ["two", "two", "#two"]})
+ df.to_excel(self.path, "test_c")
+
+ # Test read_frame_comment against manually produced expected output.
+ expected = DataFrame({"A": ["one", None, "one"],
+ "B": ["two", None, None]})
+ result = read_excel(self.path, "test_c", comment="#", index_col=0)
+ tm.assert_frame_equal(result, expected)
+
+ def test_comment_empty_line(self, merge_cells, engine, ext):
+ # Re issue #18735
+ # Test that read_excel ignores commented lines at the end of file
+
+ df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
+ df.to_excel(self.path, index=False)
+
+ # Test that all-comment lines at EoF are ignored
+ expected = DataFrame({'a': [1], 'b': [2]})
+ result = read_excel(self.path, comment='#')
+ tm.assert_frame_equal(result, expected)
+
+ def test_datetimes(self, merge_cells, engine, ext):
+
+ # Test writing and reading datetimes. For issue #9139. (xref #9185)
+ datetimes = [datetime(2013, 1, 13, 1, 2, 3),
+ datetime(2013, 1, 13, 2, 45, 56),
+ datetime(2013, 1, 13, 4, 29, 49),
+ datetime(2013, 1, 13, 6, 13, 42),
+ datetime(2013, 1, 13, 7, 57, 35),
+ datetime(2013, 1, 13, 9, 41, 28),
+ datetime(2013, 1, 13, 11, 25, 21),
+ datetime(2013, 1, 13, 13, 9, 14),
+ datetime(2013, 1, 13, 14, 53, 7),
+ datetime(2013, 1, 13, 16, 37, 0),
+ datetime(2013, 1, 13, 18, 20, 52)]
+
+ write_frame = DataFrame({'A': datetimes})
+ write_frame.to_excel(self.path, 'Sheet1')
+ read_frame = read_excel(self.path, 'Sheet1', header=0)
+
+ tm.assert_series_equal(write_frame['A'], read_frame['A'])
+
+ def test_bytes_io(self, merge_cells, engine, ext):
+ # see gh-7074
+ bio = BytesIO()
+ df = DataFrame(np.random.randn(10, 2))
+
+ # Pass engine explicitly, as there is no file path to infer from.
+ writer = ExcelWriter(bio, engine=engine)
+ df.to_excel(writer)
+ writer.save()
+
+ bio.seek(0)
+ reread_df = read_excel(bio, index_col=0)
+ tm.assert_frame_equal(df, reread_df)
+
+ def test_write_lists_dict(self, *_):
+ # see gh-8188.
+ df = DataFrame({"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
+ "numeric": [1, 2, 3.0],
+ "str": ["apple", "banana", "cherry"]})
+ df.to_excel(self.path, "Sheet1")
+ read = read_excel(self.path, "Sheet1", header=0, index_col=0)
+
+ expected = df.copy()
+ expected.mixed = expected.mixed.apply(str)
+ expected.numeric = expected.numeric.astype("int64")
+
+ tm.assert_frame_equal(read, expected)
+
+ def test_true_and_false_value_options(self, *_):
+ # see gh-13347
+ df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"])
+ expected = df.replace({"foo": True, "bar": False})
+
+ df.to_excel(self.path)
+ read_frame = read_excel(self.path, true_values=["foo"],
+ false_values=["bar"], index_col=0)
+ tm.assert_frame_equal(read_frame, expected)
+
+ def test_freeze_panes(self, *_):
+ # see gh-15160
+ expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
+ expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
+
+ result = read_excel(self.path, index_col=0)
+ tm.assert_frame_equal(result, expected)
+
+ def test_path_path_lib(self, merge_cells, engine, ext):
+ df = tm.makeDataFrame()
+ writer = partial(df.to_excel, engine=engine)
+
+ reader = partial(pd.read_excel, index_col=0)
+ result = tm.round_trip_pathlib(writer, reader,
+ path="foo.{ext}".format(ext=ext))
+ tm.assert_frame_equal(result, df)
+
+ def test_path_local_path(self, merge_cells, engine, ext):
+ df = tm.makeDataFrame()
+ writer = partial(df.to_excel, engine=engine)
+
+ reader = partial(pd.read_excel, index_col=0)
+ result = tm.round_trip_pathlib(writer, reader,
+ path="foo.{ext}".format(ext=ext))
+ tm.assert_frame_equal(result, df)
diff --git a/pandas/tests/io/excel/writer/test_engine.py b/pandas/tests/io/excel/writer/test_engine.py
new file mode 100644
index 0000000000000..6d7c3ccaf4126
--- /dev/null
+++ b/pandas/tests/io/excel/writer/test_engine.py
@@ -0,0 +1,250 @@
+from distutils.version import LooseVersion
+import os
+
+import numpy as np
+import pytest
+
+from pandas.compat import PY36
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame
+import pandas.util.testing as tm
+from pandas.util.testing import ensure_clean
+
+from pandas.io.excel import (
+ ExcelFile, ExcelWriter, _OpenpyxlWriter, _XlsxWriter, _XlwtWriter,
+ register_writer)
+from pandas.io.formats.excel import ExcelFormatter
+
+
+class TestExcelWriterEngineTests(object):
+
+ @pytest.mark.parametrize('klass,ext', [
+ pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
+ not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
+ pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
+ not td.safe_import('openpyxl'), reason='No openpyxl')),
+ pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
+ not td.safe_import('xlwt'), reason='No xlwt'))
+ ])
+ def test_ExcelWriter_dispatch(self, klass, ext):
+ with ensure_clean(ext) as path:
+ writer = ExcelWriter(path)
+ if ext == '.xlsx' and td.safe_import('xlsxwriter'):
+ # xlsxwriter has preference over openpyxl if both installed
+ assert isinstance(writer, _XlsxWriter)
+ else:
+ assert isinstance(writer, klass)
+
+ def test_ExcelWriter_dispatch_raises(self):
+ with pytest.raises(ValueError, match='No engine'):
+ ExcelWriter('nothing')
+
+ @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
+ def test_register_writer(self):
+ # some awkward mocking to test out dispatch and such actually works
+ called_save = []
+ called_write_cells = []
+
+ class DummyClass(ExcelWriter):
+ called_save = False
+ called_write_cells = False
+ supported_extensions = ['test', 'xlsx', 'xls']
+ engine = 'dummy'
+
+ def save(self):
+ called_save.append(True)
+
+ def write_cells(self, *args, **kwargs):
+ called_write_cells.append(True)
+
+ def check_called(func):
+ func()
+ assert len(called_save) >= 1
+ assert len(called_write_cells) >= 1
+ del called_save[:]
+ del called_write_cells[:]
+
+ with pd.option_context('io.excel.xlsx.writer', 'dummy'):
+ register_writer(DummyClass)
+ writer = ExcelWriter('something.test')
+ assert isinstance(writer, DummyClass)
+ df = tm.makeCustomDataframe(1, 1)
+
+ func = lambda: df.to_excel('something.test')
+ check_called(func)
+ check_called(lambda: df.to_excel('something.xlsx'))
+ check_called(
+ lambda: df.to_excel(
+ 'something.xls', engine='dummy'))
+
+
+@pytest.mark.parametrize('engine', [
+ pytest.param('xlwt',
+ marks=pytest.mark.xfail(reason='xlwt does not support '
+ 'openpyxl-compatible '
+ 'style dicts')),
+ 'xlsxwriter',
+ 'openpyxl',
+])
+def test_styler_to_excel(engine):
+ def style(df):
+ # XXX: RGB colors not supported in xlwt
+ return DataFrame([['font-weight: bold', '', ''],
+ ['', 'color: blue', ''],
+ ['', '', 'text-decoration: underline'],
+ ['border-style: solid', '', ''],
+ ['', 'font-style: italic', ''],
+ ['', '', 'text-align: right'],
+ ['background-color: red', '', ''],
+ ['number-format: 0%', '', ''],
+ ['', '', ''],
+ ['', '', ''],
+ ['', '', '']],
+ index=df.index, columns=df.columns)
+
+ def assert_equal_style(cell1, cell2):
+ # XXX: should find a better way to check equality
+ assert cell1.alignment.__dict__ == cell2.alignment.__dict__
+ assert cell1.border.__dict__ == cell2.border.__dict__
+ assert cell1.fill.__dict__ == cell2.fill.__dict__
+ assert cell1.font.__dict__ == cell2.font.__dict__
+ assert cell1.number_format == cell2.number_format
+ assert cell1.protection.__dict__ == cell2.protection.__dict__
+
+ def custom_converter(css):
+ # use bold iff there is custom style attached to the cell
+ if css.strip(' \n;'):
+ return {'font': {'bold': True}}
+ return {}
+
+ pytest.importorskip('jinja2')
+ pytest.importorskip(engine)
+
+ # Prepare spreadsheets
+
+ df = DataFrame(np.random.randn(11, 3))
+ with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
+ writer = ExcelWriter(path, engine=engine)
+ df.to_excel(writer, sheet_name='frame')
+ df.style.to_excel(writer, sheet_name='unstyled')
+ styled = df.style.apply(style, axis=None)
+ styled.to_excel(writer, sheet_name='styled')
+ ExcelFormatter(styled, style_converter=custom_converter).write(
+ writer, sheet_name='custom')
+ writer.save()
+
+ if engine not in ('openpyxl', 'xlsxwriter'):
+ # For other engines, we only smoke test
+ return
+ openpyxl = pytest.importorskip('openpyxl')
+ wb = openpyxl.load_workbook(path)
+
+ # (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
+ n_cells = 0
+ for col1, col2 in zip(wb['frame'].columns,
+ wb['unstyled'].columns):
+ assert len(col1) == len(col2)
+ for cell1, cell2 in zip(col1, col2):
+ assert cell1.value == cell2.value
+ assert_equal_style(cell1, cell2)
+ n_cells += 1
+
+ # ensure iteration actually happened:
+ assert n_cells == (11 + 1) * (3 + 1)
+
+ # (2) check styling with default converter
+
+ # XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
+ alpha = '00' if engine == 'openpyxl' else 'FF'
+
+ n_cells = 0
+ for col1, col2 in zip(wb['frame'].columns,
+ wb['styled'].columns):
+ assert len(col1) == len(col2)
+ for cell1, cell2 in zip(col1, col2):
+ ref = '%s%d' % (cell2.column, cell2.row)
+ # XXX: this isn't as strong a test as ideal; we should
+ # confirm that differences are exclusive
+ if ref == 'B2':
+ assert not cell1.font.bold
+ assert cell2.font.bold
+ elif ref == 'C3':
+ assert cell1.font.color.rgb != cell2.font.color.rgb
+ assert cell2.font.color.rgb == alpha + '0000FF'
+ elif ref == 'D4':
+ # This fails with engine=xlsxwriter due to
+ # https://bitbucket.org/openpyxl/openpyxl/issues/800
+ if engine == 'xlsxwriter' \
+ and (LooseVersion(openpyxl.__version__) <
+ LooseVersion('2.4.6')):
+ pass
+ else:
+ assert cell1.font.underline != cell2.font.underline
+ assert cell2.font.underline == 'single'
+ elif ref == 'B5':
+ assert not cell1.border.left.style
+ assert (cell2.border.top.style ==
+ cell2.border.right.style ==
+ cell2.border.bottom.style ==
+ cell2.border.left.style ==
+ 'medium')
+ elif ref == 'C6':
+ assert not cell1.font.italic
+ assert cell2.font.italic
+ elif ref == 'D7':
+ assert (cell1.alignment.horizontal !=
+ cell2.alignment.horizontal)
+ assert cell2.alignment.horizontal == 'right'
+ elif ref == 'B8':
+ assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
+ assert cell1.fill.patternType != cell2.fill.patternType
+ assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
+ assert cell2.fill.patternType == 'solid'
+ elif ref == 'B9':
+ assert cell1.number_format == 'General'
+ assert cell2.number_format == '0%'
+ else:
+ assert_equal_style(cell1, cell2)
+
+ assert cell1.value == cell2.value
+ n_cells += 1
+
+ assert n_cells == (11 + 1) * (3 + 1)
+
+ # (3) check styling with custom converter
+ n_cells = 0
+ for col1, col2 in zip(wb['frame'].columns,
+ wb['custom'].columns):
+ assert len(col1) == len(col2)
+ for cell1, cell2 in zip(col1, col2):
+ ref = '%s%d' % (cell2.column, cell2.row)
+ if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'):
+ assert not cell1.font.bold
+ assert cell2.font.bold
+ else:
+ assert_equal_style(cell1, cell2)
+
+ assert cell1.value == cell2.value
+ n_cells += 1
+
+ assert n_cells == (11 + 1) * (3 + 1)
+
+
+@td.skip_if_no('openpyxl')
+@pytest.mark.skipif(not PY36, reason='requires fspath')
+class TestFSPath(object):
+
+ def test_excelfile_fspath(self):
+ with tm.ensure_clean('foo.xlsx') as path:
+ df = DataFrame({"A": [1, 2]})
+ df.to_excel(path)
+ xl = ExcelFile(path)
+ result = os.fspath(xl)
+ assert result == path
+
+ def test_excelwriter_fspath(self):
+ with tm.ensure_clean('foo.xlsx') as path:
+ writer = ExcelWriter(path)
+ assert os.fspath(writer) == str(path)
diff --git a/pandas/tests/io/excel/writer/test_openpyxl.py b/pandas/tests/io/excel/writer/test_openpyxl.py
new file mode 100644
index 0000000000000..a4f6ced604106
--- /dev/null
+++ b/pandas/tests/io/excel/writer/test_openpyxl.py
@@ -0,0 +1,126 @@
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+from pandas.util.testing import ensure_clean
+
+from pandas.io.excel import ExcelWriter, _OpenpyxlWriter
+from pandas.tests.io.excel.writer.test_base import _WriterBase
+
+
+@td.skip_if_no('openpyxl')
+@pytest.mark.parametrize("merge_cells,ext,engine", [
+ (None, '.xlsx', 'openpyxl')])
+class TestOpenpyxlTests(_WriterBase):
+
+ def test_to_excel_styleconverter(self, merge_cells, ext, engine):
+ from openpyxl import styles
+
+ hstyle = {
+ "font": {
+ "color": '00FF0000',
+ "bold": True,
+ },
+ "borders": {
+ "top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin",
+ },
+ "alignment": {
+ "horizontal": "center",
+ "vertical": "top",
+ },
+ "fill": {
+ "patternType": 'solid',
+ 'fgColor': {
+ 'rgb': '006666FF',
+ 'tint': 0.3,
+ },
+ },
+ "number_format": {
+ "format_code": "0.00"
+ },
+ "protection": {
+ "locked": True,
+ "hidden": False,
+ },
+ }
+
+ font_color = styles.Color('00FF0000')
+ font = styles.Font(bold=True, color=font_color)
+ side = styles.Side(style=styles.borders.BORDER_THIN)
+ border = styles.Border(top=side, right=side, bottom=side, left=side)
+ alignment = styles.Alignment(horizontal='center', vertical='top')
+ fill_color = styles.Color(rgb='006666FF', tint=0.3)
+ fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
+
+ number_format = '0.00'
+
+ protection = styles.Protection(locked=True, hidden=False)
+
+ kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
+ assert kw['font'] == font
+ assert kw['border'] == border
+ assert kw['alignment'] == alignment
+ assert kw['fill'] == fill
+ assert kw['number_format'] == number_format
+ assert kw['protection'] == protection
+
+ def test_write_cells_merge_styled(self, merge_cells, ext, engine):
+ from pandas.io.formats.excel import ExcelCell
+
+ sheet_name = 'merge_styled'
+
+ sty_b1 = {'font': {'color': '00FF0000'}}
+ sty_a2 = {'font': {'color': '0000FF00'}}
+
+ initial_cells = [
+ ExcelCell(col=1, row=0, val=42, style=sty_b1),
+ ExcelCell(col=0, row=1, val=99, style=sty_a2),
+ ]
+
+ sty_merged = {'font': {'color': '000000FF', 'bold': True}}
+ sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
+ openpyxl_sty_merged = sty_kwargs['font']
+ merge_cells = [
+ ExcelCell(col=0, row=0, val='pandas',
+ mergestart=1, mergeend=1, style=sty_merged),
+ ]
+
+ with ensure_clean(ext) as path:
+ writer = _OpenpyxlWriter(path)
+ writer.write_cells(initial_cells, sheet_name=sheet_name)
+ writer.write_cells(merge_cells, sheet_name=sheet_name)
+
+ wks = writer.sheets[sheet_name]
+ xcell_b1 = wks['B1']
+ xcell_a2 = wks['A2']
+ assert xcell_b1.font == openpyxl_sty_merged
+ assert xcell_a2.font == openpyxl_sty_merged
+
+ @pytest.mark.parametrize("mode,expected", [
+ ('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
+ def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
+ import openpyxl
+ df = DataFrame([1], columns=['baz'])
+
+ with ensure_clean(ext) as f:
+ wb = openpyxl.Workbook()
+ wb.worksheets[0].title = 'foo'
+ wb.worksheets[0]['A1'].value = 'foo'
+ wb.create_sheet('bar')
+ wb.worksheets[1]['A1'].value = 'bar'
+ wb.save(f)
+
+ writer = ExcelWriter(f, engine=engine, mode=mode)
+ df.to_excel(writer, sheet_name='baz', index=False)
+ writer.save()
+
+ wb2 = openpyxl.load_workbook(f)
+ result = [sheet.title for sheet in wb2.worksheets]
+ assert result == expected
+
+ for index, cell_value in enumerate(expected):
+ assert wb2.worksheets[index]['A1'].value == cell_value
diff --git a/pandas/tests/io/excel/writer/test_xlsx.py b/pandas/tests/io/excel/writer/test_xlsx.py
new file mode 100644
index 0000000000000..04e37cca3c241
--- /dev/null
+++ b/pandas/tests/io/excel/writer/test_xlsx.py
@@ -0,0 +1,69 @@
+import warnings
+
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+from pandas.util.testing import ensure_clean
+
+from pandas.io.excel import ExcelWriter
+from pandas.tests.io.excel.writer.test_base import _WriterBase
+
+
+@td.skip_if_no('xlsxwriter')
+@pytest.mark.parametrize("merge_cells,ext,engine", [
+ (None, '.xlsx', 'xlsxwriter')])
+class TestXlsxWriterTests(_WriterBase):
+
+ @td.skip_if_no('openpyxl')
+ def test_column_format(self, merge_cells, ext, engine):
+ # Test that column formats are applied to cells. Test for issue #9167.
+ # Applicable to xlsxwriter only.
+ with warnings.catch_warnings():
+ # Ignore the openpyxl lxml warning.
+ warnings.simplefilter("ignore")
+ import openpyxl
+
+ with ensure_clean(ext) as path:
+ frame = DataFrame({'A': [123456, 123456],
+ 'B': [123456, 123456]})
+
+ writer = ExcelWriter(path)
+ frame.to_excel(writer)
+
+ # Add a number format to col B and ensure it is applied to cells.
+ num_format = '#,##0'
+ write_workbook = writer.book
+ write_worksheet = write_workbook.worksheets()[0]
+ col_format = write_workbook.add_format({'num_format': num_format})
+ write_worksheet.set_column('B:B', None, col_format)
+ writer.save()
+
+ read_workbook = openpyxl.load_workbook(path)
+ try:
+ read_worksheet = read_workbook['Sheet1']
+ except TypeError:
+ # compat
+ read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
+
+ # Get the number format from the cell.
+ try:
+ cell = read_worksheet['B2']
+ except TypeError:
+ # compat
+ cell = read_worksheet.cell('B2')
+
+ try:
+ read_num_format = cell.number_format
+ except Exception:
+ read_num_format = cell.style.number_format._format_code
+
+ assert read_num_format == num_format
+
+ def test_write_append_mode_raises(self, merge_cells, ext, engine):
+ msg = "Append mode is not supported with xlsxwriter!"
+
+ with ensure_clean(ext) as f:
+ with pytest.raises(ValueError, match=msg):
+ ExcelWriter(f, engine=engine, mode='a')
diff --git a/pandas/tests/io/excel/writer/test_xlwt.py b/pandas/tests/io/excel/writer/test_xlwt.py
new file mode 100644
index 0000000000000..eff8f68695b19
--- /dev/null
+++ b/pandas/tests/io/excel/writer/test_xlwt.py
@@ -0,0 +1,72 @@
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame, MultiIndex
+from pandas.util.testing import ensure_clean
+
+from pandas.io.excel import ExcelWriter, _XlwtWriter
+from pandas.tests.io.excel.writer.test_base import _WriterBase
+
+
+@td.skip_if_no('xlwt')
+@pytest.mark.parametrize("merge_cells,ext,engine", [
+ (None, '.xls', 'xlwt')])
+class TestXlwtTests(_WriterBase):
+
+ def test_excel_raise_error_on_multiindex_columns_and_no_index(
+ self, merge_cells, ext, engine):
+ # MultiIndex as columns is not yet implemented 9794
+ cols = MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = DataFrame(np.random.randn(10, 3), columns=cols)
+ with pytest.raises(NotImplementedError):
+ with ensure_clean(ext) as path:
+ df.to_excel(path, index=False)
+
+ def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
+ engine):
+ cols = MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
+ with ensure_clean(ext) as path:
+ df.to_excel(path, index=True)
+
+ def test_excel_multiindex_index(self, merge_cells, ext, engine):
+ # MultiIndex as index works so assert no error #9794
+ cols = MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = DataFrame(np.random.randn(3, 10), index=cols)
+ with ensure_clean(ext) as path:
+ df.to_excel(path, index=False)
+
+ def test_to_excel_styleconverter(self, merge_cells, ext, engine):
+ import xlwt
+
+ hstyle = {"font": {"bold": True},
+ "borders": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "alignment": {"horizontal": "center", "vertical": "top"}}
+
+ xls_style = _XlwtWriter._convert_to_style(hstyle)
+ assert xls_style.font.bold
+ assert xlwt.Borders.THIN == xls_style.borders.top
+ assert xlwt.Borders.THIN == xls_style.borders.right
+ assert xlwt.Borders.THIN == xls_style.borders.bottom
+ assert xlwt.Borders.THIN == xls_style.borders.left
+ assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
+ assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
+
+ def test_write_append_mode_raises(self, merge_cells, ext, engine):
+ msg = "Append mode is not supported with xlwt!"
+
+ with ensure_clean(ext) as f:
+ with pytest.raises(ValueError, match=msg):
+ ExcelWriter(f, engine=engine, mode='a')
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
deleted file mode 100644
index 8c92db734168b..0000000000000
--- a/pandas/tests/io/test_excel.py
+++ /dev/null
@@ -1,2559 +0,0 @@
-from collections import OrderedDict
-import contextlib
-from datetime import date, datetime, time, timedelta
-from distutils.version import LooseVersion
-from functools import partial
-import os
-import warnings
-
-import numpy as np
-from numpy import nan
-import pytest
-
-from pandas.compat import PY36, BytesIO, iteritems, map, range, u
-import pandas.util._test_decorators as td
-
-import pandas as pd
-from pandas import DataFrame, Index, MultiIndex, Series
-from pandas.core.config import get_option, set_option
-import pandas.util.testing as tm
-from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
-
-from pandas.io.common import URLError
-from pandas.io.excel import (
- ExcelFile, ExcelWriter, _OpenpyxlWriter, _XlsxWriter, _XlwtWriter,
- read_excel, register_writer)
-from pandas.io.formats.excel import ExcelFormatter
-from pandas.io.parsers import read_csv
-
-_seriesd = tm.getSeriesData()
-_tsd = tm.getTimeSeriesData()
-_frame = DataFrame(_seriesd)[:10]
-_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
-_tsframe = tm.makeTimeDataFrame()[:5]
-_mixed_frame = _frame.copy()
-_mixed_frame['foo'] = 'bar'
-
-
-@contextlib.contextmanager
-def ignore_xlrd_time_clock_warning():
- """
- Context manager to ignore warnings raised by the xlrd library,
- regarding the deprecation of `time.clock` in Python 3.7.
- """
- with warnings.catch_warnings():
- warnings.filterwarnings(
- action='ignore',
- message='time.clock has been deprecated',
- category=DeprecationWarning)
- yield
-
-
-@td.skip_if_no('xlrd', '1.0.0')
-class SharedItems(object):
-
- @pytest.fixture(autouse=True)
- def setup_method(self, datapath):
- self.dirpath = datapath("io", "data")
- self.frame = _frame.copy()
- self.frame2 = _frame2.copy()
- self.tsframe = _tsframe.copy()
- self.mixed_frame = _mixed_frame.copy()
-
- def get_csv_refdf(self, basename):
- """
- Obtain the reference data from read_csv with the Python engine.
-
- Parameters
- ----------
-
- basename : str
- File base name, excluding file extension.
-
- Returns
- -------
-
- dfref : DataFrame
- """
- pref = os.path.join(self.dirpath, basename + '.csv')
- dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python')
- return dfref
-
- def get_excelfile(self, basename, ext):
- """
- Return test data ExcelFile instance.
-
- Parameters
- ----------
-
- basename : str
- File base name, excluding file extension.
-
- Returns
- -------
-
- excel : io.excel.ExcelFile
- """
- return ExcelFile(os.path.join(self.dirpath, basename + ext))
-
- def get_exceldf(self, basename, ext, *args, **kwds):
- """
- Return test data DataFrame.
-
- Parameters
- ----------
-
- basename : str
- File base name, excluding file extension.
-
- Returns
- -------
-
- df : DataFrame
- """
- pth = os.path.join(self.dirpath, basename + ext)
- return read_excel(pth, *args, **kwds)
-
-
-class ReadingTestsBase(SharedItems):
- # This is based on ExcelWriterBase
-
- @pytest.fixture(autouse=True, params=['xlrd', None])
- def set_engine(self, request):
- func_name = "get_exceldf"
- old_func = getattr(self, func_name)
- new_func = partial(old_func, engine=request.param)
- setattr(self, func_name, new_func)
- yield
- setattr(self, func_name, old_func)
-
- @td.skip_if_no("xlrd", "1.0.1") # see gh-22682
- def test_usecols_int(self, ext):
-
- df_ref = self.get_csv_refdf("test1")
- df_ref = df_ref.reindex(columns=["A", "B", "C"])
-
- # usecols as int
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- with ignore_xlrd_time_clock_warning():
- df1 = self.get_exceldf("test1", ext, "Sheet1",
- index_col=0, usecols=3)
-
- # usecols as int
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- with ignore_xlrd_time_clock_warning():
- df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
- index_col=0, usecols=3)
-
- # parse_cols instead of usecols, usecols as int
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- with ignore_xlrd_time_clock_warning():
- df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
- index_col=0, parse_cols=3)
-
- # TODO add index to xls file)
- tm.assert_frame_equal(df1, df_ref, check_names=False)
- tm.assert_frame_equal(df2, df_ref, check_names=False)
- tm.assert_frame_equal(df3, df_ref, check_names=False)
-
- @td.skip_if_no('xlrd', '1.0.1') # GH-22682
- def test_usecols_list(self, ext):
-
- dfref = self.get_csv_refdf('test1')
- dfref = dfref.reindex(columns=['B', 'C'])
- df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
- usecols=[0, 2, 3])
- df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0, usecols=[0, 2, 3])
-
- with tm.assert_produces_warning(FutureWarning):
- with ignore_xlrd_time_clock_warning():
- df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0, parse_cols=[0, 2, 3])
-
- # TODO add index to xls file)
- tm.assert_frame_equal(df1, dfref, check_names=False)
- tm.assert_frame_equal(df2, dfref, check_names=False)
- tm.assert_frame_equal(df3, dfref, check_names=False)
-
- @td.skip_if_no('xlrd', '1.0.1') # GH-22682
- def test_usecols_str(self, ext):
-
- dfref = self.get_csv_refdf('test1')
-
- df1 = dfref.reindex(columns=['A', 'B', 'C'])
- df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
- usecols='A:D')
- df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0, usecols='A:D')
-
- with tm.assert_produces_warning(FutureWarning):
- with ignore_xlrd_time_clock_warning():
- df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0, parse_cols='A:D')
-
- # TODO add index to xls, read xls ignores index name ?
- tm.assert_frame_equal(df2, df1, check_names=False)
- tm.assert_frame_equal(df3, df1, check_names=False)
- tm.assert_frame_equal(df4, df1, check_names=False)
-
- df1 = dfref.reindex(columns=['B', 'C'])
- df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
- usecols='A,C,D')
- df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0, usecols='A,C,D')
- # TODO add index to xls file
- tm.assert_frame_equal(df2, df1, check_names=False)
- tm.assert_frame_equal(df3, df1, check_names=False)
-
- df1 = dfref.reindex(columns=['B', 'C'])
- df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
- usecols='A,C:D')
- df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0, usecols='A,C:D')
- tm.assert_frame_equal(df2, df1, check_names=False)
- tm.assert_frame_equal(df3, df1, check_names=False)
-
- @pytest.mark.parametrize("usecols", [
- [0, 1, 3], [0, 3, 1],
- [1, 0, 3], [1, 3, 0],
- [3, 0, 1], [3, 1, 0],
- ])
- def test_usecols_diff_positional_int_columns_order(self, ext, usecols):
- expected = self.get_csv_refdf("test1")[["A", "C"]]
- result = self.get_exceldf("test1", ext, "Sheet1",
- index_col=0, usecols=usecols)
- tm.assert_frame_equal(result, expected, check_names=False)
-
- @pytest.mark.parametrize("usecols", [
- ["B", "D"], ["D", "B"]
- ])
- def test_usecols_diff_positional_str_columns_order(self, ext, usecols):
- expected = self.get_csv_refdf("test1")[["B", "D"]]
- expected.index = range(len(expected))
-
- result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols)
- tm.assert_frame_equal(result, expected, check_names=False)
-
- def test_read_excel_without_slicing(self, ext):
- expected = self.get_csv_refdf("test1")
- result = self.get_exceldf("test1", ext, "Sheet1", index_col=0)
- tm.assert_frame_equal(result, expected, check_names=False)
-
- def test_usecols_excel_range_str(self, ext):
- expected = self.get_csv_refdf("test1")[["C", "D"]]
- result = self.get_exceldf("test1", ext, "Sheet1",
- index_col=0, usecols="A,D:E")
- tm.assert_frame_equal(result, expected, check_names=False)
-
- def test_usecols_excel_range_str_invalid(self, ext):
- msg = "Invalid column name: E1"
-
- with pytest.raises(ValueError, match=msg):
- self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1")
-
- def test_index_col_label_error(self, ext):
- msg = "list indices must be integers.*, not str"
-
- with pytest.raises(TypeError, match=msg):
- self.get_exceldf("test1", ext, "Sheet1", index_col=["A"],
- usecols=["A", "C"])
-
- def test_index_col_empty(self, ext):
- # see gh-9208
- result = self.get_exceldf("test1", ext, "Sheet3",
- index_col=["A", "B", "C"])
- expected = DataFrame(columns=["D", "E", "F"],
- index=MultiIndex(levels=[[]] * 3,
- codes=[[]] * 3,
- names=["A", "B", "C"]))
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("index_col", [None, 2])
- def test_index_col_with_unnamed(self, ext, index_col):
- # see gh-18792
- result = self.get_exceldf("test1", ext, "Sheet4",
- index_col=index_col)
- expected = DataFrame([["i1", "a", "x"], ["i2", "b", "y"]],
- columns=["Unnamed: 0", "col1", "col2"])
- if index_col:
- expected = expected.set_index(expected.columns[index_col])
-
- tm.assert_frame_equal(result, expected)
-
- def test_usecols_pass_non_existent_column(self, ext):
- msg = ("Usecols do not match columns, "
- "columns expected but not found: " + r"\['E'\]")
-
- with pytest.raises(ValueError, match=msg):
- self.get_exceldf("test1", ext, usecols=["E"])
-
- def test_usecols_wrong_type(self, ext):
- msg = ("'usecols' must either be list-like of "
- "all strings, all unicode, all integers or a callable.")
-
- with pytest.raises(ValueError, match=msg):
- self.get_exceldf("test1", ext, usecols=["E1", 0])
-
- def test_excel_stop_iterator(self, ext):
-
- parsed = self.get_exceldf('test2', ext, 'Sheet1')
- expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
- tm.assert_frame_equal(parsed, expected)
-
- def test_excel_cell_error_na(self, ext):
-
- parsed = self.get_exceldf('test3', ext, 'Sheet1')
- expected = DataFrame([[np.nan]], columns=['Test'])
- tm.assert_frame_equal(parsed, expected)
-
- def test_excel_passes_na(self, ext):
-
- excel = self.get_excelfile('test4', ext)
-
- parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
- na_values=['apple'])
- expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
- columns=['Test'])
- tm.assert_frame_equal(parsed, expected)
-
- parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
- na_values=['apple'])
- expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
- columns=['Test'])
- tm.assert_frame_equal(parsed, expected)
-
- # 13967
- excel = self.get_excelfile('test5', ext)
-
- parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
- na_values=['apple'])
- expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
- columns=['Test'])
- tm.assert_frame_equal(parsed, expected)
-
- parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
- na_values=['apple'])
- expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
- columns=['Test'])
- tm.assert_frame_equal(parsed, expected)
-
- @td.skip_if_no('xlrd', '1.0.1') # GH-22682
- def test_deprecated_sheetname(self, ext):
- # gh-17964
- excel = self.get_excelfile('test1', ext)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- read_excel(excel, sheetname='Sheet1')
-
- with pytest.raises(TypeError):
- read_excel(excel, sheet='Sheet1')
-
- @td.skip_if_no('xlrd', '1.0.1') # GH-22682
- def test_excel_table_sheet_by_index(self, ext):
-
- excel = self.get_excelfile('test1', ext)
- dfref = self.get_csv_refdf('test1')
-
- df1 = read_excel(excel, 0, index_col=0)
- df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
- tm.assert_frame_equal(df1, dfref, check_names=False)
- tm.assert_frame_equal(df2, dfref, check_names=False)
-
- df1 = excel.parse(0, index_col=0)
- df2 = excel.parse(1, skiprows=[1], index_col=0)
- tm.assert_frame_equal(df1, dfref, check_names=False)
- tm.assert_frame_equal(df2, dfref, check_names=False)
-
- df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
- tm.assert_frame_equal(df3, df1.iloc[:-1])
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
- tm.assert_frame_equal(df3, df4)
-
- df3 = excel.parse(0, index_col=0, skipfooter=1)
- tm.assert_frame_equal(df3, df1.iloc[:-1])
-
- import xlrd
- with pytest.raises(xlrd.XLRDError):
- read_excel(excel, 'asdf')
-
- def test_excel_table(self, ext):
-
- dfref = self.get_csv_refdf('test1')
-
- df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
- df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
- index_col=0)
- # TODO add index to file
- tm.assert_frame_equal(df1, dfref, check_names=False)
- tm.assert_frame_equal(df2, dfref, check_names=False)
-
- df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
- skipfooter=1)
- tm.assert_frame_equal(df3, df1.iloc[:-1])
-
- def test_reader_special_dtypes(self, ext):
-
- expected = DataFrame.from_dict(OrderedDict([
- ("IntCol", [1, 2, -3, 4, 0]),
- ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
- ("BoolCol", [True, False, True, True, False]),
- ("StrCol", [1, 2, 3, 4, 5]),
- # GH5394 - this is why convert_float isn't vectorized
- ("Str2Col", ["a", 3, "c", "d", "e"]),
- ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
- datetime(1905, 1, 1), datetime(2013, 12, 14),
- datetime(2015, 3, 14)])
- ]))
- basename = 'test_types'
-
- # should read in correctly and infer types
- actual = self.get_exceldf(basename, ext, 'Sheet1')
- tm.assert_frame_equal(actual, expected)
-
- # if not coercing number, then int comes in as float
- float_expected = expected.copy()
- float_expected["IntCol"] = float_expected["IntCol"].astype(float)
- float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
- actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
- tm.assert_frame_equal(actual, float_expected)
-
- # check setting Index (assuming xls and xlsx are the same here)
- for icol, name in enumerate(expected.columns):
- actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
- exp = expected.set_index(name)
- tm.assert_frame_equal(actual, exp)
-
- # convert_float and converters should be different but both accepted
- expected["StrCol"] = expected["StrCol"].apply(str)
- actual = self.get_exceldf(
- basename, ext, 'Sheet1', converters={"StrCol": str})
- tm.assert_frame_equal(actual, expected)
-
- no_convert_float = float_expected.copy()
- no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
- actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
- converters={"StrCol": str})
- tm.assert_frame_equal(actual, no_convert_float)
-
- # GH8212 - support for converters and missing values
- def test_reader_converters(self, ext):
-
- basename = 'test_converters'
-
- expected = DataFrame.from_dict(OrderedDict([
- ("IntCol", [1, 2, -3, -1000, 0]),
- ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
- ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
- ("StrCol", ['1', np.nan, '3', '4', '5']),
- ]))
-
- converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
- 'FloatCol': lambda x: 10 * x if x else np.nan,
- 2: lambda x: 'Found' if x != '' else 'Not found',
- 3: lambda x: str(x) if x else '',
- }
-
- # should read in correctly and set types of single cells (not array
- # dtypes)
- actual = self.get_exceldf(basename, ext, 'Sheet1',
- converters=converters)
- tm.assert_frame_equal(actual, expected)
-
- def test_reader_dtype(self, ext):
- # GH 8212
- basename = 'testdtype'
- actual = self.get_exceldf(basename, ext)
-
- expected = DataFrame({
- 'a': [1, 2, 3, 4],
- 'b': [2.5, 3.5, 4.5, 5.5],
- 'c': [1, 2, 3, 4],
- 'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
- columns=['a', 'b', 'c', 'd'])
-
- tm.assert_frame_equal(actual, expected)
-
- actual = self.get_exceldf(basename, ext,
- dtype={'a': 'float64',
- 'b': 'float32',
- 'c': str})
-
- expected['a'] = expected['a'].astype('float64')
- expected['b'] = expected['b'].astype('float32')
- expected['c'] = ['001', '002', '003', '004']
- tm.assert_frame_equal(actual, expected)
-
- with pytest.raises(ValueError):
- self.get_exceldf(basename, ext, dtype={'d': 'int64'})
-
- @pytest.mark.parametrize("dtype,expected", [
- (None,
- DataFrame({
- "a": [1, 2, 3, 4],
- "b": [2.5, 3.5, 4.5, 5.5],
- "c": [1, 2, 3, 4],
- "d": [1.0, 2.0, np.nan, 4.0]
- })),
- ({"a": "float64",
- "b": "float32",
- "c": str,
- "d": str
- },
- DataFrame({
- "a": Series([1, 2, 3, 4], dtype="float64"),
- "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
- "c": ["001", "002", "003", "004"],
- "d": ["1", "2", np.nan, "4"]
- })),
- ])
- def test_reader_dtype_str(self, ext, dtype, expected):
- # see gh-20377
- basename = "testdtype"
-
- actual = self.get_exceldf(basename, ext, dtype=dtype)
- tm.assert_frame_equal(actual, expected)
-
- def test_reading_all_sheets(self, ext):
- # Test reading all sheetnames by setting sheetname to None,
- # Ensure a dict is returned.
- # See PR #9450
- basename = 'test_multisheet'
- dfs = self.get_exceldf(basename, ext, sheet_name=None)
- # ensure this is not alphabetical to test order preservation
- expected_keys = ['Charlie', 'Alpha', 'Beta']
- tm.assert_contains_all(expected_keys, dfs.keys())
- # Issue 9930
- # Ensure sheet order is preserved
- assert expected_keys == list(dfs.keys())
-
- def test_reading_multiple_specific_sheets(self, ext):
- # Test reading specific sheetnames by specifying a mixed list
- # of integers and strings, and confirm that duplicated sheet
- # references (positions/names) are removed properly.
- # Ensure a dict is returned
- # See PR #9450
- basename = 'test_multisheet'
- # Explicitly request duplicates. Only the set should be returned.
- expected_keys = [2, 'Charlie', 'Charlie']
- dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
- expected_keys = list(set(expected_keys))
- tm.assert_contains_all(expected_keys, dfs.keys())
- assert len(expected_keys) == len(dfs.keys())
-
- def test_reading_all_sheets_with_blank(self, ext):
- # Test reading all sheetnames by setting sheetname to None,
- # In the case where some sheets are blank.
- # Issue #11711
- basename = 'blank_with_header'
- dfs = self.get_exceldf(basename, ext, sheet_name=None)
- expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
- tm.assert_contains_all(expected_keys, dfs.keys())
-
- # GH6403
- def test_read_excel_blank(self, ext):
- actual = self.get_exceldf('blank', ext, 'Sheet1')
- tm.assert_frame_equal(actual, DataFrame())
-
- def test_read_excel_blank_with_header(self, ext):
- expected = DataFrame(columns=['col_1', 'col_2'])
- actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
- tm.assert_frame_equal(actual, expected)
-
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- @pytest.mark.parametrize("header,expected", [
- (None, DataFrame([np.nan] * 4)),
- (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
- ])
- def test_read_one_empty_col_no_header(self, ext, header, expected):
- # xref gh-12292
- filename = "no_header"
- df = pd.DataFrame(
- [["", 1, 100],
- ["", 2, 200],
- ["", 3, 300],
- ["", 4, 400]]
- )
-
- with ensure_clean(ext) as path:
- df.to_excel(path, filename, index=False, header=False)
- result = read_excel(path, filename, usecols=[0], header=header)
-
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- @pytest.mark.parametrize("header,expected", [
- (None, DataFrame([0] + [np.nan] * 4)),
- (0, DataFrame([np.nan] * 4))
- ])
- def test_read_one_empty_col_with_header(self, ext, header, expected):
- filename = "with_header"
- df = pd.DataFrame(
- [["", 1, 100],
- ["", 2, 200],
- ["", 3, 300],
- ["", 4, 400]]
- )
-
- with ensure_clean(ext) as path:
- df.to_excel(path, 'with_header', index=False, header=True)
- result = read_excel(path, filename, usecols=[0], header=header)
-
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no('openpyxl')
- @td.skip_if_no('xlwt')
- def test_set_column_names_in_parameter(self, ext):
- # GH 12870 : pass down column names associated with
- # keyword argument names
- refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
- [3, 'baz']], columns=['a', 'b'])
-
- with ensure_clean(ext) as pth:
- with ExcelWriter(pth) as writer:
- refdf.to_excel(writer, 'Data_no_head',
- header=False, index=False)
- refdf.to_excel(writer, 'Data_with_head', index=False)
-
- refdf.columns = ['A', 'B']
-
- with ExcelFile(pth) as reader:
- xlsdf_no_head = read_excel(reader, 'Data_no_head',
- header=None, names=['A', 'B'])
- xlsdf_with_head = read_excel(reader, 'Data_with_head',
- index_col=None, names=['A', 'B'])
-
- tm.assert_frame_equal(xlsdf_no_head, refdf)
- tm.assert_frame_equal(xlsdf_with_head, refdf)
-
- def test_date_conversion_overflow(self, ext):
- # GH 10001 : pandas.ExcelFile ignore parse_dates=False
- expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
- [pd.Timestamp('2016-03-16'), 'Jack Black'],
- [1e+20, 'Timothy Brown']],
- columns=['DateColWithBigInt', 'StringCol'])
-
- result = self.get_exceldf('testdateoverflow', ext)
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no("xlrd", "1.0.1") # see gh-22682
- def test_sheet_name_and_sheetname(self, ext):
- # gh-10559: Minor improvement: Change "sheet_name" to "sheetname"
- # gh-10969: DOC: Consistent var names (sheetname vs sheet_name)
- # gh-12604: CLN GH10559 Rename sheetname variable to sheet_name
- # gh-20920: ExcelFile.parse() and pd.read_xlsx() have different
- # behavior for "sheetname" argument
- filename = "test1"
- sheet_name = "Sheet1"
-
- df_ref = self.get_csv_refdf(filename)
- df1 = self.get_exceldf(filename, ext,
- sheet_name=sheet_name, index_col=0) # doc
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- with ignore_xlrd_time_clock_warning():
- df2 = self.get_exceldf(filename, ext, index_col=0,
- sheetname=sheet_name) # backward compat
-
- excel = self.get_excelfile(filename, ext)
- df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- df2_parse = excel.parse(index_col=0,
- sheetname=sheet_name) # backward compat
-
- tm.assert_frame_equal(df1, df_ref, check_names=False)
- tm.assert_frame_equal(df2, df_ref, check_names=False)
- tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
- tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
-
- def test_sheet_name_both_raises(self, ext):
- with pytest.raises(TypeError, match="Cannot specify both"):
- self.get_exceldf('test1', ext, sheetname='Sheet1',
- sheet_name='Sheet1')
-
- excel = self.get_excelfile('test1', ext)
- with pytest.raises(TypeError, match="Cannot specify both"):
- excel.parse(sheetname='Sheet1',
- sheet_name='Sheet1')
-
- def test_excel_read_buffer(self, ext):
-
- pth = os.path.join(self.dirpath, 'test1' + ext)
- expected = read_excel(pth, 'Sheet1', index_col=0)
- with open(pth, 'rb') as f:
- actual = read_excel(f, 'Sheet1', index_col=0)
- tm.assert_frame_equal(expected, actual)
-
- with open(pth, 'rb') as f:
- xls = ExcelFile(f)
- actual = read_excel(xls, 'Sheet1', index_col=0)
- tm.assert_frame_equal(expected, actual)
-
- def test_bad_engine_raises(self, ext):
- bad_engine = 'foo'
- with pytest.raises(ValueError, match="Unknown engine: foo"):
- read_excel('', engine=bad_engine)
-
- @tm.network
- def test_read_from_http_url(self, ext):
- url = ('https://raw.github.com/pandas-dev/pandas/master/'
- 'pandas/tests/io/data/test1' + ext)
- url_table = read_excel(url)
- local_table = self.get_exceldf('test1', ext)
- tm.assert_frame_equal(url_table, local_table)
-
- @td.skip_if_not_us_locale
- def test_read_from_s3_url(self, ext, s3_resource):
- # Bucket "pandas-test" created in tests/io/conftest.py
- file_name = os.path.join(self.dirpath, 'test1' + ext)
-
- with open(file_name, "rb") as f:
- s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext,
- Body=f)
-
- url = ('s3://pandas-test/test1' + ext)
- url_table = read_excel(url)
- local_table = self.get_exceldf('test1', ext)
- tm.assert_frame_equal(url_table, local_table)
-
- @pytest.mark.slow
- # ignore warning from old xlrd
- @pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
- def test_read_from_file_url(self, ext):
-
- # FILE
- localtable = os.path.join(self.dirpath, 'test1' + ext)
- local_table = read_excel(localtable)
-
- try:
- url_table = read_excel('file://localhost/' + localtable)
- except URLError:
- # fails on some systems
- import platform
- pytest.skip("failing on %s" %
- ' '.join(platform.uname()).strip())
-
- tm.assert_frame_equal(url_table, local_table)
-
- @td.skip_if_no('pathlib')
- def test_read_from_pathlib_path(self, ext):
-
- # GH12655
- from pathlib import Path
-
- str_path = os.path.join(self.dirpath, 'test1' + ext)
- expected = read_excel(str_path, 'Sheet1', index_col=0)
-
- path_obj = Path(self.dirpath, 'test1' + ext)
- actual = read_excel(path_obj, 'Sheet1', index_col=0)
-
- tm.assert_frame_equal(expected, actual)
-
- @td.skip_if_no('py.path')
- def test_read_from_py_localpath(self, ext):
-
- # GH12655
- from py.path import local as LocalPath
-
- str_path = os.path.join(self.dirpath, 'test1' + ext)
- expected = read_excel(str_path, 'Sheet1', index_col=0)
-
- abs_dir = os.path.abspath(self.dirpath)
- path_obj = LocalPath(abs_dir).join('test1' + ext)
- actual = read_excel(path_obj, 'Sheet1', index_col=0)
-
- tm.assert_frame_equal(expected, actual)
-
- def test_reader_closes_file(self, ext):
-
- pth = os.path.join(self.dirpath, 'test1' + ext)
- f = open(pth, 'rb')
- with ExcelFile(f) as xlsx:
- # parses okay
- read_excel(xlsx, 'Sheet1', index_col=0)
-
- assert f.closed
-
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- def test_creating_and_reading_multiple_sheets(self, ext):
- # see gh-9450
- #
- # Test reading multiple sheets, from a runtime
- # created Excel file with multiple sheets.
- def tdf(col_sheet_name):
- d, i = [11, 22, 33], [1, 2, 3]
- return DataFrame(d, i, columns=[col_sheet_name])
-
- sheets = ["AAA", "BBB", "CCC"]
-
- dfs = [tdf(s) for s in sheets]
- dfs = dict(zip(sheets, dfs))
-
- with ensure_clean(ext) as pth:
- with ExcelWriter(pth) as ew:
- for sheetname, df in iteritems(dfs):
- df.to_excel(ew, sheetname)
-
- dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
-
- for s in sheets:
- tm.assert_frame_equal(dfs[s], dfs_returned[s])
-
- def test_reader_seconds(self, ext):
-
- # Test reading times with and without milliseconds. GH5945.
- expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
- time(2, 45, 56, 100000),
- time(4, 29, 49, 200000),
- time(6, 13, 42, 300000),
- time(7, 57, 35, 400000),
- time(9, 41, 28, 500000),
- time(11, 25, 21, 600000),
- time(13, 9, 14, 700000),
- time(14, 53, 7, 800000),
- time(16, 37, 0, 900000),
- time(18, 20, 54)]})
-
- actual = self.get_exceldf('times_1900', ext, 'Sheet1')
- tm.assert_frame_equal(actual, expected)
-
- actual = self.get_exceldf('times_1904', ext, 'Sheet1')
- tm.assert_frame_equal(actual, expected)
-
- def test_read_excel_multiindex(self, ext):
- # see gh-4679
- mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
- mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
-
- # "mi_column" sheet
- expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True],
- [2, 3.5, pd.Timestamp("2015-01-02"), False],
- [3, 4.5, pd.Timestamp("2015-01-03"), False],
- [4, 5.5, pd.Timestamp("2015-01-04"), True]],
- columns=mi)
-
- actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- # "mi_index" sheet
- expected.index = mi
- expected.columns = ["a", "b", "c", "d"]
-
- actual = read_excel(mi_file, "mi_index", index_col=[0, 1])
- tm.assert_frame_equal(actual, expected, check_names=False)
-
- # "both" sheet
- expected.columns = mi
-
- actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
- tm.assert_frame_equal(actual, expected, check_names=False)
-
- # "mi_index_name" sheet
- expected.columns = ["a", "b", "c", "d"]
- expected.index = mi.set_names(["ilvl1", "ilvl2"])
-
- actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1])
- tm.assert_frame_equal(actual, expected)
-
- # "mi_column_name" sheet
- expected.index = list(range(4))
- expected.columns = mi.set_names(["c1", "c2"])
- actual = read_excel(mi_file, "mi_column_name",
- header=[0, 1], index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- # see gh-11317
- # "name_with_int" sheet
- expected.columns = mi.set_levels(
- [1, 2], level=1).set_names(["c1", "c2"])
-
- actual = read_excel(mi_file, "name_with_int",
- index_col=0, header=[0, 1])
- tm.assert_frame_equal(actual, expected)
-
- # "both_name" sheet
- expected.columns = mi.set_names(["c1", "c2"])
- expected.index = mi.set_names(["ilvl1", "ilvl2"])
-
- actual = read_excel(mi_file, "both_name",
- index_col=[0, 1], header=[0, 1])
- tm.assert_frame_equal(actual, expected)
-
- # "both_skiprows" sheet
- actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1],
- header=[0, 1], skiprows=2)
- tm.assert_frame_equal(actual, expected)
-
- def test_read_excel_multiindex_header_only(self, ext):
- # see gh-11733.
- #
- # Don't try to parse a header name if there isn't one.
- mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
- result = read_excel(mi_file, "index_col_none", header=[0, 1])
-
- exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
- expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no("xlsxwriter")
- def test_read_excel_multiindex_empty_level(self, ext):
- # see gh-12453
- with ensure_clean(ext) as path:
- df = DataFrame({
- ("One", "x"): {0: 1},
- ("Two", "X"): {0: 3},
- ("Two", "Y"): {0: 7},
- ("Zero", ""): {0: 0}
- })
-
- expected = DataFrame({
- ("One", "x"): {0: 1},
- ("Two", "X"): {0: 3},
- ("Two", "Y"): {0: 7},
- ("Zero", "Unnamed: 4_level_1"): {0: 0}
- })
-
- df.to_excel(path)
- actual = pd.read_excel(path, header=[0, 1], index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- df = pd.DataFrame({
- ("Beg", ""): {0: 0},
- ("Middle", "x"): {0: 1},
- ("Tail", "X"): {0: 3},
- ("Tail", "Y"): {0: 7}
- })
-
- expected = pd.DataFrame({
- ("Beg", "Unnamed: 1_level_1"): {0: 0},
- ("Middle", "x"): {0: 1},
- ("Tail", "X"): {0: 3},
- ("Tail", "Y"): {0: 7}
- })
-
- df.to_excel(path)
- actual = pd.read_excel(path, header=[0, 1], index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- @td.skip_if_no("xlsxwriter")
- @pytest.mark.parametrize("c_idx_names", [True, False])
- @pytest.mark.parametrize("r_idx_names", [True, False])
- @pytest.mark.parametrize("c_idx_levels", [1, 3])
- @pytest.mark.parametrize("r_idx_levels", [1, 3])
- def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
- c_idx_levels, r_idx_levels):
- # see gh-4679
- with ensure_clean(ext) as pth:
- if c_idx_levels == 1 and c_idx_names:
- pytest.skip("Column index name cannot be "
- "serialized unless it's a MultiIndex")
-
- # Empty name case current read in as
- # unnamed levels, not Nones.
- check_names = r_idx_names or r_idx_levels <= 1
-
- df = mkdf(5, 5, c_idx_names, r_idx_names,
- c_idx_levels, r_idx_levels)
- df.to_excel(pth)
-
- act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
- header=list(range(c_idx_levels)))
- tm.assert_frame_equal(df, act, check_names=check_names)
-
- df.iloc[0, :] = np.nan
- df.to_excel(pth)
-
- act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
- header=list(range(c_idx_levels)))
- tm.assert_frame_equal(df, act, check_names=check_names)
-
- df.iloc[-1, :] = np.nan
- df.to_excel(pth)
- act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
- header=list(range(c_idx_levels)))
- tm.assert_frame_equal(df, act, check_names=check_names)
-
- def test_excel_old_index_format(self, ext):
- # see gh-4679
- filename = "test_index_name_pre17" + ext
- in_file = os.path.join(self.dirpath, filename)
-
- # We detect headers to determine if index names exist, so
- # that "index" name in the "names" version of the data will
- # now be interpreted as rows that include null data.
- data = np.array([[None, None, None, None, None],
- ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
- ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
- ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
- ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
- ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
- columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
- mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1",
- "R_l0_g2", "R_l0_g3", "R_l0_g4"],
- ["R1", "R_l1_g0", "R_l1_g1",
- "R_l1_g2", "R_l1_g3", "R_l1_g4"]],
- codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
- names=[None, None])
- si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2",
- "R_l0_g3", "R_l0_g4"], name=None)
-
- expected = pd.DataFrame(data, index=si, columns=columns)
-
- actual = pd.read_excel(in_file, "single_names", index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- expected.index = mi
-
- actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1])
- tm.assert_frame_equal(actual, expected)
-
- # The analogous versions of the "names" version data
- # where there are explicitly no names for the indices.
- data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
- ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
- ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
- ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
- ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
- columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
- mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2",
- "R_l0_g3", "R_l0_g4"],
- ["R_l1_g0", "R_l1_g1", "R_l1_g2",
- "R_l1_g3", "R_l1_g4"]],
- codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
- names=[None, None])
- si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2",
- "R_l0_g3", "R_l0_g4"], name=None)
-
- expected = pd.DataFrame(data, index=si, columns=columns)
-
- actual = pd.read_excel(in_file, "single_no_names", index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- expected.index = mi
-
- actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1])
- tm.assert_frame_equal(actual, expected, check_names=False)
-
- def test_read_excel_bool_header_arg(self, ext):
- # GH 6114
- for arg in [True, False]:
- with pytest.raises(TypeError):
- pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
- header=arg)
-
- def test_read_excel_chunksize(self, ext):
- # GH 8011
- with pytest.raises(NotImplementedError):
- pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
- chunksize=100)
-
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- def test_read_excel_parse_dates(self, ext):
- # see gh-11544, gh-12051
- df = DataFrame(
- {"col": [1, 2, 3],
- "date_strings": pd.date_range("2012-01-01", periods=3)})
- df2 = df.copy()
- df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
-
- with ensure_clean(ext) as pth:
- df2.to_excel(pth)
-
- res = read_excel(pth, index_col=0)
- tm.assert_frame_equal(df2, res)
-
- res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
- tm.assert_frame_equal(df, res)
-
- date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
- res = read_excel(pth, parse_dates=["date_strings"],
- date_parser=date_parser, index_col=0)
- tm.assert_frame_equal(df, res)
-
- def test_read_excel_skiprows_list(self, ext):
- # GH 4903
- actual = pd.read_excel(os.path.join(self.dirpath,
- 'testskiprows' + ext),
- 'skiprows_list', skiprows=[0, 2])
- expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
- [2, 3.5, pd.Timestamp('2015-01-02'), False],
- [3, 4.5, pd.Timestamp('2015-01-03'), False],
- [4, 5.5, pd.Timestamp('2015-01-04'), True]],
- columns=['a', 'b', 'c', 'd'])
- tm.assert_frame_equal(actual, expected)
-
- actual = pd.read_excel(os.path.join(self.dirpath,
- 'testskiprows' + ext),
- 'skiprows_list', skiprows=np.array([0, 2]))
- tm.assert_frame_equal(actual, expected)
-
- def test_read_excel_nrows(self, ext):
- # GH 16645
- num_rows_to_pull = 5
- actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
- nrows=num_rows_to_pull)
- expected = pd.read_excel(os.path.join(self.dirpath,
- 'test1' + ext))
- expected = expected[:num_rows_to_pull]
- tm.assert_frame_equal(actual, expected)
-
- def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
- # GH 16645
- expected = pd.read_excel(os.path.join(self.dirpath,
- 'test1' + ext))
- num_records_in_file = len(expected)
- num_rows_to_pull = num_records_in_file + 10
- actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
- nrows=num_rows_to_pull)
- tm.assert_frame_equal(actual, expected)
-
- def test_read_excel_nrows_non_integer_parameter(self, ext):
- # GH 16645
- msg = "'nrows' must be an integer >=0"
- with pytest.raises(ValueError, match=msg):
- pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
- nrows='5')
-
- def test_read_excel_squeeze(self, ext):
- # GH 12157
- f = os.path.join(self.dirpath, 'test_squeeze' + ext)
-
- actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
- expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
- expected.index.name = 'a'
- tm.assert_series_equal(actual, expected)
-
- actual = pd.read_excel(f, 'two_columns', squeeze=True)
- expected = pd.DataFrame({'a': [4, 5, 6],
- 'b': [2, 3, 4]})
- tm.assert_frame_equal(actual, expected)
-
- actual = pd.read_excel(f, 'one_column', squeeze=True)
- expected = pd.Series([1, 2, 3], name='a')
- tm.assert_series_equal(actual, expected)
-
-
-@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
-class TestXlrdReader(ReadingTestsBase):
- """
- This is the base class for the xlrd tests, and 3 different file formats
- are supported: xls, xlsx, xlsm
- """
-
- @td.skip_if_no("xlwt")
- def test_read_xlrd_book(self, ext):
- import xlrd
- df = self.frame
-
- engine = "xlrd"
- sheet_name = "SheetA"
-
- with ensure_clean(ext) as pth:
- df.to_excel(pth, sheet_name)
- book = xlrd.open_workbook(pth)
-
- with ExcelFile(book, engine=engine) as xl:
- result = read_excel(xl, sheet_name, index_col=0)
- tm.assert_frame_equal(df, result)
-
- result = read_excel(book, sheet_name=sheet_name,
- engine=engine, index_col=0)
- tm.assert_frame_equal(df, result)
-
-
-class _WriterBase(SharedItems):
-
- @pytest.fixture(autouse=True)
- def set_engine_and_path(self, request, merge_cells, engine, ext):
- """Fixture to set engine and open file for use in each test case
-
- Rather than requiring `engine=...` to be provided explicitly as an
- argument in each test, this fixture sets a global option to dictate
- which engine should be used to write Excel files. After executing
- the test it rolls back said change to the global option.
-
- It also uses a context manager to open a temporary excel file for
- the function to write to, accessible via `self.path`
-
- Notes
- -----
- This fixture will run as part of each test method defined in the
- class and any subclasses, on account of the `autouse=True`
- argument
- """
- option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
- prev_engine = get_option(option_name)
- set_option(option_name, engine)
- with ensure_clean(ext) as path:
- self.path = path
- yield
- set_option(option_name, prev_engine) # Roll back option change
-
-
-@pytest.mark.parametrize("merge_cells", [True, False])
-@pytest.mark.parametrize("engine,ext", [
- pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
- not td.safe_import('openpyxl'), reason='No openpyxl')),
- pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
- not td.safe_import('openpyxl'), reason='No openpyxl')),
- pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
- not td.safe_import('xlwt'), reason='No xlwt')),
- pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
- not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
-])
-class TestExcelWriter(_WriterBase):
- # Base class for test cases to run with different Excel writers.
-
- def test_excel_sheet_by_name_raise(self, *_):
- import xlrd
-
- gt = DataFrame(np.random.randn(10, 2))
- gt.to_excel(self.path)
-
- xl = ExcelFile(self.path)
- df = read_excel(xl, 0, index_col=0)
-
- tm.assert_frame_equal(gt, df)
-
- with pytest.raises(xlrd.XLRDError):
- read_excel(xl, "0")
-
- def test_excel_writer_context_manager(self, *_):
- with ExcelWriter(self.path) as writer:
- self.frame.to_excel(writer, "Data1")
- self.frame2.to_excel(writer, "Data2")
-
- with ExcelFile(self.path) as reader:
- found_df = read_excel(reader, "Data1", index_col=0)
- found_df2 = read_excel(reader, "Data2", index_col=0)
-
- tm.assert_frame_equal(found_df, self.frame)
- tm.assert_frame_equal(found_df2, self.frame2)
-
- def test_roundtrip(self, merge_cells, engine, ext):
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(self.path, 'test1')
- self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
- self.frame.to_excel(self.path, 'test1', header=False)
- self.frame.to_excel(self.path, 'test1', index=False)
-
- # test roundtrip
- self.frame.to_excel(self.path, 'test1')
- recons = read_excel(self.path, 'test1', index_col=0)
- tm.assert_frame_equal(self.frame, recons)
-
- self.frame.to_excel(self.path, 'test1', index=False)
- recons = read_excel(self.path, 'test1', index_col=None)
- recons.index = self.frame.index
- tm.assert_frame_equal(self.frame, recons)
-
- self.frame.to_excel(self.path, 'test1', na_rep='NA')
- recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
- tm.assert_frame_equal(self.frame, recons)
-
- # GH 3611
- self.frame.to_excel(self.path, 'test1', na_rep='88')
- recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
- tm.assert_frame_equal(self.frame, recons)
-
- self.frame.to_excel(self.path, 'test1', na_rep='88')
- recons = read_excel(self.path, 'test1', index_col=0,
- na_values=[88, 88.0])
- tm.assert_frame_equal(self.frame, recons)
-
- # GH 6573
- self.frame.to_excel(self.path, 'Sheet1')
- recons = read_excel(self.path, index_col=0)
- tm.assert_frame_equal(self.frame, recons)
-
- self.frame.to_excel(self.path, '0')
- recons = read_excel(self.path, index_col=0)
- tm.assert_frame_equal(self.frame, recons)
-
- # GH 8825 Pandas Series should provide to_excel method
- s = self.frame["A"]
- s.to_excel(self.path)
- recons = read_excel(self.path, index_col=0)
- tm.assert_frame_equal(s.to_frame(), recons)
-
- def test_mixed(self, merge_cells, engine, ext):
- self.mixed_frame.to_excel(self.path, 'test1')
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1', index_col=0)
- tm.assert_frame_equal(self.mixed_frame, recons)
-
- def test_ts_frame(self, *_):
- df = tm.makeTimeDataFrame()[:5]
-
- df.to_excel(self.path, "test1")
- reader = ExcelFile(self.path)
-
- recons = read_excel(reader, "test1", index_col=0)
- tm.assert_frame_equal(df, recons)
-
- def test_basics_with_nan(self, merge_cells, engine, ext):
- self.frame['A'][:5] = nan
- self.frame.to_excel(self.path, 'test1')
- self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
- self.frame.to_excel(self.path, 'test1', header=False)
- self.frame.to_excel(self.path, 'test1', index=False)
-
- @pytest.mark.parametrize("np_type", [
- np.int8, np.int16, np.int32, np.int64])
- def test_int_types(self, merge_cells, engine, ext, np_type):
- # Test np.int values read come back as int
- # (rather than float which is Excel's format).
- frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
- dtype=np_type)
- frame.to_excel(self.path, "test1")
-
- reader = ExcelFile(self.path)
- recons = read_excel(reader, "test1", index_col=0)
-
- int_frame = frame.astype(np.int64)
- tm.assert_frame_equal(int_frame, recons)
-
- recons2 = read_excel(self.path, "test1", index_col=0)
- tm.assert_frame_equal(int_frame, recons2)
-
- # Test with convert_float=False comes back as float.
- float_frame = frame.astype(float)
- recons = read_excel(self.path, "test1",
- convert_float=False, index_col=0)
- tm.assert_frame_equal(recons, float_frame,
- check_index_type=False,
- check_column_type=False)
-
- @pytest.mark.parametrize("np_type", [
- np.float16, np.float32, np.float64])
- def test_float_types(self, merge_cells, engine, ext, np_type):
- # Test np.float values read come back as float.
- frame = DataFrame(np.random.random_sample(10), dtype=np_type)
- frame.to_excel(self.path, "test1")
-
- reader = ExcelFile(self.path)
- recons = read_excel(reader, "test1", index_col=0).astype(np_type)
-
- tm.assert_frame_equal(frame, recons, check_dtype=False)
-
- @pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
- def test_bool_types(self, merge_cells, engine, ext, np_type):
- # Test np.bool values read come back as float.
- frame = (DataFrame([1, 0, True, False], dtype=np_type))
- frame.to_excel(self.path, "test1")
-
- reader = ExcelFile(self.path)
- recons = read_excel(reader, "test1", index_col=0).astype(np_type)
-
- tm.assert_frame_equal(frame, recons)
-
- def test_inf_roundtrip(self, *_):
- frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
- frame.to_excel(self.path, "test1")
-
- reader = ExcelFile(self.path)
- recons = read_excel(reader, "test1", index_col=0)
-
- tm.assert_frame_equal(frame, recons)
-
- def test_sheets(self, merge_cells, engine, ext):
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(self.path, 'test1')
- self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
- self.frame.to_excel(self.path, 'test1', header=False)
- self.frame.to_excel(self.path, 'test1', index=False)
-
- # Test writing to separate sheets
- writer = ExcelWriter(self.path)
- self.frame.to_excel(writer, 'test1')
- self.tsframe.to_excel(writer, 'test2')
- writer.save()
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1', index_col=0)
- tm.assert_frame_equal(self.frame, recons)
- recons = read_excel(reader, 'test2', index_col=0)
- tm.assert_frame_equal(self.tsframe, recons)
- assert 2 == len(reader.sheet_names)
- assert 'test1' == reader.sheet_names[0]
- assert 'test2' == reader.sheet_names[1]
-
- def test_colaliases(self, merge_cells, engine, ext):
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(self.path, 'test1')
- self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
- self.frame.to_excel(self.path, 'test1', header=False)
- self.frame.to_excel(self.path, 'test1', index=False)
-
- # column aliases
- col_aliases = Index(['AA', 'X', 'Y', 'Z'])
- self.frame2.to_excel(self.path, 'test1', header=col_aliases)
- reader = ExcelFile(self.path)
- rs = read_excel(reader, 'test1', index_col=0)
- xp = self.frame2.copy()
- xp.columns = col_aliases
- tm.assert_frame_equal(xp, rs)
-
- def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(self.path, 'test1')
- self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
- self.frame.to_excel(self.path, 'test1', header=False)
- self.frame.to_excel(self.path, 'test1', index=False)
-
- # test index_label
- frame = (DataFrame(np.random.randn(10, 2)) >= 0)
- frame.to_excel(self.path, 'test1',
- index_label=['test'],
- merge_cells=merge_cells)
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1',
- index_col=0,
- ).astype(np.int64)
- frame.index.names = ['test']
- assert frame.index.names == recons.index.names
-
- frame = (DataFrame(np.random.randn(10, 2)) >= 0)
- frame.to_excel(self.path,
- 'test1',
- index_label=['test', 'dummy', 'dummy2'],
- merge_cells=merge_cells)
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1',
- index_col=0,
- ).astype(np.int64)
- frame.index.names = ['test']
- assert frame.index.names == recons.index.names
-
- frame = (DataFrame(np.random.randn(10, 2)) >= 0)
- frame.to_excel(self.path,
- 'test1',
- index_label='test',
- merge_cells=merge_cells)
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1',
- index_col=0,
- ).astype(np.int64)
- frame.index.names = ['test']
- tm.assert_frame_equal(frame, recons.astype(bool))
-
- self.frame.to_excel(self.path,
- 'test1',
- columns=['A', 'B', 'C', 'D'],
- index=False, merge_cells=merge_cells)
- # take 'A' and 'B' as indexes (same row as cols 'C', 'D')
- df = self.frame.copy()
- df = df.set_index(['A', 'B'])
-
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1', index_col=[0, 1])
- tm.assert_frame_equal(df, recons, check_less_precise=True)
-
- def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
- df = DataFrame(np.random.randn(10, 4))
- df.index.name = 'foo'
-
- df.to_excel(self.path, merge_cells=merge_cells)
-
- xf = ExcelFile(self.path)
- result = read_excel(xf, xf.sheet_names[0],
- index_col=0)
-
- tm.assert_frame_equal(result, df)
- assert result.index.name == 'foo'
-
- def test_excel_roundtrip_datetime(self, merge_cells, *_):
- # datetime.date, not sure what to test here exactly
- tsf = self.tsframe.copy()
-
- tsf.index = [x.date() for x in self.tsframe.index]
- tsf.to_excel(self.path, "test1", merge_cells=merge_cells)
-
- reader = ExcelFile(self.path)
- recons = read_excel(reader, "test1", index_col=0)
-
- tm.assert_frame_equal(self.tsframe, recons)
-
- def test_excel_date_datetime_format(self, merge_cells, engine, ext):
- # see gh-4133
- #
- # Excel output format strings
- df = DataFrame([[date(2014, 1, 31),
- date(1999, 9, 24)],
- [datetime(1998, 5, 26, 23, 33, 4),
- datetime(2014, 2, 28, 13, 5, 13)]],
- index=["DATE", "DATETIME"], columns=["X", "Y"])
- df_expected = DataFrame([[datetime(2014, 1, 31),
- datetime(1999, 9, 24)],
- [datetime(1998, 5, 26, 23, 33, 4),
- datetime(2014, 2, 28, 13, 5, 13)]],
- index=["DATE", "DATETIME"], columns=["X", "Y"])
-
- with ensure_clean(ext) as filename2:
- writer1 = ExcelWriter(self.path)
- writer2 = ExcelWriter(filename2,
- date_format="DD.MM.YYYY",
- datetime_format="DD.MM.YYYY HH-MM-SS")
-
- df.to_excel(writer1, "test1")
- df.to_excel(writer2, "test1")
-
- writer1.close()
- writer2.close()
-
- reader1 = ExcelFile(self.path)
- reader2 = ExcelFile(filename2)
-
- rs1 = read_excel(reader1, "test1", index_col=0)
- rs2 = read_excel(reader2, "test1", index_col=0)
-
- tm.assert_frame_equal(rs1, rs2)
-
- # Since the reader returns a datetime object for dates,
- # we need to use df_expected to check the result.
- tm.assert_frame_equal(rs2, df_expected)
-
- def test_to_excel_interval_no_labels(self, *_):
- # see gh-19242
- #
- # Test writing Interval without labels.
- frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
- dtype=np.int64)
- expected = frame.copy()
-
- frame["new"] = pd.cut(frame[0], 10)
- expected["new"] = pd.cut(expected[0], 10).astype(str)
-
- frame.to_excel(self.path, "test1")
- reader = ExcelFile(self.path)
-
- recons = read_excel(reader, "test1", index_col=0)
- tm.assert_frame_equal(expected, recons)
-
- def test_to_excel_interval_labels(self, *_):
- # see gh-19242
- #
- # Test writing Interval with labels.
- frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
- dtype=np.int64)
- expected = frame.copy()
- intervals = pd.cut(frame[0], 10, labels=["A", "B", "C", "D", "E",
- "F", "G", "H", "I", "J"])
- frame["new"] = intervals
- expected["new"] = pd.Series(list(intervals))
-
- frame.to_excel(self.path, "test1")
- reader = ExcelFile(self.path)
-
- recons = read_excel(reader, "test1", index_col=0)
- tm.assert_frame_equal(expected, recons)
-
- def test_to_excel_timedelta(self, *_):
- # see gh-19242, gh-9155
- #
- # Test writing timedelta to xls.
- frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
- columns=["A"], dtype=np.int64)
- expected = frame.copy()
-
- frame["new"] = frame["A"].apply(lambda x: timedelta(seconds=x))
- expected["new"] = expected["A"].apply(
- lambda x: timedelta(seconds=x).total_seconds() / float(86400))
-
- frame.to_excel(self.path, "test1")
- reader = ExcelFile(self.path)
-
- recons = read_excel(reader, "test1", index_col=0)
- tm.assert_frame_equal(expected, recons)
-
- def test_to_excel_periodindex(self, merge_cells, engine, ext):
- frame = self.tsframe
- xp = frame.resample('M', kind='period').mean()
-
- xp.to_excel(self.path, 'sht1')
-
- reader = ExcelFile(self.path)
- rs = read_excel(reader, 'sht1', index_col=0)
- tm.assert_frame_equal(xp, rs.to_period('M'))
-
- def test_to_excel_multiindex(self, merge_cells, engine, ext):
- frame = self.frame
- arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
- new_index = MultiIndex.from_arrays(arrays,
- names=['first', 'second'])
- frame.index = new_index
-
- frame.to_excel(self.path, 'test1', header=False)
- frame.to_excel(self.path, 'test1', columns=['A', 'B'])
-
- # round trip
- frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
- reader = ExcelFile(self.path)
- df = read_excel(reader, 'test1', index_col=[0, 1])
- tm.assert_frame_equal(frame, df)
-
- # GH13511
- def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
- frame = pd.DataFrame({'A': [None, 2, 3],
- 'B': [10, 20, 30],
- 'C': np.random.sample(3)})
- frame = frame.set_index(['A', 'B'])
-
- frame.to_excel(self.path, merge_cells=merge_cells)
- df = read_excel(self.path, index_col=[0, 1])
- tm.assert_frame_equal(frame, df)
-
- # Test for Issue 11328. If column indices are integers, make
- # sure they are handled correctly for either setting of
- # merge_cells
- def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
- frame = self.frame
- arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
- new_index = MultiIndex.from_arrays(arrays,
- names=['first', 'second'])
- frame.index = new_index
-
- new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
- (50, 1), (50, 2)])
- frame.columns = new_cols_index
- header = [0, 1]
- if not merge_cells:
- header = 0
-
- # round trip
- frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
- reader = ExcelFile(self.path)
- df = read_excel(reader, 'test1', header=header,
- index_col=[0, 1])
- if not merge_cells:
- fm = frame.columns.format(sparsify=False,
- adjoin=False, names=False)
- frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
- tm.assert_frame_equal(frame, df)
-
- def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
- # try multiindex with dates
- tsframe = self.tsframe.copy()
- new_index = [tsframe.index, np.arange(len(tsframe.index))]
- tsframe.index = MultiIndex.from_arrays(new_index)
-
- tsframe.index.names = ['time', 'foo']
- tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
- reader = ExcelFile(self.path)
- recons = read_excel(reader, 'test1',
- index_col=[0, 1])
-
- tm.assert_frame_equal(tsframe, recons)
- assert recons.index.names == ('time', 'foo')
-
- def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
- ext):
- # Test writing and re-reading a MI witout the index. GH 5616.
-
- # Initial non-MI frame.
- frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
-
- # Add a MI.
- frame2 = frame1.copy()
- multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
- frame2.index = multi_index
-
- # Write out to Excel without the index.
- frame2.to_excel(self.path, 'test1', index=False)
-
- # Read it back in.
- reader = ExcelFile(self.path)
- frame3 = read_excel(reader, 'test1')
-
- # Test that it is the same as the initial frame.
- tm.assert_frame_equal(frame1, frame3)
-
- def test_to_excel_float_format(self, *_):
- df = DataFrame([[0.123456, 0.234567, 0.567567],
- [12.32112, 123123.2, 321321.2]],
- index=["A", "B"], columns=["X", "Y", "Z"])
- df.to_excel(self.path, "test1", float_format="%.2f")
-
- reader = ExcelFile(self.path)
- result = read_excel(reader, "test1", index_col=0)
-
- expected = DataFrame([[0.12, 0.23, 0.57],
- [12.32, 123123.20, 321321.20]],
- index=["A", "B"], columns=["X", "Y", "Z"])
- tm.assert_frame_equal(result, expected)
-
- def test_to_excel_output_encoding(self, merge_cells, engine, ext):
- # Avoid mixed inferred_type.
- df = DataFrame([[u"\u0192", u"\u0193", u"\u0194"],
- [u"\u0195", u"\u0196", u"\u0197"]],
- index=[u"A\u0192", u"B"],
- columns=[u"X\u0193", u"Y", u"Z"])
-
- with ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
- df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
- result = read_excel(filename, "TestSheet",
- encoding="utf8", index_col=0)
- tm.assert_frame_equal(result, df)
-
- def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
- with ensure_clean(u("\u0192u.") + ext) as filename:
- try:
- f = open(filename, "wb")
- except UnicodeEncodeError:
- pytest.skip("No unicode file names on this system")
- else:
- f.close()
-
- df = DataFrame([[0.123456, 0.234567, 0.567567],
- [12.32112, 123123.2, 321321.2]],
- index=["A", "B"], columns=["X", "Y", "Z"])
- df.to_excel(filename, "test1", float_format="%.2f")
-
- reader = ExcelFile(filename)
- result = read_excel(reader, "test1", index_col=0)
-
- expected = DataFrame([[0.12, 0.23, 0.57],
- [12.32, 123123.20, 321321.20]],
- index=["A", "B"], columns=["X", "Y", "Z"])
- tm.assert_frame_equal(result, expected)
-
- # def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
-
- # import StringIO
- # s = StringIO(
- # """Date,ticker,type,value
- # 2001-01-01,x,close,12.2
- # 2001-01-01,x,open ,12.1
- # 2001-01-01,y,close,12.2
- # 2001-01-01,y,open ,12.1
- # 2001-02-01,x,close,12.2
- # 2001-02-01,x,open ,12.1
- # 2001-02-01,y,close,12.2
- # 2001-02-01,y,open ,12.1
- # 2001-03-01,x,close,12.2
- # 2001-03-01,x,open ,12.1
- # 2001-03-01,y,close,12.2
- # 2001-03-01,y,open ,12.1""")
- # df = read_csv(s, parse_dates=["Date"])
- # pdf = df.pivot_table(values="value", rows=["ticker"],
- # cols=["Date", "type"])
-
- # try:
- # import xlwt
- # import xlrd
- # except ImportError:
- # pytest.skip
-
- # filename = '__tmp_to_excel_header_styling_xls__.xls'
- # pdf.to_excel(filename, 'test1')
-
- # wbk = xlrd.open_workbook(filename,
- # formatting_info=True)
- # assert ["test1"] == wbk.sheet_names()
- # ws = wbk.sheet_by_name('test1')
- # assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
- # for i in range(0, 2):
- # for j in range(0, 7):
- # xfx = ws.cell_xf_index(0, 0)
- # cell_xf = wbk.xf_list[xfx]
- # font = wbk.font_list
- # assert 1 == font[cell_xf.font_index].bold
- # assert 1 == cell_xf.border.top_line_style
- # assert 1 == cell_xf.border.right_line_style
- # assert 1 == cell_xf.border.bottom_line_style
- # assert 1 == cell_xf.border.left_line_style
- # assert 2 == cell_xf.alignment.hor_align
- # os.remove(filename)
- # def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
- # import StringIO
- # s = StringIO(
- # """Date,ticker,type,value
- # 2001-01-01,x,close,12.2
- # 2001-01-01,x,open ,12.1
- # 2001-01-01,y,close,12.2
- # 2001-01-01,y,open ,12.1
- # 2001-02-01,x,close,12.2
- # 2001-02-01,x,open ,12.1
- # 2001-02-01,y,close,12.2
- # 2001-02-01,y,open ,12.1
- # 2001-03-01,x,close,12.2
- # 2001-03-01,x,open ,12.1
- # 2001-03-01,y,close,12.2
- # 2001-03-01,y,open ,12.1""")
- # df = read_csv(s, parse_dates=["Date"])
- # pdf = df.pivot_table(values="value", rows=["ticker"],
- # cols=["Date", "type"])
- # try:
- # import openpyxl
- # from openpyxl.cell import get_column_letter
- # except ImportError:
- # pytest.skip
- # if openpyxl.__version__ < '1.6.1':
- # pytest.skip
- # # test xlsx_styling
- # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
- # pdf.to_excel(filename, 'test1')
- # wbk = openpyxl.load_workbook(filename)
- # assert ["test1"] == wbk.get_sheet_names()
- # ws = wbk.get_sheet_by_name('test1')
- # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
- # xlsaddrs += ["A%s" % i for i in range(1, 6)]
- # xlsaddrs += ["B1", "D1", "F1"]
- # for xlsaddr in xlsaddrs:
- # cell = ws.cell(xlsaddr)
- # assert cell.style.font.bold
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.top.border_style)
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.right.border_style)
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.bottom.border_style)
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.left.border_style)
- # assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
- # cell.style.alignment.horizontal)
- # mergedcells_addrs = ["C1", "E1", "G1"]
- # for maddr in mergedcells_addrs:
- # assert ws.cell(maddr).merged
- # os.remove(filename)
-
- @pytest.mark.parametrize("use_headers", [True, False])
- @pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
- @pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
- def test_excel_010_hemstring(self, merge_cells, engine, ext,
- c_idx_nlevels, r_idx_nlevels, use_headers):
-
- def roundtrip(data, header=True, parser_hdr=0, index=True):
- data.to_excel(self.path, header=header,
- merge_cells=merge_cells, index=index)
-
- xf = ExcelFile(self.path)
- return read_excel(xf, xf.sheet_names[0], header=parser_hdr)
-
- # Basic test.
- parser_header = 0 if use_headers else None
- res = roundtrip(DataFrame([0]), use_headers, parser_header)
-
- assert res.shape == (1, 2)
- assert res.iloc[0, 0] is not np.nan
-
- # More complex tests with multi-index.
- nrows = 5
- ncols = 3
-
- from pandas.util.testing import makeCustomDataframe as mkdf
- # ensure limited functionality in 0.10
- # override of gh-2370 until sorted out in 0.11
-
- df = mkdf(nrows, ncols, r_idx_nlevels=r_idx_nlevels,
- c_idx_nlevels=c_idx_nlevels)
-
- # This if will be removed once multi-column Excel writing
- # is implemented. For now fixing gh-9794.
- if c_idx_nlevels > 1:
- with pytest.raises(NotImplementedError):
- roundtrip(df, use_headers, index=False)
- else:
- res = roundtrip(df, use_headers)
-
- if use_headers:
- assert res.shape == (nrows, ncols + r_idx_nlevels)
- else:
- # First row taken as columns.
- assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
-
- # No NaNs.
- for r in range(len(res.index)):
- for c in range(len(res.columns)):
- assert res.iloc[r, c] is not np.nan
-
- def test_duplicated_columns(self, *_):
- # see gh-5235
- df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
- columns=["A", "B", "B"])
- df.to_excel(self.path, "test1")
- expected = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
- columns=["A", "B", "B.1"])
-
- # By default, we mangle.
- result = read_excel(self.path, "test1", index_col=0)
- tm.assert_frame_equal(result, expected)
-
- # Explicitly, we pass in the parameter.
- result = read_excel(self.path, "test1", index_col=0,
- mangle_dupe_cols=True)
- tm.assert_frame_equal(result, expected)
-
- # see gh-11007, gh-10970
- df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
- columns=["A", "B", "A", "B"])
- df.to_excel(self.path, "test1")
-
- result = read_excel(self.path, "test1", index_col=0)
- expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
- columns=["A", "B", "A.1", "B.1"])
- tm.assert_frame_equal(result, expected)
-
- # see gh-10982
- df.to_excel(self.path, "test1", index=False, header=False)
- result = read_excel(self.path, "test1", header=None)
-
- expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
- tm.assert_frame_equal(result, expected)
-
- msg = "Setting mangle_dupe_cols=False is not supported yet"
- with pytest.raises(ValueError, match=msg):
- read_excel(self.path, "test1", header=None, mangle_dupe_cols=False)
-
- def test_swapped_columns(self, merge_cells, engine, ext):
- # Test for issue #5427.
- write_frame = DataFrame({'A': [1, 1, 1],
- 'B': [2, 2, 2]})
- write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
-
- read_frame = read_excel(self.path, 'test1', header=0)
-
- tm.assert_series_equal(write_frame['A'], read_frame['A'])
- tm.assert_series_equal(write_frame['B'], read_frame['B'])
-
- def test_invalid_columns(self, *_):
- # see gh-10982
- write_frame = DataFrame({"A": [1, 1, 1],
- "B": [2, 2, 2]})
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- write_frame.to_excel(self.path, "test1", columns=["B", "C"])
-
- expected = write_frame.reindex(columns=["B", "C"])
- read_frame = read_excel(self.path, "test1", index_col=0)
- tm.assert_frame_equal(expected, read_frame)
-
- with pytest.raises(KeyError):
- write_frame.to_excel(self.path, "test1", columns=["C", "D"])
-
- def test_comment_arg(self, *_):
- # see gh-18735
- #
- # Test the comment argument functionality to read_excel.
-
- # Create file to read in.
- df = DataFrame({"A": ["one", "#one", "one"],
- "B": ["two", "two", "#two"]})
- df.to_excel(self.path, "test_c")
-
- # Read file without comment arg.
- result1 = read_excel(self.path, "test_c", index_col=0)
-
- result1.iloc[1, 0] = None
- result1.iloc[1, 1] = None
- result1.iloc[2, 1] = None
-
- result2 = read_excel(self.path, "test_c", comment="#", index_col=0)
- tm.assert_frame_equal(result1, result2)
-
- def test_comment_default(self, merge_cells, engine, ext):
- # Re issue #18735
- # Test the comment argument default to read_excel
-
- # Create file to read in
- df = DataFrame({'A': ['one', '#one', 'one'],
- 'B': ['two', 'two', '#two']})
- df.to_excel(self.path, 'test_c')
-
- # Read file with default and explicit comment=None
- result1 = read_excel(self.path, 'test_c')
- result2 = read_excel(self.path, 'test_c', comment=None)
- tm.assert_frame_equal(result1, result2)
-
- def test_comment_used(self, *_):
- # see gh-18735
- #
- # Test the comment argument is working as expected when used.
-
- # Create file to read in.
- df = DataFrame({"A": ["one", "#one", "one"],
- "B": ["two", "two", "#two"]})
- df.to_excel(self.path, "test_c")
-
- # Test read_frame_comment against manually produced expected output.
- expected = DataFrame({"A": ["one", None, "one"],
- "B": ["two", None, None]})
- result = read_excel(self.path, "test_c", comment="#", index_col=0)
- tm.assert_frame_equal(result, expected)
-
- def test_comment_empty_line(self, merge_cells, engine, ext):
- # Re issue #18735
- # Test that read_excel ignores commented lines at the end of file
-
- df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
- df.to_excel(self.path, index=False)
-
- # Test that all-comment lines at EoF are ignored
- expected = DataFrame({'a': [1], 'b': [2]})
- result = read_excel(self.path, comment='#')
- tm.assert_frame_equal(result, expected)
-
- def test_datetimes(self, merge_cells, engine, ext):
-
- # Test writing and reading datetimes. For issue #9139. (xref #9185)
- datetimes = [datetime(2013, 1, 13, 1, 2, 3),
- datetime(2013, 1, 13, 2, 45, 56),
- datetime(2013, 1, 13, 4, 29, 49),
- datetime(2013, 1, 13, 6, 13, 42),
- datetime(2013, 1, 13, 7, 57, 35),
- datetime(2013, 1, 13, 9, 41, 28),
- datetime(2013, 1, 13, 11, 25, 21),
- datetime(2013, 1, 13, 13, 9, 14),
- datetime(2013, 1, 13, 14, 53, 7),
- datetime(2013, 1, 13, 16, 37, 0),
- datetime(2013, 1, 13, 18, 20, 52)]
-
- write_frame = DataFrame({'A': datetimes})
- write_frame.to_excel(self.path, 'Sheet1')
- read_frame = read_excel(self.path, 'Sheet1', header=0)
-
- tm.assert_series_equal(write_frame['A'], read_frame['A'])
-
- def test_bytes_io(self, merge_cells, engine, ext):
- # see gh-7074
- bio = BytesIO()
- df = DataFrame(np.random.randn(10, 2))
-
- # Pass engine explicitly, as there is no file path to infer from.
- writer = ExcelWriter(bio, engine=engine)
- df.to_excel(writer)
- writer.save()
-
- bio.seek(0)
- reread_df = read_excel(bio, index_col=0)
- tm.assert_frame_equal(df, reread_df)
-
- def test_write_lists_dict(self, *_):
- # see gh-8188.
- df = DataFrame({"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
- "numeric": [1, 2, 3.0],
- "str": ["apple", "banana", "cherry"]})
- df.to_excel(self.path, "Sheet1")
- read = read_excel(self.path, "Sheet1", header=0, index_col=0)
-
- expected = df.copy()
- expected.mixed = expected.mixed.apply(str)
- expected.numeric = expected.numeric.astype("int64")
-
- tm.assert_frame_equal(read, expected)
-
- def test_true_and_false_value_options(self, *_):
- # see gh-13347
- df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"])
- expected = df.replace({"foo": True, "bar": False})
-
- df.to_excel(self.path)
- read_frame = read_excel(self.path, true_values=["foo"],
- false_values=["bar"], index_col=0)
- tm.assert_frame_equal(read_frame, expected)
-
- def test_freeze_panes(self, *_):
- # see gh-15160
- expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
- expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
-
- result = read_excel(self.path, index_col=0)
- tm.assert_frame_equal(result, expected)
-
- def test_path_path_lib(self, merge_cells, engine, ext):
- df = tm.makeDataFrame()
- writer = partial(df.to_excel, engine=engine)
-
- reader = partial(pd.read_excel, index_col=0)
- result = tm.round_trip_pathlib(writer, reader,
- path="foo.{ext}".format(ext=ext))
- tm.assert_frame_equal(result, df)
-
- def test_path_local_path(self, merge_cells, engine, ext):
- df = tm.makeDataFrame()
- writer = partial(df.to_excel, engine=engine)
-
- reader = partial(pd.read_excel, index_col=0)
- result = tm.round_trip_pathlib(writer, reader,
- path="foo.{ext}".format(ext=ext))
- tm.assert_frame_equal(result, df)
-
-
-@td.skip_if_no('openpyxl')
-@pytest.mark.parametrize("merge_cells,ext,engine", [
- (None, '.xlsx', 'openpyxl')])
-class TestOpenpyxlTests(_WriterBase):
-
- def test_to_excel_styleconverter(self, merge_cells, ext, engine):
- from openpyxl import styles
-
- hstyle = {
- "font": {
- "color": '00FF0000',
- "bold": True,
- },
- "borders": {
- "top": "thin",
- "right": "thin",
- "bottom": "thin",
- "left": "thin",
- },
- "alignment": {
- "horizontal": "center",
- "vertical": "top",
- },
- "fill": {
- "patternType": 'solid',
- 'fgColor': {
- 'rgb': '006666FF',
- 'tint': 0.3,
- },
- },
- "number_format": {
- "format_code": "0.00"
- },
- "protection": {
- "locked": True,
- "hidden": False,
- },
- }
-
- font_color = styles.Color('00FF0000')
- font = styles.Font(bold=True, color=font_color)
- side = styles.Side(style=styles.borders.BORDER_THIN)
- border = styles.Border(top=side, right=side, bottom=side, left=side)
- alignment = styles.Alignment(horizontal='center', vertical='top')
- fill_color = styles.Color(rgb='006666FF', tint=0.3)
- fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
-
- number_format = '0.00'
-
- protection = styles.Protection(locked=True, hidden=False)
-
- kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
- assert kw['font'] == font
- assert kw['border'] == border
- assert kw['alignment'] == alignment
- assert kw['fill'] == fill
- assert kw['number_format'] == number_format
- assert kw['protection'] == protection
-
- def test_write_cells_merge_styled(self, merge_cells, ext, engine):
- from pandas.io.formats.excel import ExcelCell
-
- sheet_name = 'merge_styled'
-
- sty_b1 = {'font': {'color': '00FF0000'}}
- sty_a2 = {'font': {'color': '0000FF00'}}
-
- initial_cells = [
- ExcelCell(col=1, row=0, val=42, style=sty_b1),
- ExcelCell(col=0, row=1, val=99, style=sty_a2),
- ]
-
- sty_merged = {'font': {'color': '000000FF', 'bold': True}}
- sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
- openpyxl_sty_merged = sty_kwargs['font']
- merge_cells = [
- ExcelCell(col=0, row=0, val='pandas',
- mergestart=1, mergeend=1, style=sty_merged),
- ]
-
- with ensure_clean(ext) as path:
- writer = _OpenpyxlWriter(path)
- writer.write_cells(initial_cells, sheet_name=sheet_name)
- writer.write_cells(merge_cells, sheet_name=sheet_name)
-
- wks = writer.sheets[sheet_name]
- xcell_b1 = wks['B1']
- xcell_a2 = wks['A2']
- assert xcell_b1.font == openpyxl_sty_merged
- assert xcell_a2.font == openpyxl_sty_merged
-
- @pytest.mark.parametrize("mode,expected", [
- ('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
- def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
- import openpyxl
- df = DataFrame([1], columns=['baz'])
-
- with ensure_clean(ext) as f:
- wb = openpyxl.Workbook()
- wb.worksheets[0].title = 'foo'
- wb.worksheets[0]['A1'].value = 'foo'
- wb.create_sheet('bar')
- wb.worksheets[1]['A1'].value = 'bar'
- wb.save(f)
-
- writer = ExcelWriter(f, engine=engine, mode=mode)
- df.to_excel(writer, sheet_name='baz', index=False)
- writer.save()
-
- wb2 = openpyxl.load_workbook(f)
- result = [sheet.title for sheet in wb2.worksheets]
- assert result == expected
-
- for index, cell_value in enumerate(expected):
- assert wb2.worksheets[index]['A1'].value == cell_value
-
-
-@td.skip_if_no('xlwt')
-@pytest.mark.parametrize("merge_cells,ext,engine", [
- (None, '.xls', 'xlwt')])
-class TestXlwtTests(_WriterBase):
-
- def test_excel_raise_error_on_multiindex_columns_and_no_index(
- self, merge_cells, ext, engine):
- # MultiIndex as columns is not yet implemented 9794
- cols = MultiIndex.from_tuples([('site', ''),
- ('2014', 'height'),
- ('2014', 'weight')])
- df = DataFrame(np.random.randn(10, 3), columns=cols)
- with pytest.raises(NotImplementedError):
- with ensure_clean(ext) as path:
- df.to_excel(path, index=False)
-
- def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
- engine):
- cols = MultiIndex.from_tuples([('site', ''),
- ('2014', 'height'),
- ('2014', 'weight')])
- df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
- with ensure_clean(ext) as path:
- df.to_excel(path, index=True)
-
- def test_excel_multiindex_index(self, merge_cells, ext, engine):
- # MultiIndex as index works so assert no error #9794
- cols = MultiIndex.from_tuples([('site', ''),
- ('2014', 'height'),
- ('2014', 'weight')])
- df = DataFrame(np.random.randn(3, 10), index=cols)
- with ensure_clean(ext) as path:
- df.to_excel(path, index=False)
-
- def test_to_excel_styleconverter(self, merge_cells, ext, engine):
- import xlwt
-
- hstyle = {"font": {"bold": True},
- "borders": {"top": "thin",
- "right": "thin",
- "bottom": "thin",
- "left": "thin"},
- "alignment": {"horizontal": "center", "vertical": "top"}}
-
- xls_style = _XlwtWriter._convert_to_style(hstyle)
- assert xls_style.font.bold
- assert xlwt.Borders.THIN == xls_style.borders.top
- assert xlwt.Borders.THIN == xls_style.borders.right
- assert xlwt.Borders.THIN == xls_style.borders.bottom
- assert xlwt.Borders.THIN == xls_style.borders.left
- assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
- assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
-
- def test_write_append_mode_raises(self, merge_cells, ext, engine):
- msg = "Append mode is not supported with xlwt!"
-
- with ensure_clean(ext) as f:
- with pytest.raises(ValueError, match=msg):
- ExcelWriter(f, engine=engine, mode='a')
-
-
-@td.skip_if_no('xlsxwriter')
-@pytest.mark.parametrize("merge_cells,ext,engine", [
- (None, '.xlsx', 'xlsxwriter')])
-class TestXlsxWriterTests(_WriterBase):
-
- @td.skip_if_no('openpyxl')
- def test_column_format(self, merge_cells, ext, engine):
- # Test that column formats are applied to cells. Test for issue #9167.
- # Applicable to xlsxwriter only.
- with warnings.catch_warnings():
- # Ignore the openpyxl lxml warning.
- warnings.simplefilter("ignore")
- import openpyxl
-
- with ensure_clean(ext) as path:
- frame = DataFrame({'A': [123456, 123456],
- 'B': [123456, 123456]})
-
- writer = ExcelWriter(path)
- frame.to_excel(writer)
-
- # Add a number format to col B and ensure it is applied to cells.
- num_format = '#,##0'
- write_workbook = writer.book
- write_worksheet = write_workbook.worksheets()[0]
- col_format = write_workbook.add_format({'num_format': num_format})
- write_worksheet.set_column('B:B', None, col_format)
- writer.save()
-
- read_workbook = openpyxl.load_workbook(path)
- try:
- read_worksheet = read_workbook['Sheet1']
- except TypeError:
- # compat
- read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
-
- # Get the number format from the cell.
- try:
- cell = read_worksheet['B2']
- except TypeError:
- # compat
- cell = read_worksheet.cell('B2')
-
- try:
- read_num_format = cell.number_format
- except Exception:
- read_num_format = cell.style.number_format._format_code
-
- assert read_num_format == num_format
-
- def test_write_append_mode_raises(self, merge_cells, ext, engine):
- msg = "Append mode is not supported with xlsxwriter!"
-
- with ensure_clean(ext) as f:
- with pytest.raises(ValueError, match=msg):
- ExcelWriter(f, engine=engine, mode='a')
-
-
-class TestExcelWriterEngineTests(object):
-
- @pytest.mark.parametrize('klass,ext', [
- pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
- not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
- pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
- not td.safe_import('openpyxl'), reason='No openpyxl')),
- pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
- not td.safe_import('xlwt'), reason='No xlwt'))
- ])
- def test_ExcelWriter_dispatch(self, klass, ext):
- with ensure_clean(ext) as path:
- writer = ExcelWriter(path)
- if ext == '.xlsx' and td.safe_import('xlsxwriter'):
- # xlsxwriter has preference over openpyxl if both installed
- assert isinstance(writer, _XlsxWriter)
- else:
- assert isinstance(writer, klass)
-
- def test_ExcelWriter_dispatch_raises(self):
- with pytest.raises(ValueError, match='No engine'):
- ExcelWriter('nothing')
-
- @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
- def test_register_writer(self):
- # some awkward mocking to test out dispatch and such actually works
- called_save = []
- called_write_cells = []
-
- class DummyClass(ExcelWriter):
- called_save = False
- called_write_cells = False
- supported_extensions = ['test', 'xlsx', 'xls']
- engine = 'dummy'
-
- def save(self):
- called_save.append(True)
-
- def write_cells(self, *args, **kwargs):
- called_write_cells.append(True)
-
- def check_called(func):
- func()
- assert len(called_save) >= 1
- assert len(called_write_cells) >= 1
- del called_save[:]
- del called_write_cells[:]
-
- with pd.option_context('io.excel.xlsx.writer', 'dummy'):
- register_writer(DummyClass)
- writer = ExcelWriter('something.test')
- assert isinstance(writer, DummyClass)
- df = tm.makeCustomDataframe(1, 1)
-
- func = lambda: df.to_excel('something.test')
- check_called(func)
- check_called(lambda: df.to_excel('something.xlsx'))
- check_called(
- lambda: df.to_excel(
- 'something.xls', engine='dummy'))
-
-
-@pytest.mark.parametrize('engine', [
- pytest.param('xlwt',
- marks=pytest.mark.xfail(reason='xlwt does not support '
- 'openpyxl-compatible '
- 'style dicts')),
- 'xlsxwriter',
- 'openpyxl',
-])
-def test_styler_to_excel(engine):
- def style(df):
- # XXX: RGB colors not supported in xlwt
- return DataFrame([['font-weight: bold', '', ''],
- ['', 'color: blue', ''],
- ['', '', 'text-decoration: underline'],
- ['border-style: solid', '', ''],
- ['', 'font-style: italic', ''],
- ['', '', 'text-align: right'],
- ['background-color: red', '', ''],
- ['number-format: 0%', '', ''],
- ['', '', ''],
- ['', '', ''],
- ['', '', '']],
- index=df.index, columns=df.columns)
-
- def assert_equal_style(cell1, cell2):
- # XXX: should find a better way to check equality
- assert cell1.alignment.__dict__ == cell2.alignment.__dict__
- assert cell1.border.__dict__ == cell2.border.__dict__
- assert cell1.fill.__dict__ == cell2.fill.__dict__
- assert cell1.font.__dict__ == cell2.font.__dict__
- assert cell1.number_format == cell2.number_format
- assert cell1.protection.__dict__ == cell2.protection.__dict__
-
- def custom_converter(css):
- # use bold iff there is custom style attached to the cell
- if css.strip(' \n;'):
- return {'font': {'bold': True}}
- return {}
-
- pytest.importorskip('jinja2')
- pytest.importorskip(engine)
-
- # Prepare spreadsheets
-
- df = DataFrame(np.random.randn(11, 3))
- with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
- writer = ExcelWriter(path, engine=engine)
- df.to_excel(writer, sheet_name='frame')
- df.style.to_excel(writer, sheet_name='unstyled')
- styled = df.style.apply(style, axis=None)
- styled.to_excel(writer, sheet_name='styled')
- ExcelFormatter(styled, style_converter=custom_converter).write(
- writer, sheet_name='custom')
- writer.save()
-
- if engine not in ('openpyxl', 'xlsxwriter'):
- # For other engines, we only smoke test
- return
- openpyxl = pytest.importorskip('openpyxl')
- wb = openpyxl.load_workbook(path)
-
- # (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
- n_cells = 0
- for col1, col2 in zip(wb['frame'].columns,
- wb['unstyled'].columns):
- assert len(col1) == len(col2)
- for cell1, cell2 in zip(col1, col2):
- assert cell1.value == cell2.value
- assert_equal_style(cell1, cell2)
- n_cells += 1
-
- # ensure iteration actually happened:
- assert n_cells == (11 + 1) * (3 + 1)
-
- # (2) check styling with default converter
-
- # XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
- alpha = '00' if engine == 'openpyxl' else 'FF'
-
- n_cells = 0
- for col1, col2 in zip(wb['frame'].columns,
- wb['styled'].columns):
- assert len(col1) == len(col2)
- for cell1, cell2 in zip(col1, col2):
- ref = '%s%d' % (cell2.column, cell2.row)
- # XXX: this isn't as strong a test as ideal; we should
- # confirm that differences are exclusive
- if ref == 'B2':
- assert not cell1.font.bold
- assert cell2.font.bold
- elif ref == 'C3':
- assert cell1.font.color.rgb != cell2.font.color.rgb
- assert cell2.font.color.rgb == alpha + '0000FF'
- elif ref == 'D4':
- # This fails with engine=xlsxwriter due to
- # https://bitbucket.org/openpyxl/openpyxl/issues/800
- if engine == 'xlsxwriter' \
- and (LooseVersion(openpyxl.__version__) <
- LooseVersion('2.4.6')):
- pass
- else:
- assert cell1.font.underline != cell2.font.underline
- assert cell2.font.underline == 'single'
- elif ref == 'B5':
- assert not cell1.border.left.style
- assert (cell2.border.top.style ==
- cell2.border.right.style ==
- cell2.border.bottom.style ==
- cell2.border.left.style ==
- 'medium')
- elif ref == 'C6':
- assert not cell1.font.italic
- assert cell2.font.italic
- elif ref == 'D7':
- assert (cell1.alignment.horizontal !=
- cell2.alignment.horizontal)
- assert cell2.alignment.horizontal == 'right'
- elif ref == 'B8':
- assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
- assert cell1.fill.patternType != cell2.fill.patternType
- assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
- assert cell2.fill.patternType == 'solid'
- elif ref == 'B9':
- assert cell1.number_format == 'General'
- assert cell2.number_format == '0%'
- else:
- assert_equal_style(cell1, cell2)
-
- assert cell1.value == cell2.value
- n_cells += 1
-
- assert n_cells == (11 + 1) * (3 + 1)
-
- # (3) check styling with custom converter
- n_cells = 0
- for col1, col2 in zip(wb['frame'].columns,
- wb['custom'].columns):
- assert len(col1) == len(col2)
- for cell1, cell2 in zip(col1, col2):
- ref = '%s%d' % (cell2.column, cell2.row)
- if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'):
- assert not cell1.font.bold
- assert cell2.font.bold
- else:
- assert_equal_style(cell1, cell2)
-
- assert cell1.value == cell2.value
- n_cells += 1
-
- assert n_cells == (11 + 1) * (3 + 1)
-
-
-@td.skip_if_no('openpyxl')
-@pytest.mark.skipif(not PY36, reason='requires fspath')
-class TestFSPath(object):
-
- def test_excelfile_fspath(self):
- with tm.ensure_clean('foo.xlsx') as path:
- df = DataFrame({"A": [1, 2]})
- df.to_excel(path)
- xl = ExcelFile(path)
- result = os.fspath(xl)
- assert result == path
-
- def test_excelwriter_fspath(self):
- with tm.ensure_clean('foo.xlsx') as path:
- writer = ExcelWriter(path)
- assert os.fspath(writer) == str(path)
| - [ ] closes #24472 and update to #24749
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
In brief, I have tried to split the quite large `test_excel.py` test file into logical chunks. I have done this by first splitting by reading and writing, and then by engine.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25334 | 2019-02-15T15:52:07Z | 2019-03-25T23:59:43Z | null | 2019-03-25T23:59:44Z |
Doc: corrects spelling in generic.py | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e2308836d982a..b1fcbba7bd7ec 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7604,16 +7604,16 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
- ... ['Capitve', 'Wild', 'Capitve', 'Wild']]
+ ... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed' : [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
- Falcon Capitve 390.0
+ Falcon Captive 390.0
Wild 350.0
- Parrot Capitve 30.0
+ Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
@@ -7623,7 +7623,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
>>> df.groupby(level=1).mean()
Max Speed
Type
- Capitve 210.0
+ Captive 210.0
Wild 185.0
"""
from pandas.core.groupby.groupby import groupby
| Corrects spelling of capitve to captive.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25333 | 2019-02-15T14:56:48Z | 2019-02-15T18:42:46Z | 2019-02-15T18:42:46Z | 2019-02-15T18:43:02Z |
BUG: Series.__setitem__ with datetimetz data | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 95362521f3b9f..f6cbfdb8949cf 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -133,7 +133,7 @@ Interval
Indexing
^^^^^^^^
--
+- Bug when setting a value to a :class:`Series` with timezone aware values and an :class:`Index` of tuples or strings (:issue:`12862`, :issue:`20441`)
-
-
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 31c6247436418..e82afb1cb0635 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1047,7 +1047,11 @@ def setitem(key, value):
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
- values = self._values
+ if is_extension_array_dtype(self):
+ # GH 20441: set_value expects and ndarray, not ExtensionArray
+ values = self.values
+ else:
+ values = self._values
try:
self.index._engine.set_value(values, key, value)
return
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index dbe667a166d0a..eaec6e18597ad 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -839,3 +839,24 @@ def test_head_tail(test_data):
assert_series_equal(test_data.series.head(0), test_data.series[0:0])
assert_series_equal(test_data.series.tail(), test_data.series[-5:])
assert_series_equal(test_data.series.tail(0), test_data.series[0:0])
+
+
+def test_setitem_tuple_with_datetimetz():
+ # GH 20441
+ arr = pd.date_range('2017', periods=4, tz='US/Eastern')
+ index = [(0, 1), (0, 2), (0, 3), (0, 4)]
+ result = Series(arr, index=index)
+ expected = result.copy()
+ result[(0, 1)] = np.nan
+ expected.iloc[0] = np.nan
+ assert_series_equal(result, expected)
+
+
+def test_setitem_str_with_datetimetz():
+ # GH 12862
+ result = pd.Series()
+ result['foo'] = pd.to_datetime(1000).tz_localize('UTC')
+ result['bar'] = pd.to_datetime(1001).tz_localize('UTC')
+ expected = pd.Series(pd.to_datetime([1000, 1001], utc=True),
+ index=['foo', 'bar'])
+ assert_series_equal(result, expected)
| - [x] closes #12862
- [x] closes #20441
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25331 | 2019-02-15T04:53:39Z | 2019-02-19T06:29:31Z | null | 2019-02-19T06:29:39Z |
MultiIndex Support for DataFrame.pivot | diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst
index 5c11be34e6ed4..4406243aa8b5e 100644
--- a/doc/source/user_guide/reshaping.rst
+++ b/doc/source/user_guide/reshaping.rst
@@ -90,6 +90,19 @@ You can then select subsets from the pivoted ``DataFrame``:
Note that this returns a view on the underlying data in the case where the data
are homogeneously-typed.
+Now :meth:`DataFrame.pivot` method also supports multiple columns as indexes.
+
+.. ipython:: python
+
+ df1 = pd.DataFrame({'variable1': ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'],
+ 'variable2': ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b'],
+ 'variable3': ['C', 'D', 'C', 'D', 'C', 'D', 'C', 'D'],
+ 'value': np.arange(8)})
+ df1
+
+ df1.pivot(index=['variable1', 'variable2'], columns='variable3',
+ values='value')
+
.. note::
:func:`~pandas.pivot` will error with a ``ValueError: Index contains duplicate
entries, cannot reshape`` if the index/column pair is not unique. In this
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 2ed2c21ba5584..8bbd972fa9ed0 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -28,6 +28,7 @@ Other Enhancements
- Indexing of ``DataFrame`` and ``Series`` now accepts zerodim ``np.ndarray`` (:issue:`24919`)
- :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`)
- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :meth:`datetime.time` objects with timezones (:issue:`24043`)
+- :meth:`DataFrame.pivot` now supports multiple column indexes by accepting a list of columns (:issue:`21425`)
- ``Series.str`` has gained :meth:`Series.str.casefold` method to removes all case distinctions present in a string (:issue:`25405`)
- :meth:`DataFrame.set_index` now works for instances of ``abc.Iterator``, provided their output is of the same length as the calling frame (:issue:`22484`, :issue:`24984`)
- :meth:`DatetimeIndex.union` now supports the ``sort`` argument. The behaviour of the sort parameter matches that of :meth:`Index.union` (:issue:`24994`)
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 8d7616c4b6b61..843193a85ed7d 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -368,18 +368,34 @@ def _convert_by(by):
@Appender(_shared_docs['pivot'], indents=1)
def pivot(data, index=None, columns=None, values=None):
if values is None:
- cols = [columns] if index is None else [index, columns]
+ # Make acceptable for multiple column indexes.
+ # cols = []
+ if is_list_like(index):
+ cols = index # cols.extend(index)
+ elif index is not None:
+ cols = [index]
+ else:
+ cols = []
+ cols.append(columns)
+
append = index is None
indexed = data.set_index(cols, append=append)
+
else:
if index is None:
- index = data.index
+ index = MultiIndex.from_arrays([data.index, data[columns]])
+ elif is_list_like(index):
+ # Iterating through the list of multiple columns of an index.
+ indexes = [data[column] for column in index]
+ indexes.append(data[columns])
+ index = MultiIndex.from_arrays(indexes)
else:
+ # Build multi-indexes if index is not None and not a list.
index = data[index]
- index = MultiIndex.from_arrays([index, data[columns]])
+ index = MultiIndex.from_arrays([index, data[columns]])
if is_list_like(values) and not isinstance(values, tuple):
- # Exclude tuple because it is seen as a single column name
+ # Exclude tuple because it is seen as a single column name.
indexed = data._constructor(data[values].values, index=index,
columns=values)
else:
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index e4fbb204af533..a897f6465c811 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -301,6 +301,33 @@ def test_pivot_multi_functions(self):
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize('method', [True, False])
+ def test_pivot_multiple_columns_as_index(self, method):
+ # adding the test case for multiple columns as index (#21425)
+ df = DataFrame({'lev1': [1, 1, 1, 1, 2, 2, 2, 2],
+ 'lev2': [1, 1, 2, 2, 1, 1, 2, 2],
+ 'lev3': [1, 2, 1, 2, 1, 2, 1, 2],
+ 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
+ data = [[0, 1], [2, 3], [4, 5], [6, 7]]
+ exp_index = pd.MultiIndex.from_product([[1, 2], [1, 2]],
+ names=['lev1', 'lev2'])
+ if method:
+ result = df.pivot(index=['lev1', 'lev2'],
+ columns='lev3',
+ values='values')
+ exp_columns = Index([1, 2], name='lev3')
+
+ else:
+ result = df.pivot(index=['lev1', 'lev2'],
+ columns='lev3')
+ exp_columns = MultiIndex(levels=[['values'], [1, 2]],
+ codes=[[0, 0], [0, 1]],
+ names=[None, 'lev3'])
+
+ expected = DataFrame(data=data, index=exp_index,
+ columns=exp_columns)
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
| - [ ] closes #21425. Related to #21425
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25330 | 2019-02-15T04:47:24Z | 2019-07-11T16:10:00Z | null | 2019-07-11T16:10:00Z |
REGR: fix TimedeltaIndex sum and datetime subtraction with NaT (#25282, #25317) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f528c058d2868..a7e522d27f8e2 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -26,6 +26,8 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
+- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the `Series` on the right contains null values (:issue:`25317`)
+- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`)
- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index cd8e8ed520ddc..75cf658423210 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -720,11 +720,11 @@ def _sub_datetime_arraylike(self, other):
self_i8 = self.asi8
other_i8 = other.asi8
+ arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8,
- arr_mask=self._isnan)
+ arr_mask=arr_mask)
if self._hasnans or other._hasnans:
- mask = (self._isnan) | (other._isnan)
- new_values[mask] = iNaT
+ new_values[arr_mask] = iNaT
return new_values.view('timedelta64[ns]')
def _add_offset(self, offset):
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 06e2bf76fcf96..74fe8072e6924 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -190,6 +190,8 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
"ndarray, or Series or Index containing one of those."
)
raise ValueError(msg.format(type(values).__name__))
+ if values.ndim != 1:
+ raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == 'i8':
# for compat with datetime/timedelta/period shared methods,
@@ -945,6 +947,9 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
.format(dtype=data.dtype))
data = np.array(data, copy=copy)
+ if data.ndim != 1:
+ raise ValueError("Only 1-dimensional input arrays are supported.")
+
assert data.dtype == 'm8[ns]', data
return data, inferred_freq
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f2c8ac6e9b413..b5f3c929a7f36 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -665,7 +665,8 @@ def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc.
"""
- if is_bool_dtype(result):
+ result = lib.item_from_zerodim(result)
+ if is_bool_dtype(result) or lib.is_scalar(result):
return result
attrs = self._get_attributes_dict()
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 405dc0805a285..c81a371f37dc1 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1440,6 +1440,20 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture,
class TestDatetime64OverflowHandling(object):
# TODO: box + de-duplicate
+ def test_dt64_overflow_masking(self, box_with_array):
+ # GH#25317
+ left = Series([Timestamp('1969-12-31')])
+ right = Series([NaT])
+
+ left = tm.box_expected(left, box_with_array)
+ right = tm.box_expected(right, box_with_array)
+
+ expected = TimedeltaIndex([NaT])
+ expected = tm.box_expected(expected, box_with_array)
+
+ result = left - right
+ tm.assert_equal(result, expected)
+
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = pd.Timestamp('1700-01-31')
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 6b4662ca02e80..1fec533a14a6f 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -9,6 +9,18 @@
class TestTimedeltaArrayConstructor(object):
+ def test_only_1dim_accepted(self):
+ # GH#25282
+ arr = np.array([0, 1, 2, 3], dtype='m8[h]').astype('m8[ns]')
+
+ with pytest.raises(ValueError, match="Only 1-dimensional"):
+ # 2-dim
+ TimedeltaArray(arr.reshape(2, 2))
+
+ with pytest.raises(ValueError, match="Only 1-dimensional"):
+ # 0-dim
+ TimedeltaArray(arr[[0]].squeeze())
+
def test_freq_validation(self):
# ensure that the public constructor cannot create an invalid instance
arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9
@@ -51,6 +63,16 @@ def test_copy(self):
class TestTimedeltaArray(object):
+ def test_np_sum(self):
+ # GH#25282
+ vals = np.arange(5, dtype=np.int64).view('m8[h]').astype('m8[ns]')
+ arr = TimedeltaArray(vals)
+ result = np.sum(arr)
+ assert result == vals.sum()
+
+ result = np.sum(pd.TimedeltaIndex(arr))
+ assert result == vals.sum()
+
def test_from_sequence_dtype(self):
msg = "dtype .*object.* cannot be converted to timedelta64"
with pytest.raises(ValueError, match=msg):
| closes #25282
closes #25317
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25329 | 2019-02-15T02:53:40Z | 2019-02-20T10:52:02Z | 2019-02-20T10:52:02Z | 2019-02-20T14:39:53Z |
DOC: Improvement docstring of DataFrame.rank() | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e2308836d982a..2e534d25bcfca 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8286,34 +8286,85 @@ def last(self, offset):
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
"""
- Compute numerical data ranks (1 through n) along axis. Equal values are
- assigned a rank that is the average of the ranks of those values.
+ Compute numerical data ranks (1 through n) along axis.
+
+ By default, equal values are assigned a rank that is the average of the
+ ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
- index to direct ranking
- method : {'average', 'min', 'max', 'first', 'dense'}
- * average: average rank of group
- * min: lowest rank in group
- * max: highest rank in group
+ Index to direct ranking.
+ method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
+ How to rank the group of records that have the same value
+ (i.e. ties):
+
+ * average: average rank of the group
+ * min: lowest rank in the group
+ * max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
- numeric_only : boolean, default None
- Include only float, int, boolean data. Valid only for DataFrame or
- Panel objects
- na_option : {'keep', 'top', 'bottom'}
- * keep: leave NA values where they are
- * top: smallest rank if ascending
- * bottom: smallest rank if descending
- ascending : boolean, default True
- False for ranks by high (1) to low (N)
- pct : boolean, default False
- Computes percentage rank of data
+ numeric_only : bool, optional
+ For DataFrame objects, rank only numeric columns if set to True.
+ na_option : {'keep', 'top', 'bottom'}, default 'keep'
+ How to rank NaN values:
+
+ * keep: assign NaN rank to NaN values
+ * top: assign smallest rank to NaN values if ascending
+ * bottom: assign highest rank to NaN values if ascending
+ ascending : bool, default True
+ Whether or not the elements should be ranked in ascending order.
+ pct : bool, default False
+ Whether or not to display the returned rankings in percentile
+ form.
Returns
-------
- ranks : same type as caller
+ same type as caller
+ Return a Series or DataFrame with data ranks as values.
+
+ See Also
+ --------
+ core.groupby.GroupBy.rank : Rank of values within each group.
+
+ Examples
+ --------
+
+ >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
+ ... 'spider', 'snake'],
+ ... 'Number_legs': [4, 2, 4, 8, np.nan]})
+ >>> df
+ Animal Number_legs
+ 0 cat 4.0
+ 1 penguin 2.0
+ 2 dog 4.0
+ 3 spider 8.0
+ 4 snake NaN
+
+ The following example shows how the method behaves with the above
+ parameters:
+
+ * default_rank: this is the default behaviour obtained without using
+ any parameter.
+ * max_rank: setting ``method = 'max'`` the records that have the
+ same values are ranked using the highest rank (e.g.: since 'cat'
+ and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
+ * NA_bottom: choosing ``na_option = 'bottom'``, if there are records
+ with NaN values they are placed at the bottom of the ranking.
+ * pct_rank: when setting ``pct = True``, the ranking is expressed as
+ percentile rank.
+
+ >>> df['default_rank'] = df['Number_legs'].rank()
+ >>> df['max_rank'] = df['Number_legs'].rank(method='max')
+ >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
+ >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
+ >>> df
+ Animal Number_legs default_rank max_rank NA_bottom pct_rank
+ 0 cat 4.0 2.5 3.0 2.5 0.625
+ 1 penguin 2.0 1.0 1.0 1.0 0.250
+ 2 dog 4.0 2.5 3.0 2.5 0.625
+ 3 spider 8.0 4.0 4.0 4.0 1.000
+ 4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
| - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
I have made the changes requested in the pull request #23263
This is the output of the docstring validation:
```
3 Errors found:
Parameter "method" description should finish with "."
Parameter "na_option" description should finish with "."
The first line of the Returns section should contain only the type, unless multiple values are being returned
```
In the previous pull request I have been told that I could ignore these 3 errors.
Please let me know if I can further improve the docstring.
Thanks a lot | https://api.github.com/repos/pandas-dev/pandas/pulls/25328 | 2019-02-15T00:51:58Z | 2019-05-06T18:56:50Z | 2019-05-06T18:56:50Z | 2019-05-06T18:57:01Z |
#14873: test for groupby.agg coercing booleans | diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 9de8a08809009..0c2e74c0b735f 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -286,3 +286,20 @@ def test_multi_function_flexible_mix(df):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_agg_coercing_bools():
+ # issue 14873
+ dat = pd.DataFrame(
+ {'a': [1, 1, 2, 2], 'b': [0, 1, 2, 3], 'c': [None, None, 1, 1]})
+ gp = dat.groupby('a')
+
+ index = Index([1, 2], name='a')
+
+ result = gp['b'].aggregate(lambda x: (x != 0).all())
+ expected = Series([False, True], index=index, name='b')
+ tm.assert_series_equal(result, expected)
+
+ result = gp['c'].aggregate(lambda x: x.isnull().all())
+ expected = Series([True, False], index=index, name='c')
+ tm.assert_series_equal(result, expected)
| - [x] closes #14873
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The referenced issue seems to be solved in the current master. So added a test for the edge case. | https://api.github.com/repos/pandas-dev/pandas/pulls/25327 | 2019-02-14T20:17:59Z | 2019-02-19T14:11:42Z | 2019-02-19T14:11:42Z | 2019-02-19T14:35:41Z |
Backport PR #25323 on branch 0.24.x (Skipped broken Py2 / Windows test) | diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index e22b9a0ef25e3..92b4e5a99041a 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -9,7 +9,7 @@
import numpy as np # noqa
import pytest
-from pandas.compat import PY36
+from pandas.compat import PY2, PY36, is_platform_windows
from pandas import DataFrame
from pandas.util import testing as tm
@@ -58,6 +58,8 @@ def test_xarray(df):
assert df.to_xarray() is not None
+@pytest.mark.skipif(is_platform_windows() and PY2,
+ reason="Broken on Windows / Py2")
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
| Backport PR #25323: Skipped broken Py2 / Windows test | https://api.github.com/repos/pandas-dev/pandas/pulls/25325 | 2019-02-14T17:28:21Z | 2019-02-15T09:20:58Z | 2019-02-15T09:20:58Z | 2019-02-15T09:20:58Z |
Skipped broken Py2 / Windows test | diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index e22b9a0ef25e3..92b4e5a99041a 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -9,7 +9,7 @@
import numpy as np # noqa
import pytest
-from pandas.compat import PY36
+from pandas.compat import PY2, PY36, is_platform_windows
from pandas import DataFrame
from pandas.util import testing as tm
@@ -58,6 +58,8 @@ def test_xarray(df):
assert df.to_xarray() is not None
+@pytest.mark.skipif(is_platform_windows() and PY2,
+ reason="Broken on Windows / Py2")
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
| quick workaround for #25293
@jreback | https://api.github.com/repos/pandas-dev/pandas/pulls/25323 | 2019-02-14T16:51:27Z | 2019-02-14T17:27:28Z | 2019-02-14T17:27:28Z | 2019-02-14T17:29:33Z |
fix the function find_common_types bug | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index f6561948df99a..6c4a801155899 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1075,7 +1075,7 @@ def find_common_type(types):
Parameters
----------
- types : list of dtypes
+ types : list_like
Returns
-------
@@ -1090,7 +1090,7 @@ def find_common_type(types):
if len(types) == 0:
raise ValueError('no types given')
- first = types[0]
+ first = [t for t in types][0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
| ` types[0]` can raise a KeyError when `types` is a `pd.Series` . see issue #25270
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25320 | 2019-02-14T13:25:32Z | 2019-03-20T02:05:31Z | null | 2019-03-20T02:05:31Z |
... | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c63bc5164e25b..921be7a63176d 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1171,10 +1171,18 @@ def std(self, ddof=1, *args, **kwargs):
ddof : integer, default 1
degrees of freedom
"""
-
- # TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
- return np.sqrt(self.var(ddof=ddof, **kwargs))
+ if ddof == 1:
+ try:
+ return self._cython_agg_general('std', **kwargs)
+ except Exception:
+ f = lambda x: x.std(ddof=ddof, **kwargs)
+ with _group_selection_context(self):
+ return self._python_agg_general(f)
+ else:
+ f = lambda x: x.std(ddof=ddof, **kwargs)
+ with _group_selection_context(self):
+ return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_common_see_also)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 1ae8efd2f6867..630ac883b40ca 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1698,3 +1698,35 @@ def test_groupby_agg_ohlc_non_first():
result = df.groupby(pd.Grouper(freq='D')).agg(['sum', 'ohlc'])
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_std_with_as_index_false():
+ # https://github.com/pandas-dev/pandas/issues/16799
+
+ df = pd.DataFrame({
+ "A": ["a", "b", "a", "b"],
+ "B": ["A", "B", "A", "B"],
+ "X": [1, 2, 3, 4]
+ })
+
+ group_var = df.groupby(
+ ["A", "B"],
+ as_index=False,
+ ).var()
+
+ # A B X
+ # 0 a A 2
+ # 1 b B 2
+
+ group_std = df.groupby(
+ ["A", "B"],
+ as_index=False,
+ ).std()
+
+ # A B X
+ # 0 a A 1.414214
+ # 1 b B 1.414214
+
+ assert_series_equal(
+ np.sqrt(group_var["X"]),
+ group_std["X"])
| ... | https://api.github.com/repos/pandas-dev/pandas/pulls/25315 | 2019-02-14T05:05:55Z | 2019-02-15T10:31:33Z | null | 2019-02-15T17:38:52Z |
#9236: test for the DataFrame.groupby with MultiIndex having pd.NaT | diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 1ae8efd2f6867..12a5d494648fc 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1698,3 +1698,19 @@ def test_groupby_agg_ohlc_non_first():
result = df.groupby(pd.Grouper(freq='D')).agg(['sum', 'ohlc'])
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_multiindex_nat():
+ # GH 9236
+ values = [
+ (pd.NaT, 'a'),
+ (datetime(2012, 1, 2), 'a'),
+ (datetime(2012, 1, 2), 'b'),
+ (datetime(2012, 1, 3), 'a')
+ ]
+ mi = pd.MultiIndex.from_tuples(values, names=['date', None])
+ ser = pd.Series([3, 2, 2.5, 4], index=mi)
+
+ result = ser.groupby(level=1).mean()
+ expected = pd.Series([3., 2.5], index=["a", "b"])
+ assert_series_equal(result, expected)
| - [x] closes #9236
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The referenced issue seems to be solved in the current master. So added a test for the edge case. | https://api.github.com/repos/pandas-dev/pandas/pulls/25310 | 2019-02-13T22:10:35Z | 2019-02-19T13:32:42Z | 2019-02-19T13:32:42Z | 2019-02-19T18:55:20Z |
Rt05 documentation error fix issue 25108 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index eacab199cc0be..ac6aade106ce6 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -241,8 +241,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL06, GL07, GL09, SS04, PR03, PR05, PR10, EX04, RT04, SS05, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR04,PR05,EX04,RT04,SS05,SA05
+ MSG='Validate docstrings (GL06, GL07, GL09, SS04, PR03, PR05, PR10, EX04, RT04, RT05, SS05, SA05)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR04,PR05,EX04,RT04,RT05,SS05,SA05
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 77681f6ac3f93..c5c8f47ad6dba 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -291,7 +291,7 @@ def unique(values):
unique values.
If the input is an Index, the return is an Index
If the input is a Categorical dtype, the return is a Categorical
- If the input is a Series/ndarray, the return will be an ndarray
+ If the input is a Series/ndarray, the return will be an ndarray.
See Also
--------
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index ab58f86e0a6bc..d7d0882bbcc94 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1292,7 +1292,7 @@ def __array__(self, dtype=None):
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
- categorical.categories.dtype
+ categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 73e799f9e0a36..84536ac72a455 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -154,7 +154,7 @@ def strftime(self, date_format):
Returns
-------
Index
- Index of formatted strings
+ Index of formatted strings.
See Also
--------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 7ac18b79daba1..cf97c94f6d129 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2743,7 +2743,7 @@ def set_value(self, index, col, value, takeable=False):
-------
DataFrame
If label pair is contained, will be reference to calling DataFrame,
- otherwise a new object
+ otherwise a new object.
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1a404630b660e..e2308836d982a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4953,7 +4953,7 @@ def pipe(self, func, *args, **kwargs):
If DataFrame.agg is called with a single function, returns a Series
If DataFrame.agg is called with several functions, returns a DataFrame
If Series.agg is called with single function, returns a scalar
- If Series.agg is called with several functions, returns a Series
+ If Series.agg is called with several functions, returns a Series.
%(see_also)s
@@ -5349,7 +5349,7 @@ def get_values(self):
Returns
-------
numpy.ndarray
- Numpy representation of DataFrame
+ Numpy representation of DataFrame.
See Also
--------
@@ -5428,7 +5428,7 @@ def get_ftype_counts(self):
-------
dtype : Series
Series with the count of columns with each type and
- sparsity (dense/sparse)
+ sparsity (dense/sparse).
See Also
--------
@@ -6657,7 +6657,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
- some or all ``NaN`` values
+ some or all ``NaN`` values.
See Also
--------
@@ -6877,11 +6877,11 @@ def asof(self, where, subset=None):
-------
scalar, Series, or DataFrame
- Scalar : when `self` is a Series and `where` is a scalar
+ Scalar : when `self` is a Series and `where` is a scalar.
Series: when `self` is a Series and `where` is an array-like,
- or when `self` is a DataFrame and `where` is a scalar
+ or when `self` is a DataFrame and `where` is a scalar.
DataFrame : when `self` is a DataFrame and `where` is an
- array-like
+ array-like.
See Also
--------
@@ -7235,7 +7235,7 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False,
-------
Series or DataFrame
Same type as calling object with the values outside the
- clip boundaries replaced
+ clip boundaries replaced.
Examples
--------
@@ -8386,7 +8386,7 @@ def ranker(data):
Returns
-------
(left, right) : (%(klass)s, type of other)
- Aligned objects
+ Aligned objects.
""")
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index cf813f4c3030b..f2c8ac6e9b413 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1828,7 +1828,7 @@ def isna(self):
Returns
-------
numpy.ndarray
- A boolean array of whether my values are NA
+ A boolean array of whether my values are NA.
See Also
--------
@@ -3098,9 +3098,9 @@ def reindex(self, target, method=None, level=None, limit=None,
Returns
-------
new_index : pd.Index
- Resulting index
+ Resulting index.
indexer : np.ndarray or None
- Indices of output values in original index
+ Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
@@ -4259,7 +4259,7 @@ def shift(self, periods=1, freq=None):
Returns
-------
pandas.Index
- Shifted index
+ Shifted index.
See Also
--------
@@ -4422,7 +4422,7 @@ def set_value(self, arr, key, value):
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
- These correspond to the -1 in the indexer array
+ These correspond to the -1 in the indexer array.
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index e2237afbcac0f..efb77b5d155a1 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1956,7 +1956,7 @@ def swaplevel(self, i=-2, j=-1):
Returns
-------
MultiIndex
- A new MultiIndex
+ A new MultiIndex.
.. versionchanged:: 0.18.1
@@ -2053,9 +2053,9 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
Returns
-------
sorted_index : pd.MultiIndex
- Resulting index
+ Resulting index.
indexer : np.ndarray
- Indices of output values in original index
+ Indices of output values in original index.
"""
from pandas.core.sorting import indexer_from_factorized
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index dda5533f1ea7b..1555542079d80 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -540,7 +540,7 @@ def set_value(self, *args, **kwargs):
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
- otherwise a new object
+ otherwise a new object.
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
@@ -803,7 +803,7 @@ def major_xs(self, key):
Returns
-------
y : DataFrame
- Index -> minor axis, columns -> items
+ Index -> minor axis, columns -> items.
Notes
-----
@@ -827,7 +827,7 @@ def minor_xs(self, key):
Returns
-------
y : DataFrame
- Index -> major axis, columns -> items
+ Index -> major axis, columns -> items.
Notes
-----
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 312a108ad3380..0fa80de812c5f 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -230,7 +230,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
- (i, j)
+ (i, j).
Notes
-----
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b2011fdcdee98..31c6247436418 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1215,7 +1215,7 @@ def set_value(self, label, value, takeable=False):
-------
Series
If label is contained, will be reference to calling Series,
- otherwise a new object
+ otherwise a new object.
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
@@ -1648,10 +1648,19 @@ def unique(self):
Returns
-------
ndarray or ExtensionArray
- The unique values returned as a NumPy array. In case of an
- extension-array backed Series, a new
- :class:`~api.extensions.ExtensionArray` of that type with just
- the unique values is returned. This includes
+ The unique values returned as a NumPy array. See Notes.
+
+ See Also
+ --------
+ unique : Top-level unique method for any 1-d array-like object.
+ Index.unique : Return Index with unique values from an Index object.
+
+ Notes
+ -----
+ Returns the unique values as a NumPy array. In case of an
+ extension-array backed Series, a new
+ :class:`~api.extensions.ExtensionArray` of that type with just
+ the unique values is returned. This includes
* Categorical
* Period
@@ -1660,11 +1669,6 @@ def unique(self):
* Sparse
* IntegerNA
- See Also
- --------
- unique : Top-level unique method for any 1-d array-like object.
- Index.unique : Return Index with unique values from an Index object.
-
Examples
--------
>>> pd.Series([2, 1, 3, 3], name='A').unique()
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index b8a7eb5b0c570..08ce649d8602c 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -59,7 +59,7 @@ def to_numeric(arg, errors='raise', downcast=None):
Returns
-------
ret : numeric if parsing succeeded.
- Return type depends on input. Series if Series, otherwise ndarray
+ Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
diff --git a/pandas/core/window.py b/pandas/core/window.py
index fb37d790f950c..9e29fdb94c1e0 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -1271,7 +1271,7 @@ def skew(self, **kwargs):
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
- calculation
+ calculation.
See Also
--------
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index cd6e3505d71db..c8b5dc6b9b7c0 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -435,7 +435,7 @@ def render(self, **kwargs):
Returns
-------
rendered : str
- The rendered HTML
+ The rendered HTML.
Notes
-----
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index a525b9cff1182..2c672f235f1e1 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2050,9 +2050,17 @@ def plot_series(data, kind='line', ax=None, # Series unique
Returns
-------
- result :
+ result
+ See Notes.
- The return type depends on the `return_type` parameter:
+ See Also
+ --------
+ Series.plot.hist: Make a histogram.
+ matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
+
+ Notes
+ -----
+ The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
@@ -2063,13 +2071,6 @@ def plot_series(data, kind='line', ax=None, # Series unique
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
- See Also
- --------
- Series.plot.hist: Make a histogram.
- matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
-
- Notes
- -----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
@@ -3332,7 +3333,7 @@ def area(self, x=None, y=None, **kwds):
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
- Area plot, or array of area plots if subplots is True
+ Area plot, or array of area plots if subplots is True.
See Also
--------
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 62a33245f99ef..21592a5b4a0a1 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -390,7 +390,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
Returns
-------
fig : matplotlib.figure.Figure
- Matplotlib figure
+ Matplotlib figure.
See Also
--------
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index f591b24f5b648..4802447cbc99d 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -68,7 +68,7 @@ def to_offset(freq):
Returns
-------
delta : DateOffset
- None if freq is None
+ None if freq is None.
Raises
------
| - [x] closes #25108
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] updates CI script to check for future testing automation | https://api.github.com/repos/pandas-dev/pandas/pulls/25309 | 2019-02-13T20:41:40Z | 2019-02-14T19:18:09Z | 2019-02-14T19:18:05Z | 2019-02-14T20:37:45Z |
BUG: Groupby.agg with reduction function with tz aware data | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index a6f7395f5177e..f847c1d827186 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -229,8 +229,8 @@ Groupby/Resample/Rolling
- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`)
- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`)
--
--
+- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying a aggregation function to timezone aware data (:issue:`23683`)
+- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`)
Reshaping
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 507567cf480d7..517d59c399179 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -342,7 +342,9 @@ cdef class SeriesGrouper:
index = None
else:
values = dummy.values
- if dummy.dtype != self.arr.dtype:
+ # GH 23683: datetimetz types are equivalent to datetime types here
+ if (dummy.dtype != self.arr.dtype
+ and values.dtype != self.arr.dtype):
raise ValueError('Dummy array must be same dtype')
if not values.flags.contiguous:
values = values.copy()
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c364f069bf53d..926da40deaff2 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -26,7 +26,8 @@ class providing the base-class of operations.
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
- ensure_float, is_extension_array_dtype, is_numeric_dtype, is_scalar)
+ ensure_float, is_datetime64tz_dtype, is_extension_array_dtype,
+ is_numeric_dtype, is_scalar)
from pandas.core.dtypes.missing import isna, notna
from pandas.api.types import (
@@ -766,7 +767,21 @@ def _try_cast(self, result, obj, numeric_only=False):
dtype = obj.dtype
if not is_scalar(result):
- if is_extension_array_dtype(dtype):
+ if is_datetime64tz_dtype(dtype):
+ # GH 23683
+ # Prior results _may_ have been generated in UTC.
+ # Ensure we localize to UTC first before converting
+ # to the target timezone
+ try:
+ result = obj._values._from_sequence(
+ result, dtype='datetime64[ns, UTC]'
+ )
+ result = result.astype(dtype)
+ except TypeError:
+ # _try_cast was called at a point where the result
+ # was already tz-aware
+ pass
+ elif is_extension_array_dtype(dtype):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index b5214b11bddcc..cacfdb7694de1 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -512,3 +512,18 @@ def test_agg_list_like_func():
expected = pd.DataFrame({'A': [str(x) for x in range(3)],
'B': [[str(x)] for x in range(3)]})
tm.assert_frame_equal(result, expected)
+
+
+def test_agg_lambda_with_timezone():
+ # GH 23683
+ df = pd.DataFrame({
+ 'tag': [1, 1],
+ 'date': [
+ pd.Timestamp('2018-01-01', tz='UTC'),
+ pd.Timestamp('2018-01-02', tz='UTC')]
+ })
+ result = df.groupby('tag').agg({'date': lambda e: e.head(1)})
+ expected = pd.DataFrame([pd.Timestamp('2018-01-01', tz='UTC')],
+ index=pd.Index([1], name='tag'),
+ columns=['date'])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index 255d9a8acf2d0..7a3d189d3020e 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -278,6 +278,26 @@ def test_first_last_tz(data, expected_first, expected_last):
assert_frame_equal(result, expected[['id', 'time']])
+@pytest.mark.parametrize('method, ts, alpha', [
+ ['first', Timestamp('2013-01-01', tz='US/Eastern'), 'a'],
+ ['last', Timestamp('2013-01-02', tz='US/Eastern'), 'b']
+])
+def test_first_last_tz_multi_column(method, ts, alpha):
+ # GH 21603
+ df = pd.DataFrame({'group': [1, 1, 2],
+ 'category_string': pd.Series(list('abc')).astype(
+ 'category'),
+ 'datetimetz': pd.date_range('20130101', periods=3,
+ tz='US/Eastern')})
+ result = getattr(df.groupby('group'), method)()
+ expepcted = pd.DataFrame({'category_string': [alpha, 'c'],
+ 'datetimetz': [ts,
+ Timestamp('2013-01-03',
+ tz='US/Eastern')]},
+ index=pd.Index([1, 2], name='group'))
+ assert_frame_equal(result, expepcted)
+
+
def test_nth_multi_index_as_expected():
# PR 9090, related to issue 8979
# test nth on MultiIndex
| - [x] closes #23683
- [x] closes #21603
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25308 | 2019-02-13T19:55:07Z | 2019-02-28T17:38:48Z | 2019-02-28T17:38:48Z | 2019-02-28T17:39:01Z |
DOC: fix misspellings | diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 2c2e5c5425216..e4dd82afcdf65 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -1317,7 +1317,7 @@ arbitrary function, for example:
df.groupby(['Store', 'Product']).pipe(mean)
where ``mean`` takes a GroupBy object and finds the mean of the Revenue and Quantity
-columns repectively for each Store-Product combination. The ``mean`` function can
+columns respectively for each Store-Product combination. The ``mean`` function can
be any function that takes in a GroupBy object; the ``.pipe`` will pass the GroupBy
object as a parameter into the function you specify.
diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index bc2a4918bc27b..2d6550bb6888d 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -370,7 +370,7 @@ Updated PyTables Support
df1.get_dtype_counts()
- performance improvements on table writing
-- support for arbitrarly indexed dimensions
+- support for arbitrarily indexed dimensions
- ``SparseSeries`` now has a ``density`` property (:issue:`2384`)
- enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument
to strip arbitrary characters (:issue:`2411`)
diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst
index 7621cb9c1e27c..cbcb23e356577 100644
--- a/doc/source/whatsnew/v0.16.1.rst
+++ b/doc/source/whatsnew/v0.16.1.rst
@@ -136,7 +136,7 @@ groupby operations on the index will preserve the index nature as well
reindexing operations, will return a resulting index based on the type of the passed
indexer, meaning that passing a list will return a plain-old-``Index``; indexing with
a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories
-of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with
+of the PASSED ``Categorical`` dtype. This allows one to arbitrarily index these even with
values NOT in the categories, similarly to how you can reindex ANY pandas index.
.. code-block:: ipython
| Signed-off-by: Takuya Noguchi <takninnovationresearch@gmail.com> | https://api.github.com/repos/pandas-dev/pandas/pulls/25305 | 2019-02-13T14:37:24Z | 2019-02-15T04:07:55Z | 2019-02-15T04:07:55Z | 2019-02-15T04:07:59Z |
BUG: Fix passing of numeric_only argument for categorical reduce | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f17c4974cd450..089da5f13c73f 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -25,6 +25,7 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
+- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index d7d0882bbcc94..6b977e34ae7f8 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2172,7 +2172,7 @@ def _reverse_indexer(self):
return result
# reduction ops #
- def _reduce(self, name, axis=0, skipna=True, **kwargs):
+ def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 31c6247436418..a5dfe8d43c336 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3678,8 +3678,12 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
if axis is not None:
self._get_axis_number(axis)
- # dispatch to ExtensionArray interface
- if isinstance(delegate, ExtensionArray):
+ if isinstance(delegate, Categorical):
+ # TODO deprecate numeric_only argument for Categorical and use
+ # skipna as well, see GH25303
+ return delegate._reduce(name, numeric_only=numeric_only, **kwds)
+ elif isinstance(delegate, ExtensionArray):
+ # dispatch to ExtensionArray interface
return delegate._reduce(name, skipna=skipna, **kwds)
elif is_datetime64_dtype(delegate):
# use DatetimeIndex implementation to handle skipna correctly
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 173f719edd465..8520855d14918 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -960,6 +960,27 @@ def test_min_max(self):
assert np.isnan(_min)
assert _max == 1
+ def test_min_max_numeric_only(self):
+ # TODO deprecate numeric_only argument for Categorical and use
+ # skipna as well, see GH25303
+ cat = Series(Categorical(
+ ["a", "b", np.nan, "a"], categories=['b', 'a'], ordered=True))
+
+ _min = cat.min()
+ _max = cat.max()
+ assert np.isnan(_min)
+ assert _max == "a"
+
+ _min = cat.min(numeric_only=True)
+ _max = cat.max(numeric_only=True)
+ assert _min == "b"
+ assert _max == "a"
+
+ _min = cat.min(numeric_only=False)
+ _max = cat.max(numeric_only=False)
+ assert np.isnan(_min)
+ assert _max == "a"
+
class TestSeriesMode(object):
# Note: the name TestSeriesMode indicates these tests
| See https://github.com/pandas-dev/pandas/issues/25303 and https://github.com/pandas-dev/pandas/issues/25299
- [x] closes https://github.com/pandas-dev/pandas/issues/25299
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25304 | 2019-02-13T14:10:56Z | 2019-02-16T18:49:51Z | 2019-02-16T18:49:51Z | 2019-02-16T18:49:58Z |
BUG: OverflowError in resample.agg with tz data | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 95362521f3b9f..286d267f024a1 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -172,7 +172,7 @@ Plotting
Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
--
+- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`)
-
-
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index df91c71cfe238..0e3dae61561c1 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1010,7 +1010,7 @@ def get_loc(self, key, method=None, tolerance=None):
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
- except (TypeError, KeyError, ValueError):
+ except (TypeError, KeyError, ValueError, OverflowError):
pass
try:
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 69acf4ba6bde8..97f1e07380ef9 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -549,3 +549,25 @@ def test_selection_api_validation():
exp.index.name = 'd'
assert_frame_equal(exp, df.resample('2D', level='d').sum())
+
+
+@pytest.mark.parametrize('col_name', ['t2', 't2x', 't2q', 'T_2M',
+ 't2p', 't2m', 't2m1', 'T2M'])
+def test_agg_with_datetime_index_list_agg_func(col_name):
+ # GH 22660
+ # The parametrized column names would get converted to dates by our
+ # date parser. Some would result in OutOfBoundsError (ValueError) while
+ # others would result in OverflowError when passed into Timestamp.
+ # We catch these errors and move on to the correct branch.
+ df = pd.DataFrame(list(range(200)),
+ index=pd.date_range(start='2017-01-01', freq='15min',
+ periods=200, tz='Europe/Berlin'),
+ columns=[col_name])
+ result = df.resample('1d').aggregate(['mean'])
+ expected = pd.DataFrame([47.5, 143.5, 195.5],
+ index=pd.date_range(start='2017-01-01', freq='D',
+ periods=3, tz='Europe/Berlin'),
+ columns=pd.MultiIndex(levels=[[col_name],
+ ['mean']],
+ codes=[[0], [0]]))
+ assert_frame_equal(result, expected)
| - [x] closes #22660
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25297 | 2019-02-13T07:42:48Z | 2019-02-16T17:22:41Z | 2019-02-16T17:22:41Z | 2019-02-16T22:34:09Z |
DOC/CLN: Fix various docstring errors | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index d7d0882bbcc94..c2b024c9ae12e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -214,7 +214,7 @@ def contains(cat, key, container):
class Categorical(ExtensionArray, PandasObject):
"""
- Represent a categorical variable in classic R / S-plus fashion
+ Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
@@ -235,7 +235,7 @@ class Categorical(ExtensionArray, PandasObject):
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
- ordered : boolean, (default False)
+ ordered : bool, default False
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
@@ -253,7 +253,7 @@ class Categorical(ExtensionArray, PandasObject):
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
- ordered : boolean
+ ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
@@ -297,7 +297,7 @@ class Categorical(ExtensionArray, PandasObject):
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
- >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
+ >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
@@ -618,7 +618,7 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
----------
codes : array-like, integers
An integer array, where each integer points to a category in
- categories or dtype.categories, or else is -1 for NaN
+ categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
@@ -700,7 +700,7 @@ def _set_categories(self, categories, fastpath=False):
Parameters
----------
- fastpath : boolean (default: False)
+ fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
@@ -747,15 +747,15 @@ def _set_dtype(self, dtype):
def set_ordered(self, value, inplace=False):
"""
- Set the ordered attribute to the boolean value
+ Set the ordered attribute to the boolean value.
Parameters
----------
- value : boolean to set whether this categorical is ordered (True) or
- not (False)
- inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy
- of this categorical with ordered set to the value
+ value : bool
+ Set whether this categorical is ordered (True) or not (False).
+ inplace : bool, default False
+ Whether or not to set the ordered attribute in-place or return
+ a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
@@ -770,9 +770,9 @@ def as_ordered(self, inplace=False):
Parameters
----------
- inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy
- of this categorical with ordered set to True
+ inplace : bool, default False
+ Whether or not to set the ordered attribute in-place or return
+ a copy of this categorical with ordered set to True.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
@@ -783,9 +783,9 @@ def as_unordered(self, inplace=False):
Parameters
----------
- inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy
- of this categorical with ordered set to False
+ inplace : bool, default False
+ Whether or not to set the ordered attribute in-place or return
+ a copy of this categorical with ordered set to False.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
@@ -815,19 +815,19 @@ def set_categories(self, new_categories, ordered=None, rename=False,
----------
new_categories : Index-like
The categories in new order.
- ordered : boolean, (default: False)
+ ordered : bool, default False
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
- rename : boolean (default: False)
+ rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
- inplace : boolean (default: False)
- Whether or not to reorder the categories inplace or return a copy of
- this categorical with reordered categories.
+ inplace : bool, default False
+ Whether or not to reorder the categories in-place or return a copy
+ of this categorical with reordered categories.
Returns
-------
- cat : Categorical with reordered categories or None if inplace.
+ Categorical with reordered categories or None if inplace.
Raises
------
@@ -890,7 +890,7 @@ def rename_categories(self, new_categories, inplace=False):
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
- inplace : boolean (default: False)
+ inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
@@ -967,10 +967,10 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
----------
new_categories : Index-like
The categories in new order.
- ordered : boolean, optional
+ ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
- inplace : boolean (default: False)
+ inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
@@ -1010,7 +1010,7 @@ def add_categories(self, new_categories, inplace=False):
----------
new_categories : category or list-like of category
The new categories to be included.
- inplace : boolean (default: False)
+ inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
@@ -1060,7 +1060,7 @@ def remove_categories(self, removals, inplace=False):
----------
removals : category or list of categories
The categories which should be removed.
- inplace : boolean (default: False)
+ inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
@@ -1108,7 +1108,7 @@ def remove_unused_categories(self, inplace=False):
Parameters
----------
- inplace : boolean (default: False)
+ inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
@@ -1460,7 +1460,7 @@ def value_counts(self, dropna=True):
Parameters
----------
- dropna : boolean, default True
+ dropna : bool, default True
Don't include counts of NaN.
Returns
@@ -1581,9 +1581,9 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
Parameters
----------
- inplace : boolean, default False
+ inplace : bool, default False
Do operation in place.
- ascending : boolean, default True
+ ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
@@ -2239,7 +2239,7 @@ def mode(self, dropna=True):
Parameters
----------
- dropna : boolean, default True
+ dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
@@ -2332,7 +2332,7 @@ def equals(self, other):
Returns
-------
- are_equal : boolean
+ bool
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
@@ -2356,7 +2356,7 @@ def is_dtype_equal(self, other):
Returns
-------
- are_equal : boolean
+ bool
"""
try:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 1b2a4da389dc4..cd8e8ed520ddc 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -799,14 +799,14 @@ def tz_convert(self, tz):
Parameters
----------
- tz : string, pytz.timezone, dateutil.tz.tzfile or None
+ tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
- normalized : same type as self
+ Array or Index
Raises
------
@@ -842,7 +842,7 @@ def tz_convert(self, tz):
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
- >>> dti = pd.date_range(start='2014-08-01 09:00',freq='H',
+ >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
@@ -882,7 +882,7 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
Parameters
----------
- tz : string, pytz.timezone, dateutil.tz.tzfile or None
+ tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
@@ -930,7 +930,7 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
Returns
-------
- result : same type as self
+ Same type as self
Array/Index converted to the specified time zone.
Raises
@@ -970,43 +970,39 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
- >>> s = pd.to_datetime(pd.Series([
- ... '2018-10-28 01:30:00',
- ... '2018-10-28 02:00:00',
- ... '2018-10-28 02:30:00',
- ... '2018-10-28 02:00:00',
- ... '2018-10-28 02:30:00',
- ... '2018-10-28 03:00:00',
- ... '2018-10-28 03:30:00']))
+ >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
+ ... '2018-10-28 02:00:00',
+ ... '2018-10-28 02:30:00',
+ ... '2018-10-28 02:00:00',
+ ... '2018-10-28 02:30:00',
+ ... '2018-10-28 03:00:00',
+ ... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
- 2018-10-28 01:30:00+02:00 0
- 2018-10-28 02:00:00+02:00 1
- 2018-10-28 02:30:00+02:00 2
- 2018-10-28 02:00:00+01:00 3
- 2018-10-28 02:30:00+01:00 4
- 2018-10-28 03:00:00+01:00 5
- 2018-10-28 03:30:00+01:00 6
- dtype: int64
+ 0 2018-10-28 01:30:00+02:00
+ 1 2018-10-28 02:00:00+02:00
+ 2 2018-10-28 02:30:00+02:00
+ 3 2018-10-28 02:00:00+01:00
+ 4 2018-10-28 02:30:00+01:00
+ 5 2018-10-28 03:00:00+01:00
+ 6 2018-10-28 03:30:00+01:00
+ dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
- >>> s = pd.to_datetime(pd.Series([
- ... '2018-10-28 01:20:00',
- ... '2018-10-28 02:36:00',
- ... '2018-10-28 03:46:00']))
+ >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
+ ... '2018-10-28 02:36:00',
+ ... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
- 0 2018-10-28 01:20:00+02:00
- 1 2018-10-28 02:36:00+02:00
- 2 2018-10-28 03:46:00+01:00
- dtype: datetime64[ns, CET]
+ 0 2015-03-29 03:00:00+02:00
+ 1 2015-03-29 03:30:00+02:00
+ dtype: datetime64[ns, Europe/Warsaw]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
- >>> s = pd.to_datetime(pd.Series([
- ... '2015-03-29 02:30:00',
- ... '2015-03-29 03:30:00']))
+ >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
+ ... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
@@ -1129,7 +1125,7 @@ def to_period(self, freq=None):
Parameters
----------
- freq : string or Offset, optional
+ freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
@@ -1150,7 +1146,7 @@ def to_period(self, freq=None):
Examples
--------
- >>> df = pd.DataFrame({"y": [1,2,3]},
+ >>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 5a98e83c65884..7fdc64a8d9f85 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -794,7 +794,7 @@ def array(self):
Returns
-------
- array : ExtensionArray
+ ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
@@ -1022,7 +1022,7 @@ def max(self, axis=None, skipna=True):
def argmax(self, axis=None, skipna=True):
"""
- Return a ndarray of the maximum argument indexer.
+ Return an ndarray of the maximum argument indexer.
Parameters
----------
@@ -1087,6 +1087,10 @@ def argmin(self, axis=None, skipna=True):
Dummy argument for consistency with Series
skipna : bool, default True
+ Returns
+ -------
+ numpy.ndarray
+
See Also
--------
numpy.ndarray.argmin
@@ -1102,6 +1106,10 @@ def tolist(self):
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
+ Returns
+ -------
+ list
+
See Also
--------
numpy.ndarray.tolist
@@ -1162,7 +1170,7 @@ def _map_values(self, mapper, na_action=None):
Returns
-------
- applied : Union[Index, MultiIndex], inferred
+ Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
@@ -1246,7 +1254,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
Returns
-------
- counts : Series
+ Series
See Also
--------
@@ -1363,7 +1371,7 @@ def is_unique(self):
Returns
-------
- is_unique : boolean
+ bool
"""
return self.nunique(dropna=False) == len(self)
@@ -1377,7 +1385,7 @@ def is_monotonic(self):
Returns
-------
- is_monotonic : boolean
+ bool
"""
from pandas import Index
return Index(self).is_monotonic
@@ -1394,7 +1402,7 @@ def is_monotonic_decreasing(self):
Returns
-------
- is_monotonic_decreasing : boolean
+ bool
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cf97c94f6d129..a239ff4b4d5db 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6346,6 +6346,8 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
Returns
-------
Series or DataFrame
+ Result of applying ``func`` along the given axis of the
+ DataFrame.
See Also
--------
@@ -6364,7 +6366,7 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
Examples
--------
- >>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B'])
+ >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b1fcbba7bd7ec..3a73861086bed 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5959,17 +5959,18 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
- each index (for a Series) or column (for a DataFrame). (values not
- in the dict/Series/DataFrame will not be filled). This value cannot
+ each index (for a Series) or column (for a DataFrame). Values not
+ in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
- backfill / bfill: use NEXT valid observation to fill gap
+ backfill / bfill: use next valid observation to fill gap.
axis : %(axes_single_arg)s
- inplace : boolean, default False
- If True, fill in place. Note: this will modify any
- other views on this object, (e.g. a no-copy slice for a column in a
+ Axis along which to fill missing values.
+ inplace : bool, default False
+ If True, fill in-place. Note: this will modify any
+ other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
@@ -5979,18 +5980,20 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
- a dict of item->dtype of what to downcast if possible,
+ A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
- equal type (e.g. float64 to int64 if possible)
+ equal type (e.g. float64 to int64 if possible).
Returns
-------
- filled : %(klass)s
+ %(klass)s
+ Object with missing values filled.
See Also
--------
interpolate : Fill NaN values using interpolation.
- reindex, asfreq
+ reindex : Conform object to new index.
+ asfreq : Convert TimeSeries to specified frequency.
Examples
--------
@@ -5998,7 +6001,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
- ... columns=list('ABCD'))
+ ... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
@@ -6752,7 +6755,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry befofe it to use for interpolation.
- >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
+ >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
@@ -7221,9 +7224,9 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False,
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
- axis : int or string axis name, optional
+ axis : int or str axis name, optional
Align object with lower and upper along the given axis.
- inplace : boolean, default False
+ inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
@@ -7345,7 +7348,7 @@ def clip_upper(self, threshold, axis=None, inplace=False):
axis : {0 or 'index', 1 or 'columns'}, default 0
Align object with `threshold` along the given axis.
- inplace : boolean, default False
+ inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
@@ -7426,7 +7429,7 @@ def clip_lower(self, threshold, axis=None, inplace=False):
axis : {0 or 'index', 1 or 'columns'}, default 0
Align `self` with `threshold` along the given axis.
- inplace : boolean, default False
+ inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
@@ -7583,9 +7586,9 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
Examples
--------
- >>> df = pd.DataFrame({'Animal' : ['Falcon', 'Falcon',
- ... 'Parrot', 'Parrot'],
- ... 'Max Speed' : [380., 370., 24., 26.]})
+ >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
+ ... 'Parrot', 'Parrot'],
+ ... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
@@ -7606,8 +7609,8 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
- >>> df = pd.DataFrame({'Max Speed' : [390., 350., 30., 20.]},
- ... index=index)
+ >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
+ ... index=index)
>>> df
Max Speed
Animal Type
@@ -7740,14 +7743,14 @@ def at_time(self, time, asof=False, axis=None):
Parameters
----------
- time : datetime.time or string
+ time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
- values_at_time : same type as caller
+ Series or DataFrame
Raises
------
@@ -7765,7 +7768,7 @@ def at_time(self, time, asof=False, axis=None):
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
- >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
+ >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
@@ -7800,17 +7803,17 @@ def between_time(self, start_time, end_time, include_start=True,
Parameters
----------
- start_time : datetime.time or string
- end_time : datetime.time or string
- include_start : boolean, default True
- include_end : boolean, default True
+ start_time : datetime.time or str
+ end_time : datetime.time or str
+ include_start : bool, default True
+ include_end : bool, default True
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
- values_between_time : same type as caller
+ Series or DataFrame
Raises
------
@@ -7828,7 +7831,7 @@ def between_time(self, start_time, end_time, include_start=True,
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
- >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
+ >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 53671e00e88b4..a6c945ac2e464 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -38,15 +38,15 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
- they are all None in which case a ValueError will be raised
+ they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
- The axis to concatenate along
+ The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
- How to handle indexes on other axis(es)
+ How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
- inner/outer set logic
- ignore_index : boolean, default False
+ inner/outer set logic.
+ ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
@@ -54,16 +54,16 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
- hierarchical index using the passed keys as the outermost level
+ hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
- MultiIndex. Otherwise they will be inferred from the keys
+ MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
- Names for the levels in the resulting hierarchical index
- verify_integrity : boolean, default False
+ Names for the levels in the resulting hierarchical index.
+ verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
- be very expensive relative to the actual data concatenation
- sort : boolean, default None
+ be very expensive relative to the actual data concatenation.
+ sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
@@ -76,12 +76,12 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
.. versionadded:: 0.23.0
- copy : boolean, default True
- If False, do not copy data unnecessarily
+ copy : bool, default True
+ If False, do not copy data unnecessarily.
Returns
-------
- concatenated : object, type of objs
+ object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
@@ -89,10 +89,10 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
See Also
--------
- Series.append
- DataFrame.append
- DataFrame.join
- DataFrame.merge
+ Series.append : Concatenate Series.
+ DataFrame.append : Concatenate DataFrames.
+ DataFrame.join : Join DataFrames using indexes.
+ DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
@@ -128,7 +128,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
- >>> pd.concat([s1, s2], keys=['s1', 's2',])
+ >>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 54f11646fc753..8d7616c4b6b61 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -392,36 +392,36 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, margins_name='All', dropna=True,
normalize=False):
"""
- Compute a simple cross-tabulation of two (or more) factors. By default
+ Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
- aggregation function are passed
+ aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
- Values to group by in the rows
+ Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
- Values to group by in the columns
+ Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
- If passed, must match number of row arrays passed
+ If passed, must match number of row arrays passed.
colnames : sequence, default None
- If passed, must match number of column arrays passed
+ If passed, must match number of column arrays passed.
aggfunc : function, optional
- If specified, requires `values` be specified as well
- margins : boolean, default False
- Add row/column margins (subtotals)
- margins_name : string, default 'All'
- Name of the row / column that will contain the totals
+ If specified, requires `values` be specified as well.
+ margins : bool, default False
+ Add row/column margins (subtotals).
+ margins_name : str, default 'All'
+ Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
- dropna : boolean, default True
- Do not include columns whose entries are all NaN
- normalize : boolean, {'all', 'index', 'columns'}, or {0,1}, default False
+ dropna : bool, default True
+ Do not include columns whose entries are all NaN.
+ normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
@@ -433,7 +433,13 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None,
Returns
-------
- crosstab : DataFrame
+ DataFrame
+ Cross tabulation of the data.
+
+ See Also
+ --------
+ DataFrame.pivot : Reshape data based on column values.
+ pivot_table : Create a pivot table as a DataFrame.
Notes
-----
@@ -455,32 +461,26 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None,
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
- ... dtype=object)
-
+ ... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
- ... # doctest: +NORMALIZE_WHITESPACE
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
+ Here 'c' and 'f' are not represented in the data and will not be
+ shown in the output because dropna is True by default. Set
+ dropna=False to preserve categories with no data.
+
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
- >>> crosstab(foo, bar) # 'c' and 'f' are not represented in the data,
- # and will not be shown in the output because
- # dropna is True by default. Set 'dropna=False'
- # to preserve categories with no data
- ... # doctest: +SKIP
+ >>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
-
- >>> crosstab(foo, bar, dropna=False) # 'c' and 'f' are not represented
- # in the data, but they still will be counted
- # and shown in the output
- ... # doctest: +SKIP
+ >>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index f436b3b92a359..6ba33301753d6 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -701,19 +701,20 @@ def _convert_level_number(level_num, columns):
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False, dtype=None):
"""
- Convert categorical variable into dummy/indicator variables
+ Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
- prefix : string, list of strings, or dict of strings, default None
+ Data of which to get dummy indicators.
+ prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
- prefix_sep : string, default '_'
+ prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
- list or dictionary as with `prefix.`
+ list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
@@ -736,11 +737,12 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
Returns
-------
- dummies : DataFrame
+ DataFrame
+ Dummy-coded data.
See Also
--------
- Series.str.get_dummies
+ Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 2a654fec36a9f..f99fd9004bb31 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -163,7 +163,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
Use `drop` optional when bins is not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
- ... right=False, duplicates='drop')
+ ... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 183a91c952140..cc7a4db515c42 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -120,7 +120,7 @@ def str_count(arr, pat, flags=0):
Returns
-------
- counts : Series or Index
+ Series or Index
Same type as the calling object containing the integer counts.
See Also
@@ -283,7 +283,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
return `True`. However, '.0' as a regex matches any character
followed by a 0.
- >>> s2 = pd.Series(['40','40.0','41','41.0','35'])
+ >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
@@ -433,13 +433,13 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
Parameters
----------
- pat : string or compiled regex
+ pat : str or compiled regex
String can be a character sequence or regular expression.
.. versionadded:: 0.20.0
`pat` also accepts a compiled regex.
- repl : string or callable
+ repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
@@ -448,15 +448,15 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
`repl` also accepts a callable.
n : int, default -1 (all)
- Number of replacements to make from start
- case : boolean, default None
+ Number of replacements to make from start.
+ case : bool, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
- regex : boolean, default True
+ regex : bool, default True
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
@@ -537,6 +537,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
Using a compiled regex with flags
+ >>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
@@ -604,6 +605,7 @@ def str_repeat(arr, repeats):
0 a
1 b
2 c
+ dtype: object
Single int repeats string in Series
@@ -611,6 +613,7 @@ def str_repeat(arr, repeats):
0 aa
1 bb
2 cc
+ dtype: object
Sequence of int repeats corresponding string in Series
@@ -618,6 +621,7 @@ def str_repeat(arr, repeats):
0 a
1 bb
2 ccc
+ dtype: object
"""
if is_scalar(repeats):
def rep(x):
@@ -646,13 +650,14 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan):
Parameters
----------
- pat : string
- Character sequence or regular expression
- case : boolean, default True
- If True, case sensitive
+ pat : str
+ Character sequence or regular expression.
+ case : bool, default True
+ If True, case sensitive.
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- na : default NaN, fill value for missing values
+ re module flags, e.g. re.IGNORECASE.
+ na : default NaN
+ Fill value for missing values.
Returns
-------
@@ -768,7 +773,7 @@ def str_extract(arr, pat, flags=0, expand=True):
Parameters
----------
- pat : string
+ pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
@@ -966,21 +971,23 @@ def str_extractall(arr, pat, flags=0):
def str_get_dummies(arr, sep='|'):
"""
- Split each string in the Series by sep and return a frame of
- dummy/indicator variables.
+ Split each string in the Series by sep and return a DataFrame
+ of dummy/indicator variables.
Parameters
----------
- sep : string, default "|"
+ sep : str, default "|"
String to split on.
Returns
-------
- dummies : DataFrame
+ DataFrame
+ Dummy variables corresponding to values of the Series.
See Also
--------
- get_dummies
+ get_dummies : Convert categorical variable into dummy/indicator
+ variables.
Examples
--------
@@ -1089,11 +1096,11 @@ def str_findall(arr, pat, flags=0):
Parameters
----------
- pat : string
+ pat : str
Pattern or regular expression.
flags : int, default 0
- ``re`` module flags, e.g. `re.IGNORECASE` (default is 0, which means
- no flags).
+ Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
+ means no flags).
Returns
-------
@@ -1182,17 +1189,18 @@ def str_find(arr, sub, start=0, end=None, side='left'):
Parameters
----------
sub : str
- Substring being searched
+ Substring being searched.
start : int
- Left edge index
+ Left edge index.
end : int
- Right edge index
+ Right edge index.
side : {'left', 'right'}, default 'left'
- Specifies a starting side, equivalent to ``find`` or ``rfind``
+ Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
- found : Series/Index of integer values
+ Series or Index
+ Indexes where substring is found.
"""
if not isinstance(sub, compat.string_types):
@@ -1430,7 +1438,7 @@ def str_slice_replace(arr, start=None, stop=None, repl=None):
Returns
-------
- replaced : Series or Index
+ Series or Index
Same type as the original object.
See Also
@@ -1513,7 +1521,7 @@ def str_strip(arr, to_strip=None, side='both'):
Returns
-------
- stripped : Series/Index of objects
+ Series or Index
"""
if side == 'both':
f = lambda x: x.strip(to_strip)
@@ -1537,30 +1545,30 @@ def str_wrap(arr, width, **kwargs):
Parameters
----------
width : int
- Maximum line-width
+ Maximum line width.
expand_tabs : bool, optional
- If true, tab characters will be expanded to spaces (default: True)
+ If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
- If true, each whitespace character (as defined by string.whitespace)
+ If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
- (default: True)
+ (default: True).
drop_whitespace : bool, optional
- If true, whitespace that, after wrapping, happens to end up at the
- beginning or end of a line is dropped (default: True)
+ If True, whitespace that, after wrapping, happens to end up at the
+ beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
- If true, then words longer than width will be broken in order to ensure
+ If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
- not be broken, and some lines may be longer than width. (default: True)
+ not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
- If true, wrapping will occur preferably on whitespace and right after
+ If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
- insecable words. (default: True)
+ insecable words (default: True).
Returns
-------
- wrapped : Series/Index of objects
+ Series or Index
Notes
-----
@@ -1581,6 +1589,7 @@ def str_wrap(arr, width, **kwargs):
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
+ dtype: object
"""
kwargs['width'] = width
@@ -1613,7 +1622,7 @@ def str_translate(arr, table, deletechars=None):
Returns
-------
- translated : Series/Index of objects
+ Series or Index
"""
if deletechars is None:
f = lambda x: x.translate(table)
@@ -1641,15 +1650,16 @@ def str_get(arr, i):
Returns
-------
- items : Series/Index of objects
+ Series or Index
Examples
--------
>>> s = pd.Series(["String",
- (1, 2, 3),
- ["a", "b", "c"],
- 123, -456,
- {1:"Hello", "2":"World"}])
+ ... (1, 2, 3),
+ ... ["a", "b", "c"],
+ ... 123,
+ ... -456,
+ ... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
@@ -1674,7 +1684,7 @@ def str_get(arr, i):
2 c
3 NaN
4 NaN
- 5 NaN
+ 5 None
dtype: object
"""
def f(x):
@@ -1699,7 +1709,7 @@ def str_decode(arr, encoding, errors="strict"):
Returns
-------
- decoded : Series/Index of objects
+ Series or Index
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
@@ -2091,7 +2101,7 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
Returns
-------
- concat : str or Series/Index of objects
+ str, Series or Index
If `others` is None, `str` is returned, otherwise a `Series/Index`
(same type as caller) of objects is returned.
| This PR fixes a variety of docstring errors found in different parts of the code base (mostly incorrect names for data types, missing punctuation, PEP8 in examples, etc.). | https://api.github.com/repos/pandas-dev/pandas/pulls/25295 | 2019-02-13T03:08:35Z | 2019-02-16T17:27:50Z | 2019-02-16T17:27:50Z | 2019-02-16T17:29:49Z |
Series pct_change fill_method behavior | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 83beec5607986..d2e0e62e573b0 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -29,7 +29,7 @@ Enhancements
Other enhancements
^^^^^^^^^^^^^^^^^^
--
+- :meth:`Series.pct_change` and :meth:`DataFrame.pct_change` now accept a ``skipna`` argument (:issue:`25006`)
-
.. _whatsnew_1000.api_breaking:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6ade69fb4ca9d..117d9d9a27a58 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10293,6 +10293,10 @@ def _check_percentile(self, q):
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay()).
+ skipna : bool, default True
+ Exclude NA/null values before computing percent change.
+
+ .. versionadded:: 1.0.0
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
@@ -10309,6 +10313,11 @@ def _check_percentile(self, q):
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
+ Notes
+ -----
+ The default `skipna=True` drops NAs before computing the percentage
+ change, and the results are reindexed like the original calling object.
+
Examples
--------
**Series**
@@ -10332,22 +10341,53 @@ def _check_percentile(self, q):
2 -0.055556
dtype: float64
- See the percentage change in a Series where filling NAs with last
- valid observation forward to next valid.
+ See how the computing of percentage change is performed in a Series
+ with NAs. With default `skipna=True`, NAs are dropped before the
+ computation and eventually the results are reindexed like the original
+ object, thus keeping the original NAs.
- >>> s = pd.Series([90, 91, None, 85])
+ >>> s = pd.Series([90, 91, np.nan, 85, np.nan, 95])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
+ 4 NaN
+ 5 95.0
dtype: float64
+ >>> s.pct_change()
+ 0 NaN
+ 1 0.011111
+ 2 NaN
+ 3 -0.065934
+ 4 NaN
+ 5 0.117647
+ dtype: float64
+
+ By contrast, `skipna=False` will not drop NA values before
+ computation, instead evaluating each entry against the entry prior.
+
+ >>> s.pct_change(skipna=False)
+ 0 NaN
+ 1 0.011111
+ 2 NaN
+ 3 NaN
+ 4 NaN
+ 5 NaN
+
+ On the other hand, if a fill method is passed, NAs are filled before
+ the computation. For example, before the computation of percentage
+ change, forward fill method `ffill` first fills NAs with last valid
+ observation forward to next valid.
+
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
+ 4 0.000000
+ 5 0.117647
dtype: float64
**DataFrame**
@@ -10389,13 +10429,77 @@ def _check_percentile(self, q):
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
+
+ In a DataFrame with NAs, when computing the percentage change with
+ default `skipna=True`, NAs are first droppped on each column/row, and
+ the results are eventually reindexed as originally.
+
+ >>> df = pd.DataFrame({
+ ... 'a': [90, 91, np.nan, 85, np.nan, 95],
+ ... 'b': [91, np.nan, 85, np.nan, 95, np.nan],
+ ... 'c': [np.nan, 85, np.nan, 95, np.nan, np.nan]})
+ >>> df
+ a b c
+ 0 90.0 91.0 NaN
+ 1 91.0 NaN 85.0
+ 2 NaN 85.0 NaN
+ 3 85.0 NaN 95.0
+ 4 NaN 95.0 NaN
+ 5 95.0 NaN NaN
+
+ >>> df.pct_change()
+ a b c
+ 0 NaN NaN NaN
+ 1 0.011111 NaN NaN
+ 2 NaN -0.065934 NaN
+ 3 -0.065934 NaN 0.117647
+ 4 NaN 0.117647 NaN
+ 5 0.117647 NaN NaN
+
+ >>> df.pct_change(axis=1)
+ a b c
+ 0 NaN 0.011111 NaN
+ 1 NaN NaN -0.065934
+ 2 NaN NaN NaN
+ 3 NaN NaN 0.117647
+ 4 NaN NaN NaN
+ 5 NaN NaN NaN
+
+ Otherwise, if a fill method is passed, NAs are filled before the
+ computation.
+
+ >>> df.pct_change(fill_method='ffill')
+ a b c
+ 0 NaN NaN NaN
+ 1 0.011111 0.000000 NaN
+ 2 0.000000 -0.065934 0.000000
+ 3 -0.065934 0.000000 0.117647
+ 4 0.000000 0.117647 0.000000
+ 5 0.117647 0.000000 0.000000
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
- def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
- # TODO: Not sure if above is correct - need someone to confirm.
+ def pct_change(
+ self, periods=1, fill_method=None, limit=None, freq=None, skipna=None, **kwargs
+ ):
+ if fill_method is not None and skipna:
+ raise ValueError("cannot pass both fill_method and skipna")
+ elif limit is not None and skipna:
+ raise ValueError("cannot pass both limit and skipna")
+ if fill_method is None and limit is None and skipna is None:
+ skipna = True
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
- if fill_method is None:
+ if skipna and isinstance(self, ABCDataFrame):
+ # If DataFrame, apply to each column/row
+ return self.apply(
+ lambda s: s.pct_change(
+ periods=periods, freq=freq, skipna=skipna, **kwargs
+ ),
+ axis=axis,
+ )
+ if skipna:
+ data = self.dropna()
+ elif fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
@@ -10405,6 +10509,8 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwar
if freq is None:
mask = isna(com.values_from_object(data))
np.putmask(rs.values, mask, np.nan)
+ if skipna:
+ rs = rs.reindex_like(self)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index e99208ac78e15..56d310006927b 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1627,6 +1627,80 @@ def test_pct_change(self):
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize(
+ "skipna, periods, expected_vals",
+ [
+ (
+ True,
+ 1,
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [1.0, np.nan],
+ [0.5, 1.0],
+ [np.nan, 0.5],
+ [0.33333333, np.nan],
+ [np.nan, 0.33333333],
+ ],
+ ),
+ (
+ True,
+ 2,
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [2.0, np.nan],
+ [np.nan, 2.0],
+ [1.0, np.nan],
+ [np.nan, 1.0],
+ ],
+ ),
+ (
+ False,
+ 1,
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [1.0, np.nan],
+ [0.5, 1.0],
+ [np.nan, 0.5],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ ],
+ ),
+ (
+ False,
+ 2,
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [2.0, np.nan],
+ [np.nan, 2.0],
+ [0.33333333, np.nan],
+ [np.nan, 0.33333333],
+ ],
+ ),
+ ],
+ )
+ def test_pct_change_skipna(self, skipna, periods, expected_vals):
+ # GH25006
+ df = DataFrame(
+ [
+ [np.nan, np.nan],
+ [1.0, np.nan],
+ [2.0, 1.0],
+ [3.0, 2.0],
+ [np.nan, 3.0],
+ [4.0, np.nan],
+ [np.nan, 4.0],
+ ]
+ )
+ result = df.pct_change(skipna=skipna, periods=periods)
+ expected = DataFrame(expected_vals)
+ tm.assert_frame_equal(result, expected)
+
# ----------------------------------------------------------------------
# Index of max / min
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index b8708e6ca1871..8c4ce7555526b 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -143,10 +143,10 @@ def test_diff_axis(self):
assert_frame_equal(df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]]))
def test_pct_change(self):
- rs = self.tsframe.pct_change(fill_method=None)
+ rs = self.tsframe.pct_change(skipna=False, fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
- rs = self.tsframe.pct_change(2)
+ rs = self.tsframe.pct_change(periods=2)
filled = self.tsframe.fillna(method="pad")
assert_frame_equal(rs, filled / filled.shift(2) - 1)
@@ -165,7 +165,7 @@ def test_pct_change_shift_over_nas(self):
df = DataFrame({"a": s, "b": s})
- chg = df.pct_change()
+ chg = df.pct_change(fill_method="ffill")
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
assert_frame_equal(chg, edf)
@@ -187,13 +187,15 @@ def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = self.tsframe.pct_change(
- periods, fill_method=fill_method, limit=limit
+ periods=periods, fill_method=fill_method, limit=limit
)
assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=self.tsframe.index, columns=self.tsframe.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
- rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
+ rs_periods = empty_ts.pct_change(
+ periods=periods, fill_method=fill_method, limit=limit
+ )
assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 7b9e50ebbf342..d60b706091d96 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -585,6 +585,27 @@ def test_pct_change(self, periods, fill_method, limit, exp):
else:
tm.assert_series_equal(res, Series(exp))
+ @pytest.mark.parametrize(
+ "fill_method, limit",
+ [
+ ("backfill", None),
+ ("bfill", None),
+ ("pad", None),
+ ("ffill", None),
+ (None, 1),
+ ],
+ )
+ def test_pct_change_skipna_raises(self, fill_method, limit):
+ # GH25006
+ vals = [np.nan, np.nan, 1, 2, np.nan, 4, 10, np.nan]
+ obj = self._typ(vals)
+ if fill_method:
+ msg = "cannot pass both fill_method and skipna"
+ else:
+ msg = "cannot pass both limit and skipna"
+ with pytest.raises(ValueError, match=msg):
+ obj.pct_change(skipna=True, fill_method=fill_method, limit=limit)
+
class TestNDFrame:
# tests that don't fit elsewhere
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 1ddaa4692d741..65589b6402c60 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -2,7 +2,6 @@
import operator
import numpy as np
-from numpy import nan
import pytest
import pandas.util._test_decorators as td
@@ -228,6 +227,21 @@ def test_cummax_timedelta64(self):
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
+ @pytest.mark.parametrize(
+ "periods, expected_vals",
+ [
+ (1, [np.nan, np.nan, 1.0, 0.5, np.nan, 0.333333333333333, np.nan]),
+ (2, [np.nan, np.nan, np.nan, 2.0, np.nan, 1.0, np.nan]),
+ ],
+ )
+ def test_pct_change_skipna(self, periods, expected_vals):
+ # GH25006
+ vals = [np.nan, 1.0, 2.0, 3.0, np.nan, 4.0, np.nan]
+ s = Series(vals)
+ result = s.pct_change(skipna=True, periods=periods)
+ expected = Series(expected_vals)
+ assert_series_equal(expected, result)
+
def test_npdiff(self):
pytest.skip("skipping due to Series no longer being an ndarray")
@@ -235,7 +249,7 @@ def test_npdiff(self):
s = Series(np.arange(5))
r = np.diff(s)
- assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
+ assert_series_equal(Series([np.nan, 0, 0, 0, np.nan]), r)
def _check_accum_op(self, name, datetime_series_, check_dtype=True):
func = getattr(np, name)
@@ -476,14 +490,14 @@ def test_count(self, datetime_series):
assert datetime_series.count() == np.isfinite(datetime_series).sum()
- mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, nan, 1, 2]])
+ mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, np.nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
- right = Series([2, 3, 1], index=[1, 2, nan])
+ right = Series([2, 3, 1], index=[1, 2, np.nan])
assert_series_equal(left, right)
- ts.iloc[[0, 3, 5]] = nan
+ ts.iloc[[0, 3, 5]] = np.nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
@@ -708,11 +722,11 @@ def test_cummethods_bool(self):
result = getattr(s, method)()
assert_series_equal(result, expected)
- e = pd.Series([False, True, nan, False])
- cse = pd.Series([0, 1, nan, 1], dtype=object)
- cpe = pd.Series([False, 0, nan, 0])
- cmin = pd.Series([False, False, nan, False])
- cmax = pd.Series([False, True, nan, True])
+ e = pd.Series([False, True, np.nan, False])
+ cse = pd.Series([0, 1, np.nan, 1], dtype=object)
+ cpe = pd.Series([False, 0, np.nan, 0])
+ cmin = pd.Series([False, False, np.nan, False])
+ cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
@@ -980,8 +994,6 @@ def test_shift_categorical(self):
assert_index_equal(s.values.categories, sn2.values.categories)
def test_unstack(self):
- from numpy import nan
-
index = MultiIndex(
levels=[["bar", "foo"], ["one", "three", "two"]],
codes=[[1, 1, 0, 0], [0, 1, 0, 2]],
@@ -991,7 +1003,7 @@ def test_unstack(self):
unstacked = s.unstack()
expected = DataFrame(
- [[2.0, nan, 3.0], [0.0, 1.0, nan]],
+ [[2.0, np.nan, 3.0], [0.0, 1.0, np.nan]],
index=["bar", "foo"],
columns=["one", "three", "two"],
)
@@ -1018,7 +1030,9 @@ def test_unstack(self):
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
- right = DataFrame([[nan, 1], [2, nan]], index=[101, 102], columns=[nan, 3.5])
+ right = DataFrame(
+ [[np.nan, 1], [2, np.nan]], index=[101, 102], columns=[np.nan, 3.5]
+ )
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays(
@@ -1030,9 +1044,10 @@ def test_unstack(self):
)
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame(
- [[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]], columns=["cat", "dog"]
+ [[1.0, 1.3], [1.1, np.nan], [np.nan, 1.4], [1.2, np.nan]],
+ columns=["cat", "dog"],
)
- tpls = [("a", 1), ("a", 2), ("b", nan), ("b", 1)]
+ tpls = [("a", 1), ("a", 2), ("b", np.nan), ("b", 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index d0ca5d82c6b33..f353c672b6315 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -401,7 +401,7 @@ def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
- rs = self.ts.pct_change(2)
+ rs = self.ts.pct_change(periods=2)
filled = self.ts.fillna(method="pad")
assert_series_equal(rs, filled / filled.shift(2) - 1)
@@ -418,7 +418,7 @@ def test_pct_change(self):
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
- chg = s.pct_change()
+ chg = s.pct_change(fill_method="ffill")
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
assert_series_equal(chg, expected)
@@ -436,12 +436,16 @@ def test_pct_change_shift_over_nas(self):
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
# GH 7292
rs_freq = self.ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
- rs_periods = self.ts.pct_change(periods, fill_method=fill_method, limit=limit)
+ rs_periods = self.ts.pct_change(
+ periods=periods, fill_method=fill_method, limit=limit
+ )
assert_series_equal(rs_freq, rs_periods)
empty_ts = Series(index=self.ts.index)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
- rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
+ rs_periods = empty_ts.pct_change(
+ periods=periods, fill_method=fill_method, limit=limit
+ )
assert_series_equal(rs_freq, rs_periods)
def test_autocorr(self):
| - [ ] closes #25006
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25291 | 2019-02-12T22:55:12Z | 2019-09-20T14:42:35Z | null | 2019-09-20T14:42:35Z |
BUG: fixed merging with empty frame containing an Int64 column (#25183) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 8e59c2300e7ca..df215cf021b4b 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -93,7 +93,7 @@ Bug Fixes
**Other**
- Bug in :meth:`Series.is_unique` where single occurrences of ``NaN`` were not considered unique (:issue:`25180`)
--
+- Bug in :func:`merge` when merging an empty ``DataFrame`` with an ``Int64`` column or a non-empty ``DataFrame`` with an ``Int64`` column that is all ``NaN`` (:issue:`25183`)
-
.. _whatsnew_0.242.contributors:
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 640587b7f9f31..cb98274962656 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -190,6 +190,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
pass
elif getattr(self.block, 'is_sparse', False):
pass
+ elif getattr(self.block, 'is_extension', False):
+ pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 25487ccc76e62..7a97368504fd6 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -39,6 +39,54 @@ def get_test_data(ngroups=NGROUPS, n=N):
return arr
+def get_series():
+ return [
+ pd.Series([1], dtype='int64'),
+ pd.Series([1], dtype='Int64'),
+ pd.Series([1.23]),
+ pd.Series(['foo']),
+ pd.Series([True]),
+ pd.Series([pd.Timestamp('2018-01-01')]),
+ pd.Series([pd.Timestamp('2018-01-01', tz='US/Eastern')]),
+ ]
+
+
+def get_series_na():
+ return [
+ pd.Series([np.nan], dtype='Int64'),
+ pd.Series([np.nan], dtype='float'),
+ pd.Series([np.nan], dtype='object'),
+ pd.Series([pd.NaT]),
+ ]
+
+
+@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
+def series_of_dtype(request):
+ """
+ A parametrized fixture returning a variety of Series of different
+ dtypes
+ """
+ return request.param
+
+
+@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
+def series_of_dtype2(request):
+ """
+ A duplicate of the series_of_dtype fixture, so that it can be used
+ twice by a single function
+ """
+ return request.param
+
+
+@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name)
+def series_of_dtype_all_na(request):
+ """
+ A parametrized fixture returning a variety of Series with all NA
+ values
+ """
+ return request.param
+
+
class TestMerge(object):
def setup_method(self, method):
@@ -428,6 +476,36 @@ def check2(exp, kwarg):
check1(exp_in, kwarg)
check2(exp_out, kwarg)
+ def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
+ # GH 25183
+ df = pd.DataFrame({'key': series_of_dtype, 'value': series_of_dtype2},
+ columns=['key', 'value'])
+ df_empty = df[:0]
+ expected = pd.DataFrame({
+ 'value_x': pd.Series(dtype=df.dtypes['value']),
+ 'key': pd.Series(dtype=df.dtypes['key']),
+ 'value_y': pd.Series(dtype=df.dtypes['value']),
+ }, columns=['value_x', 'key', 'value_y'])
+ actual = df_empty.merge(df, on='key')
+ assert_frame_equal(actual, expected)
+
+ def test_merge_all_na_column(self, series_of_dtype,
+ series_of_dtype_all_na):
+ # GH 25183
+ df_left = pd.DataFrame(
+ {'key': series_of_dtype, 'value': series_of_dtype_all_na},
+ columns=['key', 'value'])
+ df_right = pd.DataFrame(
+ {'key': series_of_dtype, 'value': series_of_dtype_all_na},
+ columns=['key', 'value'])
+ expected = pd.DataFrame({
+ 'key': series_of_dtype,
+ 'value_x': series_of_dtype_all_na,
+ 'value_y': series_of_dtype_all_na,
+ }, columns=['key', 'value_x', 'value_y'])
+ actual = df_left.merge(df_right, on='key')
+ assert_frame_equal(actual, expected)
+
def test_merge_nosort(self):
# #2098, anything to do?
| - [x] closes #25183
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25289 | 2019-02-12T21:01:45Z | 2019-02-24T03:47:24Z | 2019-02-24T03:47:24Z | 2019-02-24T03:47:53Z |
DOC: Fix minor typo in docstring | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 85aa13526e77c..f8b48e6610ce5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5761,9 +5761,9 @@ def stack(self, level=-1, dropna=True):
Notes
-----
The function is named by analogy with a collection of books
- being re-organised from being side by side on a horizontal
+ being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
- vertically on top of of each other (in the index of the
+ vertically on top of each other (in the index of the
dataframe).
Examples
| fix for the docstring of `DataFrame.stack()` | https://api.github.com/repos/pandas-dev/pandas/pulls/25285 | 2019-02-12T15:47:27Z | 2019-02-14T01:49:50Z | 2019-02-14T01:49:50Z | 2019-02-14T01:50:05Z |
Removed Panel class from HDF ASVs | diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py
index f08904ba70a5f..a5dc28eb9508c 100644
--- a/asv_bench/benchmarks/io/hdf.py
+++ b/asv_bench/benchmarks/io/hdf.py
@@ -1,7 +1,5 @@
-import warnings
-
import numpy as np
-from pandas import DataFrame, Panel, date_range, HDFStore, read_hdf
+from pandas import DataFrame, date_range, HDFStore, read_hdf
import pandas.util.testing as tm
from ..pandas_vb_common import BaseIO
@@ -99,31 +97,6 @@ def time_store_info(self):
self.store.info()
-class HDFStorePanel(BaseIO):
-
- def setup(self):
- self.fname = '__test__.h5'
- with warnings.catch_warnings(record=True):
- self.p = Panel(np.random.randn(20, 1000, 25),
- items=['Item%03d' % i for i in range(20)],
- major_axis=date_range('1/1/2000', periods=1000),
- minor_axis=['E%03d' % i for i in range(25)])
- self.store = HDFStore(self.fname)
- self.store.append('p1', self.p)
-
- def teardown(self):
- self.store.close()
- self.remove(self.fname)
-
- def time_read_store_table_panel(self):
- with warnings.catch_warnings(record=True):
- self.store.select('p1')
-
- def time_write_store_table_panel(self):
- with warnings.catch_warnings(record=True):
- self.store.append('p2', self.p)
-
-
class HDF(BaseIO):
params = ['table', 'fixed']
| This is a logical follow up to #25233 which should also help resolve the build failures in #24953 | https://api.github.com/repos/pandas-dev/pandas/pulls/25281 | 2019-02-12T05:47:56Z | 2019-02-13T13:48:28Z | 2019-02-13T13:48:28Z | 2019-02-28T07:23:11Z |
ENH: Support times with timezones in at_time | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 95362521f3b9f..74d25d420e1a1 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -20,7 +20,7 @@ Other Enhancements
^^^^^^^^^^^^^^^^^^
- :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`)
--
+- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :meth:`datetime.time` objects with timezones (:issue:`24043`)
-
.. _whatsnew_0250.api_breaking:
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index df91c71cfe238..5c591f3dd00ac 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1302,20 +1302,19 @@ def indexer_at_time(self, time, asof=False):
--------
indexer_between_time, DataFrame.at_time
"""
- from dateutil.parser import parse
-
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, compat.string_types):
+ from dateutil.parser import parse
time = parse(time).time()
if time.tzinfo:
- # TODO
- raise NotImplementedError("argument 'time' with timezone info is "
- "not supported")
-
- time_micros = self._get_time_micros()
+ if self.tz is None:
+ raise ValueError("Index must be timezone aware.")
+ time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
+ else:
+ time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index bc37317f72802..31e81a9ca77c2 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -6,6 +6,7 @@
import numpy as np
import pytest
+import pytz
from pandas.compat import product
@@ -647,6 +648,28 @@ def test_at_time(self):
rs = ts.at_time('16:00')
assert len(rs) == 0
+ @pytest.mark.parametrize('hour', ['1:00', '1:00AM', time(1),
+ time(1, tzinfo=pytz.UTC)])
+ def test_at_time_errors(self, hour):
+ # GH 24043
+ dti = pd.date_range('2018', periods=3, freq='H')
+ df = pd.DataFrame(list(range(len(dti))), index=dti)
+ if getattr(hour, 'tzinfo', None) is None:
+ result = df.at_time(hour)
+ expected = df.iloc[1:2]
+ tm.assert_frame_equal(result, expected)
+ else:
+ with pytest.raises(ValueError, match="Index must be timezone"):
+ df.at_time(hour)
+
+ def test_at_time_tz(self):
+ # GH 24043
+ dti = pd.date_range('2018', periods=3, freq='H', tz='US/Pacific')
+ df = pd.DataFrame(list(range(len(dti))), index=dti)
+ result = df.at_time(time(4, tzinfo=pytz.timezone('US/Eastern')))
+ expected = df.iloc[1:2]
+ tm.assert_frame_equal(result, expected)
+
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
| - [x] closes #24043
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25280 | 2019-02-12T03:12:47Z | 2019-02-16T17:56:49Z | 2019-02-16T17:56:49Z | 2019-02-16T22:33:50Z |
"Backport PR #25260 on branch 0.24.x" | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 5ae777ca68eba..f17c4974cd450 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -79,7 +79,7 @@ Bug Fixes
**Reshaping**
-
--
+- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`)
-
**Visualization**
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index e11847d2b8ce2..adfd69c21d715 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -862,7 +862,7 @@ def _get_merge_keys(self):
in zip(self.right.index.levels,
self.right.index.codes)]
else:
- right_keys = [self.right.index.values]
+ right_keys = [self.right.index._values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index e21f9d0291afa..c2a214446bb51 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -773,6 +773,28 @@ def test_join_multi_to_multi(self, join_type):
with pytest.raises(ValueError, match=msg):
right.join(left, on=['abc', 'xy'], how=join_type)
+ def test_join_on_tz_aware_datetimeindex(self):
+ # GH 23931
+ df1 = pd.DataFrame(
+ {
+ 'date': pd.date_range(start='2018-01-01', periods=5,
+ tz='America/Chicago'),
+ 'vals': list('abcde')
+ }
+ )
+
+ df2 = pd.DataFrame(
+ {
+ 'date': pd.date_range(start='2018-01-03', periods=5,
+ tz='America/Chicago'),
+ 'vals_2': list('tuvwx')
+ }
+ )
+ result = df1.join(df2.set_index('date'), on='date')
+ expected = df1.copy()
+ expected['vals_2'] = pd.Series([np.nan] * len(expected), dtype=object)
+ assert_frame_equal(result, expected)
+
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
| (cherry picked from commit a9a03a25d82ee285b6da48bfb20b9057d20bb9e7)
xref https://github.com/pandas-dev/pandas/pull/25260#issuecomment-462448610
| https://api.github.com/repos/pandas-dev/pandas/pulls/25279 | 2019-02-12T02:34:00Z | 2019-02-12T13:40:08Z | 2019-02-12T13:40:08Z | 2019-03-11T19:17:22Z |
TST/CLN: remove test_slice_ints_with_floats_raises | diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index c99007cef90d4..8415bab802239 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1541,8 +1541,9 @@ def test_slice_locs(self, dtype):
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
- def test_slice_float_locs(self):
- index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
+ @pytest.mark.parametrize("dtype", [int, float])
+ def test_slice_float_locs(self, dtype):
+ index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
@@ -1551,24 +1552,6 @@ def test_slice_float_locs(self):
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
- @pytest.mark.xfail(reason="Assertions were not correct - see GH#20915")
- def test_slice_ints_with_floats_raises(self):
- # int slicing with floats
- # GH 4892, these are all TypeErrors
- index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
- n = len(index)
-
- pytest.raises(TypeError,
- lambda: index.slice_locs(5.0, 10.0))
- pytest.raises(TypeError,
- lambda: index.slice_locs(4.5, 10.5))
-
- index2 = index[::-1]
- pytest.raises(TypeError,
- lambda: index2.slice_locs(8.5, 1.5), (2, 6))
- pytest.raises(TypeError,
- lambda: index2.slice_locs(10.5, -1), (0, n))
-
def test_slice_locs_dup(self):
index = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert index.slice_locs('a', 'd') == (0, 6)
| - [x] closes #20915
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [n/a ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25277 | 2019-02-11T21:34:41Z | 2019-02-13T13:04:32Z | 2019-02-13T13:04:32Z | 2019-02-20T14:08:35Z |
[CLN] Excel Module Cleanups | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index ed5943e9a1698..8f7bf8e0466f9 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -590,9 +590,8 @@ def __new__(cls, path, engine=None, **kwargs):
if engine == 'auto':
engine = _get_default_writer(ext)
except KeyError:
- error = ValueError("No engine for filetype: '{ext}'"
- .format(ext=ext))
- raise error
+ raise ValueError("No engine for filetype: '{ext}'"
+ .format(ext=ext))
cls = get_writer(engine)
return object.__new__(cls)
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index 1aeaf70f0832e..49255d83d1cd3 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -5,32 +5,39 @@
from pandas.core.dtypes.common import is_integer, is_list_like
-from pandas.core import config
-
-_writer_extensions = ["xlsx", "xls", "xlsm"]
-
-
_writers = {}
def register_writer(klass):
- """Adds engine to the excel writer registry. You must use this method to
- integrate with ``to_excel``. Also adds config options for any new
- ``supported_extensions`` defined on the writer."""
+ """
+ Add engine to the excel writer registry.io.excel.
+
+ You must use this method to integrate with ``to_excel``.
+
+ Parameters
+ ----------
+ klass : ExcelWriter
+ """
if not callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
- for ext in klass.supported_extensions:
- if ext.startswith('.'):
- ext = ext[1:]
- if ext not in _writer_extensions:
- config.register_option("io.excel.{ext}.writer".format(ext=ext),
- engine_name, validator=str)
- _writer_extensions.append(ext)
def _get_default_writer(ext):
+ """
+ Return the default writer for the given extension.
+
+ Parameters
+ ----------
+ ext : str
+ The excel file extension for which to get the default engine.
+
+ Returns
+ -------
+ str
+ The default engine for the extension.
+ """
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
@@ -230,8 +237,6 @@ def _fill_mi_header(row, control_row):
return _maybe_convert_to_string(row), control_row
-# fill blank if index_col not None
-
def _pop_header_name(row, index_col):
"""
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 09b2d86bde3d3..04c9c58a326a4 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2359,7 +2359,7 @@ def test_register_writer(self):
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
- supported_extensions = ['test', 'xlsx', 'xls']
+ supported_extensions = ['xlsx', 'xls']
engine = 'dummy'
def save(self):
@@ -2377,12 +2377,9 @@ def check_called(func):
with pd.option_context('io.excel.xlsx.writer', 'dummy'):
register_writer(DummyClass)
- writer = ExcelWriter('something.test')
+ writer = ExcelWriter('something.xlsx')
assert isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
-
- func = lambda: df.to_excel('something.test')
- check_called(func)
check_called(lambda: df.to_excel('something.xlsx'))
check_called(
lambda: df.to_excel(
| - [x] closes #25271
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25275 | 2019-02-11T21:02:24Z | 2019-02-20T08:04:32Z | 2019-02-20T08:04:32Z | 2019-02-20T08:42:25Z |
REF: use _constructor and ABCFoo to avoid runtime imports | diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index f187d786d9f61..640d43f3b0e03 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -8,7 +8,8 @@
from pandas._libs.interval import Interval
from pandas._libs.tslibs import NaT, Period, Timestamp, timezones
-from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCIndexClass
+from pandas.core.dtypes.generic import (
+ ABCCategoricalIndex, ABCDateOffset, ABCIndexClass)
from pandas import compat
@@ -758,8 +759,7 @@ def __new__(cls, freq=None):
# empty constructor for pickle compat
return object.__new__(cls)
- from pandas.tseries.offsets import DateOffset
- if not isinstance(freq, DateOffset):
+ if not isinstance(freq, ABCDateOffset):
freq = cls._parse_dtype_strict(freq)
try:
@@ -790,12 +790,10 @@ def construct_from_string(cls, string):
Strict construction from a string, raise a TypeError if not
possible
"""
- from pandas.tseries.offsets import DateOffset
-
if (isinstance(string, compat.string_types) and
(string.startswith('period[') or
string.startswith('Period[')) or
- isinstance(string, DateOffset)):
+ isinstance(string, ABCDateOffset)):
# do not parse string like U as period[U]
# avoid tuple to be regarded as freq
try:
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 3c6d3f212342b..697c58a365233 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -221,8 +221,8 @@ def _isna_ndarraylike(obj):
# box
if isinstance(obj, ABCSeries):
- from pandas import Series
- result = Series(result, index=obj.index, name=obj.name, copy=False)
+ result = obj._constructor(
+ result, index=obj.index, name=obj.name, copy=False)
return result
@@ -250,8 +250,8 @@ def _isna_ndarraylike_old(obj):
# box
if isinstance(obj, ABCSeries):
- from pandas import Series
- result = Series(result, index=obj.index, name=obj.name, copy=False)
+ result = obj._constructor(
+ result, index=obj.index, name=obj.name, copy=False)
return result
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 3da349c570274..0c76ac6cd75ac 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -588,9 +588,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
if not cache_array.empty:
result = arg.map(cache_array)
else:
- from pandas import Series
values = convert_listlike(arg._values, True, format)
- result = Series(values, index=arg.index, name=arg.name)
+ result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, compat.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, box, tz)
elif isinstance(arg, ABCIndexClass):
@@ -827,7 +826,6 @@ def to_time(arg, format=None, infer_time_format=False, errors='raise'):
-------
datetime.time
"""
- from pandas.core.series import Series
def _convert_listlike(arg, format):
@@ -892,9 +890,9 @@ def _convert_listlike(arg, format):
return arg
elif isinstance(arg, time):
return arg
- elif isinstance(arg, Series):
+ elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, format)
- return Series(values, index=arg.index, name=arg.name)
+ return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 30cb15f311b9f..7ebaf3056e79e 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -6,12 +6,12 @@
import numpy as np
+from pandas._libs.tslibs import NaT
from pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
-import pandas as pd
from pandas.core.arrays.timedeltas import sequence_to_td64ns
@@ -100,10 +100,9 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'):
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
- from pandas import Series
values = _convert_listlike(arg._values, unit=unit,
box=False, errors=errors)
- return Series(values, index=arg.index, name=arg.name)
+ return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box,
errors=errors, name=arg.name)
@@ -136,7 +135,7 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
return r
# coerce
- result = pd.NaT
+ result = NaT
return result
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25272 | 2019-02-11T18:13:03Z | 2019-02-11T19:37:54Z | 2019-02-11T19:37:53Z | 2019-03-05T21:02:54Z |
[BUG] exception handling of MultiIndex.__contains__ too narrow | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 686c5ad0165e7..9d33c651ef283 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -149,10 +149,9 @@ Missing
MultiIndex
^^^^^^^^^^
+- Bug in which incorrect exception raised by :meth:`pd.Timedelta` when testing the membership of :class:`MultiIndex` (:issue:`24570`)
-
-
--
-
I/O
^^^
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index efb77b5d155a1..c19b6f61f2caa 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -840,7 +840,7 @@ def __contains__(self, key):
try:
self.get_loc(key)
return True
- except (LookupError, TypeError):
+ except (LookupError, TypeError, ValueError):
return False
contains = __contains__
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 4f5517f89e852..ccf017489e046 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -84,3 +84,11 @@ def test_multi_nan_indexing(self):
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
+
+ def test_contains(self):
+ # GH 24570
+ tx = pd.timedelta_range('09:30:00', '16:00:00', freq='30 min')
+ idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])
+ assert tx[0] in idx
+ assert 'element_not_exit' not in idx
+ assert '0 day 09:30:00' in idx
| - closes #24570
- 1 test added
- passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25268 | 2019-02-11T14:31:59Z | 2019-02-19T13:46:05Z | 2019-02-19T13:46:04Z | 2019-02-19T13:46:12Z |
Backport PR #25234 on branch 0.24.x (BUG: Duplicated returns boolean dataframe) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index b0f287cf0b9f6..5ae777ca68eba 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -24,6 +24,8 @@ Fixed Regressions
- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`)
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
+- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. (:issue:`25184`)
+
.. _whatsnew_0242.enhancements:
Enhancements
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index fac42dbd9c7c8..1f4fb39f76c7c 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -69,6 +69,8 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+-
+
Categorical
^^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index da638e24dfce5..f5535096c967d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4661,7 +4661,7 @@ def duplicated(self, subset=None, keep='first'):
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
- return Series()
+ return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
diff --git a/pandas/tests/frame/test_duplicates.py b/pandas/tests/frame/test_duplicates.py
index f61dbbdb989e4..3396670fb5879 100644
--- a/pandas/tests/frame/test_duplicates.py
+++ b/pandas/tests/frame/test_duplicates.py
@@ -182,6 +182,17 @@ def test_drop_duplicates():
assert df.duplicated(keep=keep).sum() == 0
+def test_duplicated_on_empty_frame():
+ # GH 25184
+
+ df = DataFrame(columns=['a', 'b'])
+ dupes = df.duplicated('a')
+
+ result = df[dupes]
+ expected = df.copy()
+ tm.assert_frame_equal(result, expected)
+
+
def test_drop_duplicates_with_duplicate_column_names():
# GH17836
df = DataFrame([
| Backport PR #25234: BUG: Duplicated returns boolean dataframe | https://api.github.com/repos/pandas-dev/pandas/pulls/25267 | 2019-02-11T12:53:23Z | 2019-02-11T13:59:47Z | 2019-02-11T13:59:47Z | 2019-02-11T13:59:47Z |
BUG: Fix regression on DataFrame.replace for regex | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index b0f287cf0b9f6..63a83af79246c 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -23,6 +23,7 @@ Fixed Regressions
- Fixed regression in :meth:`DataFrame.all` and :meth:`DataFrame.any` where ``bool_only=True`` was ignored (:issue:`25101`)
- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`)
- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
+- Fixed regression in :meth:`DataFrame.replace` where ``regex=True`` was only replacing patterns matching the start of the string (:issue:`25259`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 38b719db1709f..407db772d73e8 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -552,9 +552,9 @@ def comp(s, regex=False):
if isna(s):
return isna(values)
if hasattr(s, 'asm8'):
- return _compare_or_regex_match(maybe_convert_objects(values),
- getattr(s, 'asm8'), regex)
- return _compare_or_regex_match(values, s, regex)
+ return _compare_or_regex_search(maybe_convert_objects(values),
+ getattr(s, 'asm8'), regex)
+ return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
@@ -1897,11 +1897,11 @@ def _consolidate(blocks):
return new_blocks
-def _compare_or_regex_match(a, b, regex=False):
+def _compare_or_regex_search(a, b, regex=False):
"""
Compare two array_like inputs of the same shape or two scalar values
- Calls operator.eq or re.match, depending on regex argument. If regex is
+ Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
@@ -1917,7 +1917,7 @@ def _compare_or_regex_match(a, b, regex=False):
if not regex:
op = lambda x: operator.eq(x, b)
else:
- op = np.vectorize(lambda x: bool(re.match(b, x)) if isinstance(x, str)
+ op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str)
else False)
is_a_array = isinstance(a, np.ndarray)
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 219f7a1585fc2..127a64da38ba3 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -466,6 +466,13 @@ def test_regex_replace_dict_nested(self):
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
+ def test_regex_replace_dict_nested_non_first_character(self):
+ # GH 25259
+ df = pd.DataFrame({'first': ['abc', 'bca', 'cab']})
+ expected = pd.DataFrame({'first': ['.bc', 'bc.', 'c.b']})
+ result = df.replace({'a': '.'}, regex=True)
+ assert_frame_equal(result, expected)
+
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
| The commit ensures that the replacement for regex is not confined to the
beginning of the string but spans all the characters within. The
behaviour is then consistent with versions prior to 0.24.0.
- [x] closes #25259
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25266 | 2019-02-11T10:41:58Z | 2019-02-28T13:55:34Z | 2019-02-28T13:55:33Z | 2019-02-28T18:44:51Z |
BUG: support casting from bool array to EA Integer dtype | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index dacd433f112a5..11507b5d3cb43 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -308,8 +308,9 @@ Numeric
- Bug in :meth:`Series.divmod` and :meth:`Series.rdivmod` which would raise an (incorrect) ``ValueError`` rather than return a pair of :class:`Series` objects as result (:issue:`25557`)
- Raises a helpful exception when a non-numeric index is sent to :meth:`interpolate` with methods which require numeric index. (:issue:`21662`)
- Bug in :meth:`~pandas.eval` when comparing floats with scalar operators, for example: ``x < -0.1`` (:issue:`25928`)
+- Fixed bug where casting all-boolean array to integer extension array failed (:issue:`25211`)
+-
-
-
Conversion
^^^^^^^^^^
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 8e81066de71cb..42aa6a055acca 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -188,6 +188,9 @@ def coerce_to_array(values, dtype, mask=None, copy=False):
raise TypeError("{} cannot be converted to an IntegerDtype".format(
values.dtype))
+ elif is_bool_dtype(values) and is_integer_dtype(dtype):
+ values = np.array(values, dtype=int, copy=copy)
+
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError("{} cannot be converted to an IntegerDtype".format(
values.dtype))
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e23d0bf6a5b83..f0b674596656a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2670,7 +2670,6 @@ def combine(self, other, func, fill_value=None):
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
-
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 759dacd7f221e..066eadc9b68bc 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -613,6 +613,19 @@ def test_to_integer_array_float():
assert result.dtype == Int64Dtype()
+@pytest.mark.parametrize(
+ 'bool_values, int_values, target_dtype, expected_dtype',
+ [([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
+ ([False, True], [0, 1], 'Int64', Int64Dtype()),
+ ([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype())])
+def test_to_integer_array_bool(bool_values, int_values, target_dtype,
+ expected_dtype):
+ result = integer_array(bool_values, dtype=target_dtype)
+ assert result.dtype == expected_dtype
+ expected = integer_array(int_values, dtype=target_dtype)
+ tm.assert_extension_array_equal(result, expected)
+
+
@pytest.mark.parametrize(
'values, to_dtype, result_dtype',
[
| Cast boolean array to int before casting to EA Integer dtype.
- [x] closes #25211
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/25265 | 2019-02-11T09:09:36Z | 2019-05-14T14:15:30Z | 2019-05-14T14:15:30Z | 2019-05-14T14:25:41Z |
BUG: groupby.transform retains timezone information | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index f17c4974cd450..0c78cf01ad300 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -78,7 +78,7 @@ Bug Fixes
**Reshaping**
--
+- Bug in :meth:`pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`)
- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`)
-
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 27e13e86a6e9e..52056a6842ed9 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -964,7 +964,7 @@ def _transform_fast(self, func, func_nm):
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
- out = algorithms.take_1d(func().values, ids)
+ out = algorithms.take_1d(func()._values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index f120402e6e8ca..b645073fcf72a 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -834,3 +834,14 @@ def demean_rename(x):
tm.assert_frame_equal(result, expected)
result_single = df.groupby('group').value.transform(demean_rename)
tm.assert_series_equal(result_single, expected['value'])
+
+
+@pytest.mark.parametrize('func', [min, max, np.min, np.max, 'first', 'last'])
+def test_groupby_transform_timezone_column(func):
+ # GH 24198
+ ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore')
+ result = pd.DataFrame({'end_time': [ts], 'id': [1]})
+ result['max_end_time'] = result.groupby('id').end_time.transform(func)
+ expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id',
+ 'max_end_time'])
+ tm.assert_frame_equal(result, expected)
| - [x] closes #24198
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25264 | 2019-02-11T07:01:00Z | 2019-02-16T16:32:04Z | 2019-02-16T16:32:04Z | 2019-02-16T22:33:36Z |
BUG: Indexing with UTC offset string no longer ignored | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 23f1aabd69ff3..4e2c428415926 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -633,6 +633,16 @@ We are stopping on the included end-point as it is part of the index:
dft2 = dft2.swaplevel(0, 1).sort_index()
dft2.loc[idx[:, '2013-01-05'], :]
+.. versionadded:: 0.25.0
+
+Slicing with string indexing also honors UTC offset.
+
+.. ipython:: python
+
+ df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))
+ df
+ df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']
+
.. _timeseries.slice_vs_exact_match:
Slice vs. Exact Match
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 6e225185ecf84..5716bea7ce694 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -29,7 +29,37 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- :meth:`Timestamp.strptime` will now raise a NotImplementedError (:issue:`25016`)
+.. _whatsnew_0250.api_breaking.utc_offset_indexing:
+
+Indexing with date strings with UTC offsets
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Indexing a :class:`DataFrame` or :class:`Series` with a :class:`DatetimeIndex` with a
+date string with a UTC offset would previously ignore the UTC offset. Now, the UTC offset
+is respected in indexing. (:issue:`24076`, :issue:`16785`)
+
+*Previous Behavior*:
+
+.. code-block:: ipython
+
+ In [1]: df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))
+
+ In [2]: df
+ Out[2]:
+ 0
+ 2019-01-01 00:00:00-08:00 0
+
+ In [3]: df['2019-01-01 00:00:00+04:00':'2019-01-01 01:00:00+04:00']
+ Out[3]:
+ 0
+ 2019-01-01 00:00:00-08:00 0
+
+*New Behavior*:
+
+.. ipython:: ipython
+
+ df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))
+ df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']
.. _whatsnew_0250.api.other:
@@ -38,7 +68,7 @@ Other API Changes
- :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`)
- ``Timestamp`` and ``Timedelta`` scalars now implement the :meth:`to_numpy` method as aliases to :meth:`Timestamp.to_datetime64` and :meth:`Timedelta.to_timedelta64`, respectively. (:issue:`24653`)
--
+- :meth:`Timestamp.strptime` will now rise a ``NotImplementedError`` (:issue:`25016`)
-
.. _whatsnew_0250.deprecations:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b5f3c929a7f36..1cdacc908b663 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6,9 +6,10 @@
import numpy as np
from pandas._libs import (
- Timedelta, algos as libalgos, index as libindex, join as libjoin, lib,
- tslibs)
+ algos as libalgos, index as libindex, join as libjoin, lib)
from pandas._libs.lib import is_datetime_array
+from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp
+from pandas._libs.tslibs.timezones import tz_compare
import pandas.compat as compat
from pandas.compat import range, set_function_name, u
from pandas.compat.numpy import function as nv
@@ -447,7 +448,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
- except tslibs.OutOfBoundsDatetime:
+ except OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
@@ -4867,6 +4868,20 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
+ # GH 16785: If start and end happen to be date strings with UTC offsets
+ # attempt to parse and check that the offsets are the same
+ if (isinstance(start, (compat.string_types, datetime))
+ and isinstance(end, (compat.string_types, datetime))):
+ try:
+ ts_start = Timestamp(start)
+ ts_end = Timestamp(end)
+ except (ValueError, TypeError):
+ pass
+ else:
+ if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
+ raise ValueError("Both dates must have the "
+ "same UTC offset")
+
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 1037e2d9a3bd6..527991f15364b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -32,9 +32,8 @@
from pandas.core.ops import get_op_result_name
import pandas.core.tools.datetimes as tools
-from pandas.tseries import offsets
from pandas.tseries.frequencies import Resolution, to_offset
-from pandas.tseries.offsets import CDay, prefix_mapping
+from pandas.tseries.offsets import CDay, Nano, prefix_mapping
def _new_DatetimeIndex(cls, d):
@@ -826,54 +825,57 @@ def _parsed_string_to_bounds(self, reso, parsed):
lower, upper: pd.Timestamp
"""
+ valid_resos = {'year', 'month', 'quarter', 'day', 'hour', 'minute',
+ 'second', 'minute', 'second', 'microsecond'}
+ if reso not in valid_resos:
+ raise KeyError
if reso == 'year':
- return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
- Timestamp(datetime(parsed.year, 12, 31, 23,
- 59, 59, 999999), tz=self.tz))
+ start = Timestamp(parsed.year, 1, 1)
+ end = Timestamp(parsed.year, 12, 31, 23, 59, 59, 999999)
elif reso == 'month':
d = ccalendar.get_days_in_month(parsed.year, parsed.month)
- return (Timestamp(datetime(parsed.year, parsed.month, 1),
- tz=self.tz),
- Timestamp(datetime(parsed.year, parsed.month, d, 23,
- 59, 59, 999999), tz=self.tz))
+ start = Timestamp(parsed.year, parsed.month, 1)
+ end = Timestamp(parsed.year, parsed.month, d, 23, 59, 59, 999999)
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = ccalendar.get_days_in_month(parsed.year, qe) # at end of month
- return (Timestamp(datetime(parsed.year, parsed.month, 1),
- tz=self.tz),
- Timestamp(datetime(parsed.year, qe, d, 23, 59,
- 59, 999999), tz=self.tz))
+ start = Timestamp(parsed.year, parsed.month, 1)
+ end = Timestamp(parsed.year, qe, d, 23, 59, 59, 999999)
elif reso == 'day':
- st = datetime(parsed.year, parsed.month, parsed.day)
- return (Timestamp(st, tz=self.tz),
- Timestamp(Timestamp(st + offsets.Day(),
- tz=self.tz).value - 1))
+ start = Timestamp(parsed.year, parsed.month, parsed.day)
+ end = start + timedelta(days=1) - Nano(1)
elif reso == 'hour':
- st = datetime(parsed.year, parsed.month, parsed.day,
- hour=parsed.hour)
- return (Timestamp(st, tz=self.tz),
- Timestamp(Timestamp(st + offsets.Hour(),
- tz=self.tz).value - 1))
+ start = Timestamp(parsed.year, parsed.month, parsed.day,
+ parsed.hour)
+ end = start + timedelta(hours=1) - Nano(1)
elif reso == 'minute':
- st = datetime(parsed.year, parsed.month, parsed.day,
- hour=parsed.hour, minute=parsed.minute)
- return (Timestamp(st, tz=self.tz),
- Timestamp(Timestamp(st + offsets.Minute(),
- tz=self.tz).value - 1))
+ start = Timestamp(parsed.year, parsed.month, parsed.day,
+ parsed.hour, parsed.minute)
+ end = start + timedelta(minutes=1) - Nano(1)
elif reso == 'second':
- st = datetime(parsed.year, parsed.month, parsed.day,
- hour=parsed.hour, minute=parsed.minute,
- second=parsed.second)
- return (Timestamp(st, tz=self.tz),
- Timestamp(Timestamp(st + offsets.Second(),
- tz=self.tz).value - 1))
+ start = Timestamp(parsed.year, parsed.month, parsed.day,
+ parsed.hour, parsed.minute, parsed.second)
+ end = start + timedelta(seconds=1) - Nano(1)
elif reso == 'microsecond':
- st = datetime(parsed.year, parsed.month, parsed.day,
- parsed.hour, parsed.minute, parsed.second,
- parsed.microsecond)
- return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
- else:
- raise KeyError
+ start = Timestamp(parsed.year, parsed.month, parsed.day,
+ parsed.hour, parsed.minute, parsed.second,
+ parsed.microsecond)
+ end = start + timedelta(microseconds=1) - Nano(1)
+ # GH 24076
+ # If an incoming date string contained a UTC offset, need to localize
+ # the parsed date to this offset first before aligning with the index's
+ # timezone
+ if parsed.tzinfo is not None:
+ if self.tz is None:
+ raise ValueError("The index must be timezone aware "
+ "when indexing with a date string with a "
+ "UTC offset")
+ start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
+ end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
+ elif self.tz is not None:
+ start = start.tz_localize(self.tz)
+ end = end.tz_localize(self.tz)
+ return start, end
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
is_monotonic = self.is_monotonic
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index e1ba0e1708442..a3ee5fe39769f 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -102,7 +102,7 @@ def test_stringified_slice_with_tz(self):
# GH#2658
import datetime
start = datetime.datetime.now()
- idx = date_range(start=start, freq="1d", periods=10)
+ idx = date_range(start=start, freq="1d", periods=10, tz='US/Eastern')
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index a0c9d9f02385c..64693324521b3 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -396,3 +396,30 @@ def test_selection_by_datetimelike(self, datetimelike, op, expected):
result = op(df.A, datetimelike)
expected = Series(expected, name='A')
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('start', [
+ '2018-12-02 21:50:00+00:00', pd.Timestamp('2018-12-02 21:50:00+00:00'),
+ pd.Timestamp('2018-12-02 21:50:00+00:00').to_pydatetime()
+ ])
+ @pytest.mark.parametrize('end', [
+ '2018-12-02 21:52:00+00:00', pd.Timestamp('2018-12-02 21:52:00+00:00'),
+ pd.Timestamp('2018-12-02 21:52:00+00:00').to_pydatetime()
+ ])
+ def test_getitem_with_datestring_with_UTC_offset(self, start, end):
+ # GH 24076
+ idx = pd.date_range(start='2018-12-02 14:50:00-07:00',
+ end='2018-12-02 14:50:00-07:00', freq='1min')
+ df = pd.DataFrame(1, index=idx, columns=['A'])
+ result = df[start:end]
+ expected = df.iloc[0:3, :]
+ tm.assert_frame_equal(result, expected)
+
+ # GH 16785
+ start = str(start)
+ end = str(end)
+ with pytest.raises(ValueError, match="Both dates must"):
+ df[start:end[:-4] + '1:00']
+
+ with pytest.raises(ValueError, match="The index must be timezone"):
+ df = df.tz_localize(None)
+ df[start:end]
| - [x] closes #24076
- [x] closes #16785
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Additionally, date strings with UTC offsets that are used in indexing are now validated by the following:
- An error is raised if 2 date strings have different UTC offsets
- An error is raised if the date strings have a UTC offset and the index is not timezone aware. | https://api.github.com/repos/pandas-dev/pandas/pulls/25263 | 2019-02-11T03:16:48Z | 2019-02-24T03:37:40Z | 2019-02-24T03:37:39Z | 2019-11-25T12:47:06Z |
REF/TST: resample/test_base.py | diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 48debfa2848e7..8f912ea5c524a 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -28,15 +28,10 @@
period_range, 'pi', datetime(2005, 1, 1), datetime(2005, 1, 10))
TIMEDELTA_RANGE = (timedelta_range, 'tdi', '1 day', '10 day')
-ALL_TIMESERIES_INDEXES = [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE]
-
-
-def pytest_generate_tests(metafunc):
- # called once per each test function
- if metafunc.function.__name__.endswith('_all_ts'):
- metafunc.parametrize(
- '_index_factory,_series_name,_index_start,_index_end',
- ALL_TIMESERIES_INDEXES)
+all_ts = pytest.mark.parametrize(
+ '_index_factory,_series_name,_index_start,_index_end',
+ [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE]
+)
@pytest.fixture
@@ -84,7 +79,8 @@ def test_asfreq_fill_value(series, create_index):
assert_frame_equal(result, expected)
-def test_resample_interpolate_all_ts(frame):
+@all_ts
+def test_resample_interpolate(frame):
# # 12925
df = frame
assert_frame_equal(
@@ -101,8 +97,9 @@ def test_raises_on_non_datetimelike_index():
xp.resample('A').mean()
+@all_ts
@pytest.mark.parametrize('freq', ['M', 'D', 'H'])
-def test_resample_empty_series_all_ts(freq, empty_series, resample_method):
+def test_resample_empty_series(freq, empty_series, resample_method):
# GH12771 & GH12868
if resample_method == 'ohlc':
@@ -121,8 +118,9 @@ def test_resample_empty_series_all_ts(freq, empty_series, resample_method):
assert_series_equal(result, expected, check_dtype=False)
+@all_ts
@pytest.mark.parametrize('freq', ['M', 'D', 'H'])
-def test_resample_empty_dataframe_all_ts(empty_frame, freq, resample_method):
+def test_resample_empty_dataframe(empty_frame, freq, resample_method):
# GH13212
df = empty_frame
# count retains dimensions too
@@ -162,7 +160,8 @@ def test_resample_empty_dtypes(index, dtype, resample_method):
pass
-def test_resample_loffset_arg_type_all_ts(frame, create_index):
+@all_ts
+def test_resample_loffset_arg_type(frame, create_index):
# GH 13218, 15002
df = frame
expected_means = [df.values[i:i + 2].mean()
@@ -202,7 +201,8 @@ def test_resample_loffset_arg_type_all_ts(frame, create_index):
assert_frame_equal(result_how, expected)
-def test_apply_to_empty_series_all_ts(empty_series):
+@all_ts
+def test_apply_to_empty_series(empty_series):
# GH 14313
s = empty_series
for freq in ['M', 'D', 'H']:
@@ -212,7 +212,8 @@ def test_apply_to_empty_series_all_ts(empty_series):
assert_series_equal(result, expected, check_dtype=False)
-def test_resampler_is_iterable_all_ts(series):
+@all_ts
+def test_resampler_is_iterable(series):
# GH 15314
freq = 'H'
tg = TimeGrouper(freq, convention='start')
@@ -223,7 +224,8 @@ def test_resampler_is_iterable_all_ts(series):
assert_series_equal(rv, gv)
-def test_resample_quantile_all_ts(series):
+@all_ts
+def test_resample_quantile(series):
# GH 15023
s = series
q = 0.75
| follow-on from #24377
xref #25197
in #24377, the `pytest_generate_tests` hook was used to parametrize the base tests over date_range, period_range and timedelta_range indexes.
to make it easier to understand the code and use less pytest magic, this PR uses a simpler approach of applying markers to the tests instead of having the test name ending in _all_ts. | https://api.github.com/repos/pandas-dev/pandas/pulls/25262 | 2019-02-11T00:23:41Z | 2019-02-11T12:55:05Z | 2019-02-11T12:55:05Z | 2019-03-13T21:11:32Z |
BUG: DataFrame.join on tz-aware DatetimeIndex | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index b0f287cf0b9f6..3eb58798ab462 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -77,7 +77,7 @@ Bug Fixes
**Reshaping**
-
--
+- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`)
-
**Visualization**
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 4032dc20b2e19..0073f7ab11467 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -181,7 +181,6 @@ Reshaping
- Bug in :func:`pandas.merge` adds a string of ``None`` if ``None`` is assigned in suffixes instead of remain the column name as-is (:issue:`24782`).
- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`)
- :func:`to_records` now accepts dtypes to its `column_dtypes` parameter (:issue:`24895`)
--
Sparse
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index ad3327e694b67..fb50a3c60f705 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -909,7 +909,7 @@ def _get_merge_keys(self):
in zip(self.right.index.levels,
self.right.index.codes)]
else:
- right_keys = [self.right.index.values]
+ right_keys = [self.right.index._values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 5d7a9ab6f4cf0..62c9047b17f3d 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -682,6 +682,28 @@ def test_join_multi_to_multi(self, join_type):
with pytest.raises(ValueError, match=msg):
right.join(left, on=['abc', 'xy'], how=join_type)
+ def test_join_on_tz_aware_datetimeindex(self):
+ # GH 23931
+ df1 = pd.DataFrame(
+ {
+ 'date': pd.date_range(start='2018-01-01', periods=5,
+ tz='America/Chicago'),
+ 'vals': list('abcde')
+ }
+ )
+
+ df2 = pd.DataFrame(
+ {
+ 'date': pd.date_range(start='2018-01-03', periods=5,
+ tz='America/Chicago'),
+ 'vals_2': list('tuvwx')
+ }
+ )
+ result = df1.join(df2.set_index('date'), on='date')
+ expected = df1.copy()
+ expected['vals_2'] = pd.Series([np.nan] * len(expected), dtype=object)
+ assert_frame_equal(result, expected)
+
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
| - [x] closes #23931
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25260 | 2019-02-10T23:33:06Z | 2019-02-11T18:53:43Z | 2019-02-11T18:53:42Z | 2019-03-11T19:17:27Z |
Fix minor error in dynamic load function | diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index bb58449843096..09fb5a30cbc3b 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -4,6 +4,8 @@
import textwrap
import pytest
import numpy as np
+import pandas as pd
+
import validate_docstrings
validate_one = validate_docstrings.validate_one
@@ -1004,6 +1006,32 @@ def test_item_subsection(self, idx, subsection):
assert result[idx][3] == subsection
+class TestDocstringClass(object):
+ @pytest.mark.parametrize('name, expected_obj',
+ [('pandas.isnull', pd.isnull),
+ ('pandas.DataFrame', pd.DataFrame),
+ ('pandas.Series.sum', pd.Series.sum)])
+ def test_resolves_class_name(self, name, expected_obj):
+ d = validate_docstrings.Docstring(name)
+ assert d.obj is expected_obj
+
+ @pytest.mark.parametrize('invalid_name', ['panda', 'panda.DataFrame'])
+ def test_raises_for_invalid_module_name(self, invalid_name):
+ msg = 'No module can be imported from "{}"'.format(invalid_name)
+ with pytest.raises(ImportError, match=msg):
+ validate_docstrings.Docstring(invalid_name)
+
+ @pytest.mark.parametrize('invalid_name',
+ ['pandas.BadClassName',
+ 'pandas.Series.bad_method_name'])
+ def test_raises_for_invalid_attribute_name(self, invalid_name):
+ name_components = invalid_name.split('.')
+ obj_name, invalid_attr_name = name_components[-2], name_components[-1]
+ msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name)
+ with pytest.raises(AttributeError, match=msg):
+ validate_docstrings.Docstring(invalid_name)
+
+
class TestMainFunction(object):
def test_exit_status_for_validate_one(self, monkeypatch):
monkeypatch.setattr(
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index bce33f7e78daa..20f32124a2532 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -267,7 +267,7 @@ def _load_obj(name):
else:
continue
- if 'module' not in locals():
+ if 'obj' not in locals():
raise ImportError('No module can be imported '
'from "{}"'.format(name))
| - [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The `validate_docstrings.py` script dynamically loads modules/classes/functions from its command-line argument.
Currently, if the user supplies an invalid argument, an UnboundLocalError can be obtained:
```
Traceback (most recent call last):
...
File "./scripts/validate_docstrings.py", line 275, in _load_obj
obj = getattr(obj, part)
UnboundLocalError: local variable 'obj' referenced before assignment
```
This PR fixes a check within the relevant function, so that a more informative error can be raised:
```
$ ./scripts/validate_docstrings.py tslibs.Timestamp
Traceback (most recent call last):
...
ImportError: No module can be imported from "tslibs.Timestamp"
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/25256 | 2019-02-10T01:08:56Z | 2019-02-28T13:35:01Z | 2019-02-28T13:35:01Z | 2019-02-28T13:35:04Z |
PERF: avoid copy of dataframe while writing to SQL DBs | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index aaface5415384..51dfffc63cb5f 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -608,9 +608,13 @@ def _execute_insert_multi(self, conn, keys, data_iter):
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(data))
- def insert_data(self):
+ def insert_data(self, start_index, end_index):
+ """
+ Creates list of col vectors for a slice of the dataframe.
+ Slice range specified by start_index, end_index
+ """
if self.index is not None:
- temp = self.frame.copy()
+ temp = self.frame[start_index:end_index].copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
@@ -618,7 +622,11 @@ def insert_data(self):
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
- temp = self.frame
+ if start_index == 0 and end_index == len(self.frame.index):
+ # avoid unnecessary copy by avoiding slicing
+ temp = self.frame
+ else:
+ temp = self.frame[start_index:end_index]
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
@@ -661,8 +669,6 @@ def insert(self, chunksize=None, method=None):
else:
raise ValueError('Invalid parameter `method`: {}'.format(method))
- keys, data_list = self.insert_data()
-
nrows = len(self.frame)
if nrows == 0:
@@ -681,8 +687,9 @@ def insert(self, chunksize=None, method=None):
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
+ keys, data_list = self.insert_data(start_i, end_i)
- chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
+ chunk_iter = zip(*[arr for arr in data_list])
exec_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] This change avoids making an entire copy of the dataframe while writing to SQL DBs with a chunksize. Instead we convert slices of the dataframe into a data list and write to the DB. Saves significant memory when writing large dataframes to SQL DBs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/25255 | 2019-02-10T00:58:17Z | 2019-03-19T04:07:13Z | null | 2019-03-19T04:07:14Z |
API: Ensure DatetimeTZDtype standardizes pytz timezones | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 5841125817d03..23f1aabd69ff3 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -321,6 +321,15 @@ which can be specified. These are computed from the starting point specified by
pd.to_datetime([1349720105100, 1349720105200, 1349720105300,
1349720105400, 1349720105500], unit='ms')
+Constructing a :class:`Timestamp` or :class:`DatetimeIndex` with an epoch timestamp
+with the ``tz`` argument specified will localize the epoch timestamps to UTC
+first then convert the result to the specified time zone.
+
+.. ipython:: python
+
+ pd.Timestamp(1262347200000000000, tz='US/Pacific')
+ pd.DatetimeIndex([1262347200000000000], tz='US/Pacific')
+
.. note::
Epoch times will be rounded to the nearest nanosecond.
@@ -2205,6 +2214,21 @@ you can use the ``tz_convert`` method.
rng_pytz.tz_convert('US/Eastern')
+.. note::
+
+ When using ``pytz`` time zones, :class:`DatetimeIndex` will construct a different
+ time zone object than a :class:`Timestamp` for the same time zone input. A :class:`DatetimeIndex`
+ can hold a collection of :class:`Timestamp` objects that may have different UTC offsets and cannot be
+ succinctly represented by one ``pytz`` time zone instance while one :class:`Timestamp`
+ represents one point in time with a specific UTC offset.
+
+ .. ipython:: python
+
+ dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific')
+ dti.tz
+ ts = pd.Timestamp('2019-01-01', tz='US/Pacific')
+ ts.tz
+
.. warning::
Be wary of conversions between libraries. For some time zones, ``pytz`` and ``dateutil`` have different
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 4032dc20b2e19..70a1cc7dab08e 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -33,7 +33,7 @@ Backwards incompatible API changes
Other API Changes
^^^^^^^^^^^^^^^^^
--
+- :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`)
-
-
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 8b9ac680493a1..f187d786d9f61 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -639,6 +639,7 @@ def __init__(self, unit="ns", tz=None):
if tz:
tz = timezones.maybe_get_tz(tz)
+ tz = timezones.tz_standardize(tz)
elif tz is not None:
raise pytz.UnknownTimeZoneError(tz)
elif tz is None:
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 0fe0a845f5129..710f215686eab 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
+import pytz
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
@@ -302,6 +303,15 @@ def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
+ def test_tz_standardize(self):
+ # GH 24713
+ tz = pytz.timezone('US/Eastern')
+ dr = date_range('2013-01-01', periods=3, tz='US/Eastern')
+ dtype = DatetimeTZDtype('ns', dr.tz)
+ assert dtype.tz == tz
+ dtype = DatetimeTZDtype('ns', dr[0].tz)
+ assert dtype.tz == tz
+
class TestPeriodDtype(Base):
| - [x] closes #24713
- [x] closes #23815
- [x] closes #20257
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Includes some related-ish timezone documentation issues. | https://api.github.com/repos/pandas-dev/pandas/pulls/25254 | 2019-02-09T23:42:52Z | 2019-02-11T14:32:54Z | 2019-02-11T14:32:54Z | 2019-02-11T16:22:08Z |
Revert "BLD: prevent asv from calling sys.stdin.close() by using different launch method" | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c86d5c50705a8..f0567d76659b6 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -104,7 +104,7 @@ jobs:
if git diff upstream/master --name-only | grep -q "^asv_bench/"; then
cd asv_bench
asv machine --yes
- ASV_OUTPUT="$(asv run --quick --show-stderr --python=same --launch-method=spawn)"
+ ASV_OUTPUT="$(asv dev)"
if [[ $(echo "$ASV_OUTPUT" | grep "failed") ]]; then
echo "##vso[task.logissue type=error]Benchmarks run with errors"
echo "$ASV_OUTPUT"
| Reverts pandas-dev/pandas#25237 | https://api.github.com/repos/pandas-dev/pandas/pulls/25253 | 2019-02-09T23:39:43Z | 2019-02-11T13:08:16Z | 2019-02-11T13:08:16Z | 2019-05-12T08:02:49Z |
Refactor groupby group_prod, group_var, group_mean, group_ohlc | diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 950ba3f89ffb7..e6b6e2c8a0055 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -382,6 +382,10 @@ def group_any_all(uint8_t[:] out,
if values[i] == flag_val:
out[lab] = flag_val
+# ----------------------------------------------------------------------
+# group_add, group_prod, group_var, group_mean, group_ohlc
+# ----------------------------------------------------------------------
+
@cython.wraparound(False)
@cython.boundscheck(False)
@@ -396,9 +400,9 @@ def _group_add(floating[:, :] out,
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
floating val, count
- ndarray[floating, ndim=2] sumx, nobs
+ floating[:, :] sumx, nobs
- if not len(values) == len(labels):
+ if len(values) != len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
@@ -407,7 +411,6 @@ def _group_add(floating[:, :] out,
N, K = (<object>values).shape
with nogil:
-
for i in range(N):
lab = labels[i]
if lab < 0:
@@ -433,5 +436,213 @@ def _group_add(floating[:, :] out,
group_add_float32 = _group_add['float']
group_add_float64 = _group_add['double']
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def _group_prod(floating[:, :] out,
+ int64_t[:] counts,
+ floating[:, :] values,
+ const int64_t[:] labels,
+ Py_ssize_t min_count=0):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
+ floating val, count
+ floating[:, :] prodx, nobs
+
+ if not len(values) == len(labels):
+ raise AssertionError("len(index) != len(labels)")
+
+ nobs = np.zeros_like(out)
+ prodx = np.ones_like(out)
+
+ N, K = (<object>values).shape
+
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if val == val:
+ nobs[lab, j] += 1
+ prodx[lab, j] *= val
+
+ for i in range(ncounts):
+ for j in range(K):
+ if nobs[i, j] < min_count:
+ out[i, j] = NAN
+ else:
+ out[i, j] = prodx[i, j]
+
+
+group_prod_float32 = _group_prod['float']
+group_prod_float64 = _group_prod['double']
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+@cython.cdivision(True)
+def _group_var(floating[:, :] out,
+ int64_t[:] counts,
+ floating[:, :] values,
+ const int64_t[:] labels,
+ Py_ssize_t min_count=-1):
+ cdef:
+ Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
+ floating val, ct, oldmean
+ floating[:, :] nobs, mean
+
+ assert min_count == -1, "'min_count' only used in add and prod"
+
+ if not len(values) == len(labels):
+ raise AssertionError("len(index) != len(labels)")
+
+ nobs = np.zeros_like(out)
+ mean = np.zeros_like(out)
+
+ N, K = (<object>values).shape
+
+ out[:, :] = 0.0
+
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if val == val:
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
+
+ for i in range(ncounts):
+ for j in range(K):
+ ct = nobs[i, j]
+ if ct < 2:
+ out[i, j] = NAN
+ else:
+ out[i, j] /= (ct - 1)
+
+
+group_var_float32 = _group_var['float']
+group_var_float64 = _group_var['double']
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def _group_mean(floating[:, :] out,
+ int64_t[:] counts,
+ floating[:, :] values,
+ const int64_t[:] labels,
+ Py_ssize_t min_count=-1):
+ cdef:
+ Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
+ floating val, count
+ floating[:, :] sumx, nobs
+
+ assert min_count == -1, "'min_count' only used in add and prod"
+
+ if not len(values) == len(labels):
+ raise AssertionError("len(index) != len(labels)")
+
+ nobs = np.zeros_like(out)
+ sumx = np.zeros_like(out)
+
+ N, K = (<object>values).shape
+
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+ # not nan
+ if val == val:
+ nobs[lab, j] += 1
+ sumx[lab, j] += val
+
+ for i in range(ncounts):
+ for j in range(K):
+ count = nobs[i, j]
+ if nobs[i, j] == 0:
+ out[i, j] = NAN
+ else:
+ out[i, j] = sumx[i, j] / count
+
+
+group_mean_float32 = _group_mean['float']
+group_mean_float64 = _group_mean['double']
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def _group_ohlc(floating[:, :] out,
+ int64_t[:] counts,
+ floating[:, :] values,
+ const int64_t[:] labels,
+ Py_ssize_t min_count=-1):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, lab
+ floating val, count
+ Py_ssize_t ngroups = len(counts)
+
+ assert min_count == -1, "'min_count' only used in add and prod"
+
+ if len(labels) == 0:
+ return
+
+ N, K = (<object>values).shape
+
+ if out.shape[1] != 4:
+ raise ValueError('Output array must have 4 columns')
+
+ if K > 1:
+ raise NotImplementedError("Argument 'values' must have only "
+ "one dimension")
+ out[:] = np.nan
+
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab == -1:
+ continue
+
+ counts[lab] += 1
+ val = values[i, 0]
+ if val != val:
+ continue
+
+ if out[lab, 0] != out[lab, 0]:
+ out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
+ else:
+ out[lab, 1] = max(out[lab, 1], val)
+ out[lab, 2] = min(out[lab, 2], val)
+ out[lab, 3] = val
+
+
+group_ohlc_float32 = _group_ohlc['float']
+group_ohlc_float64 = _group_ohlc['double']
+
# generated from template
include "groupby_helper.pxi"
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index db7018e1a7254..63cd4d6ac6ff2 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -8,219 +8,6 @@ cdef extern from "numpy/npy_math.h":
float64_t NAN "NPY_NAN"
_int64_max = np.iinfo(np.int64).max
-# ----------------------------------------------------------------------
-# group_prod, group_var, group_mean, group_ohlc
-# ----------------------------------------------------------------------
-
-{{py:
-
-# name, c_type
-dtypes = [('float64', 'float64_t'),
- ('float32', 'float32_t')]
-
-def get_dispatch(dtypes):
-
- for name, c_type in dtypes:
- yield name, c_type
-}}
-
-{{for name, c_type in get_dispatch(dtypes)}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_prod_{{name}}({{c_type}}[:, :] out,
- int64_t[:] counts,
- {{c_type}}[:, :] values,
- const int64_t[:] labels,
- Py_ssize_t min_count=0):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{c_type}} val, count
- ndarray[{{c_type}}, ndim=2] prodx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- prodx = np.ones_like(out)
-
- N, K = (<object>values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- prodx[lab, j] *= val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] < min_count:
- out[i, j] = NAN
- else:
- out[i, j] = prodx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-@cython.cdivision(True)
-def group_var_{{name}}({{c_type}}[:, :] out,
- int64_t[:] counts,
- {{c_type}}[:, :] values,
- const int64_t[:] labels,
- Py_ssize_t min_count=-1):
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{c_type}} val, ct, oldmean
- ndarray[{{c_type}}, ndim=2] nobs, mean
-
- assert min_count == -1, "'min_count' only used in add and prod"
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- mean = np.zeros_like(out)
-
- N, K = (<object>values).shape
-
- out[:, :] = 0.0
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- oldmean = mean[lab, j]
- mean[lab, j] += (val - oldmean) / nobs[lab, j]
- out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
-
- for i in range(ncounts):
- for j in range(K):
- ct = nobs[i, j]
- if ct < 2:
- out[i, j] = NAN
- else:
- out[i, j] /= (ct - 1)
-# add passing bin edges, instead of labels
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_mean_{{name}}({{c_type}}[:, :] out,
- int64_t[:] counts,
- {{c_type}}[:, :] values,
- const int64_t[:] labels,
- Py_ssize_t min_count=-1):
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{c_type}} val, count
- ndarray[{{c_type}}, ndim=2] sumx, nobs
-
- assert min_count == -1, "'min_count' only used in add and prod"
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object>values).shape
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
-
- for i in range(ncounts):
- for j in range(K):
- count = nobs[i, j]
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j] / count
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_ohlc_{{name}}({{c_type}}[:, :] out,
- int64_t[:] counts,
- {{c_type}}[:, :] values,
- const int64_t[:] labels,
- Py_ssize_t min_count=-1):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab
- {{c_type}} val, count
- Py_ssize_t ngroups = len(counts)
-
- assert min_count == -1, "'min_count' only used in add and prod"
-
- if len(labels) == 0:
- return
-
- N, K = (<object>values).shape
-
- if out.shape[1] != 4:
- raise ValueError('Output array must have 4 columns')
-
- if K > 1:
- raise NotImplementedError("Argument 'values' must have only "
- "one dimension")
- out[:] = np.nan
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab == -1:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
- if val != val:
- continue
-
- if out[lab, 0] != out[lab, 0]:
- out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
- else:
- out[lab, 1] = max(out[lab, 1], val)
- out[lab, 2] = min(out[lab, 2], val)
- out[lab, 3] = val
-
-{{endfor}}
-
# ----------------------------------------------------------------------
# group_nth, group_last, group_rank
# ----------------------------------------------------------------------
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 3d28b17750540..888cf78a1c66a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1222,7 +1222,7 @@ def test_group_var_constant(self):
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
- algo = libgroupby.group_var_float64
+ algo = staticmethod(libgroupby.group_var_float64)
dtype = np.float64
rtol = 1e-5
@@ -1245,7 +1245,7 @@ def test_group_var_large_inputs(self):
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
- algo = libgroupby.group_var_float32
+ algo = staticmethod(libgroupby.group_var_float32)
dtype = np.float32
rtol = 1e-2
| Followup to a previous [PR](https://github.com/pandas-dev/pandas/pull/24954).
Continue refactoring cython function implemented in tempita into fused types. | https://api.github.com/repos/pandas-dev/pandas/pulls/25249 | 2019-02-09T20:17:01Z | 2019-02-11T19:39:27Z | 2019-02-11T19:39:27Z | 2019-02-11T19:39:30Z |
Backport PR #25171 on branch 0.24.x | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index c95ed818c9da0..b0f287cf0b9f6 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -52,6 +52,7 @@ Bug Fixes
**I/O**
- Bug in reading a HDF5 table-format ``DataFrame`` created in Python 2, in Python 3 (:issue:`24925`)
+- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`)
- Bug where float indexes could have misaligned values when printing (:issue:`25061`)
-
diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py
index 2bd93b19d4225..971386c91944e 100644
--- a/pandas/io/json/table_schema.py
+++ b/pandas/io/json/table_schema.py
@@ -314,12 +314,13 @@ def parse_table_schema(json, precise_float):
df = df.astype(dtypes)
- df = df.set_index(table['schema']['primaryKey'])
- if len(df.index.names) == 1:
- if df.index.name == 'index':
- df.index.name = None
- else:
- df.index.names = [None if x.startswith('level_') else x for x in
- df.index.names]
+ if 'primaryKey' in table['schema']:
+ df = df.set_index(table['schema']['primaryKey'])
+ if len(df.index.names) == 1:
+ if df.index.name == 'index':
+ df.index.name = None
+ else:
+ df.index.names = [None if x.startswith('level_') else x for x in
+ df.index.names]
return df
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 23c40276072d6..51a1d5488b191 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1262,3 +1262,13 @@ def test_index_false_error_to_json(self, orient):
"'orient' is 'split' or 'table'")
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient, index=False)
+
+ @pytest.mark.parametrize('orient', ['split', 'table'])
+ @pytest.mark.parametrize('index', [True, False])
+ def test_index_false_from_json_to_json(self, orient, index):
+ # GH25170
+ # Test index=False in from_json to_json
+ expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
+ dfjson = expected.to_json(orient=orient, index=index)
+ result = read_json(dfjson, orient=orient)
+ assert_frame_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25248 | 2019-02-09T19:05:24Z | 2019-02-11T12:51:51Z | 2019-02-11T12:51:51Z | 2019-02-11T12:51:53Z |
BUG: pandas Timestamp tz_localize and tz_convert do not preserve `freq` attribute | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 4032dc20b2e19..ecce54b88fb04 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -93,7 +93,7 @@ Timezones
^^^^^^^^^
- Bug in :func:`to_datetime` with ``utc=True`` and datetime strings that would apply previously parsed UTC offsets to subsequent arguments (:issue:`24992`)
--
+- Bug in :func:`Timestamp.tz_localize` and :func:`Timestamp.tz_convert` does not propagate ``freq`` (:issue:`25241`)
-
Numeric
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 25b0b4069cf7c..8a95d2494dfa4 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1187,12 +1187,12 @@ class Timestamp(_Timestamp):
value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz,
ambiguous=ambiguous,
nonexistent=nonexistent)[0]
- return Timestamp(value, tz=tz)
+ return Timestamp(value, tz=tz, freq=self.freq)
else:
if tz is None:
# reset tz
value = tz_convert_single(self.value, UTC, self.tz)
- return Timestamp(value, tz=None)
+ return Timestamp(value, tz=tz, freq=self.freq)
else:
raise TypeError('Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions')
@@ -1222,7 +1222,7 @@ class Timestamp(_Timestamp):
'tz_localize to localize')
else:
# Same UTC timestamp, different time zone
- return Timestamp(self.value, tz=tz)
+ return Timestamp(self.value, tz=tz, freq=self.freq)
astimezone = tz_convert
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 12c1b15733895..b25918417efcd 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -825,6 +825,13 @@ def test_dti_drop_dont_lose_tz(self):
assert ind.tz is not None
+ def test_dti_tz_conversion_freq(self, tz_naive_fixture):
+ # GH25241
+ t3 = DatetimeIndex(['2019-01-01 10:00'], freq='H')
+ assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq
+ t4 = DatetimeIndex(['2019-01-02 12:00'], tz='UTC', freq='T')
+ assert t4.tz_convert(tz='UTC').freq == t4.freq
+
def test_drop_dst_boundary(self):
# see gh-18031
tz = "Europe/Brussels"
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index b2c05d1564a48..c27ef3d0662c8 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -780,6 +780,13 @@ def test_hash_equivalent(self):
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
+ def test_tz_conversion_freq(self, tz_naive_fixture):
+ # GH25241
+ t1 = Timestamp('2019-01-01 10:00', freq='H')
+ assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
+ t2 = Timestamp('2019-01-02 12:00', tz='UTC', freq='T')
+ assert t2.tz_convert(tz='UTC').freq == t2.freq
+
class TestTimestampNsOperations(object):
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index a5855f68127f4..dbe667a166d0a 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -114,7 +114,8 @@ def test_getitem_get(test_data):
# missing
d = test_data.ts.index[0] - BDay()
- with pytest.raises(KeyError, match=r"Timestamp\('1999-12-31 00:00:00'\)"):
+ msg = r"Timestamp\('1999-12-31 00:00:00', freq='B'\)"
+ with pytest.raises(KeyError, match=msg):
test_data.ts[d]
# None
| - [ ] closes #25241
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25247 | 2019-02-09T18:55:27Z | 2019-02-11T13:09:37Z | 2019-02-11T13:09:36Z | 2019-02-11T13:10:05Z |
BUG: Fix exceptions when Series.interpolate's `order` parameter is missing or invalid | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 4032dc20b2e19..cd087c6429d6d 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -139,7 +139,7 @@ Indexing
Missing
^^^^^^^
--
+- Fixed misleading exception message in :meth:`Series.missing` if argument ``order`` is required, but omitted (:issue:`10633`, :issue:`24014`).
-
-
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index ac7d21de442db..4e2c04dba8b04 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1115,24 +1115,18 @@ def check_int_bool(self, inplace):
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
- # try an interp method
- try:
- m = missing.clean_interp_method(method, **kwargs)
- except ValueError:
- m = None
-
- if m is not None:
- r = check_int_bool(self, inplace)
- if r is not None:
- return r
- return self._interpolate(method=m, index=index, values=values,
- axis=axis, limit=limit,
- limit_direction=limit_direction,
- limit_area=limit_area,
- fill_value=fill_value, inplace=inplace,
- downcast=downcast, **kwargs)
-
- raise ValueError("invalid method '{0}' to interpolate.".format(method))
+ # validate the interp method
+ m = missing.clean_interp_method(method, **kwargs)
+
+ r = check_int_bool(self, inplace)
+ if r is not None:
+ return r
+ return self._interpolate(method=m, index=index, values=values,
+ axis=axis, limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ fill_value=fill_value, inplace=inplace,
+ downcast=downcast, **kwargs)
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index cc7bdf95200d1..9acdb1a06b2d1 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -293,9 +293,10 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
- # GH #10633
- if not order:
- raise ValueError("order needs to be specified and greater than 0")
+ # GH #10633, #24014
+ if isna(order) or (order <= 0):
+ raise ValueError("order needs to be specified and greater than 0; "
+ "got order: {}".format(order))
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 985288c439917..f07dd1dfb5fda 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -854,8 +854,23 @@ def test_series_pad_backfill_limit(self):
assert_series_equal(result, expected)
-class TestSeriesInterpolateData():
+@pytest.fixture(params=['linear', 'index', 'values', 'nearest', 'slinear',
+ 'zero', 'quadratic', 'cubic', 'barycentric', 'krogh',
+ 'polynomial', 'spline', 'piecewise_polynomial',
+ 'from_derivatives', 'pchip', 'akima', ])
+def nontemporal_method(request):
+ """ Fixture that returns an (method name, required kwargs) pair.
+
+ This fixture does not include method 'time' as a parameterization; that
+ method requires a Series with a DatetimeIndex, and is generally tested
+ separately from these non-temporal methods.
+ """
+ method = request.param
+ kwargs = dict(order=1) if method in ('spline', 'polynomial') else dict()
+ return method, kwargs
+
+class TestSeriesInterpolateData():
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float),
datetime_series.index)
@@ -875,12 +890,12 @@ def test_interpolate(self, datetime_series, string_series):
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
- # try time interpolation on a non-TimeSeries
- # Only raises ValueError if there are NaNs.
- non_ts = string_series.copy()
- non_ts[0] = np.NaN
- msg = ("time-weighted interpolation only works on Series or DataFrames"
- " with a DatetimeIndex")
+ def test_interpolate_time_raises_for_non_timeseries(self):
+ # When method='time' is used on a non-TimeSeries that contains a null
+ # value, a ValueError should be raised.
+ non_ts = Series([0, 1, 2, np.NaN])
+ msg = ("time-weighted interpolation only works on Series.* "
+ "with a DatetimeIndex")
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method='time')
@@ -1061,21 +1076,35 @@ def test_interp_limit(self):
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
- # GH 9217, make sure limit is an int and greater than 0
- methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
- 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
- 'polynomial', 'spline', 'piecewise_polynomial', None,
- 'from_derivatives', 'pchip', 'akima']
- s = pd.Series([1, 2, np.nan, np.nan, 5])
- msg = (r"Limit must be greater than 0|"
- "time-weighted interpolation only works on Series or"
- r" DataFrames with a DatetimeIndex|"
- r"invalid method '(polynomial|spline|None)' to interpolate|"
- "Limit must be an integer")
- for limit in [-1, 0, 1., 2.]:
- for method in methods:
- with pytest.raises(ValueError, match=msg):
- s.interpolate(limit=limit, method=method)
+ @pytest.mark.parametrize("limit", [-1, 0])
+ def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method,
+ limit):
+ # GH 9217: make sure limit is greater than zero.
+ s = pd.Series([1, 2, np.nan, 4])
+ method, kwargs = nontemporal_method
+ with pytest.raises(ValueError, match="Limit must be greater than 0"):
+ s.interpolate(limit=limit, method=method, **kwargs)
+
+ def test_interpolate_invalid_float_limit(self, nontemporal_method):
+ # GH 9217: make sure limit is an integer.
+ s = pd.Series([1, 2, np.nan, 4])
+ method, kwargs = nontemporal_method
+ limit = 2.0
+ with pytest.raises(ValueError, match="Limit must be an integer"):
+ s.interpolate(limit=limit, method=method, **kwargs)
+
+ @pytest.mark.parametrize("invalid_method", [None, 'nonexistent_method'])
+ def test_interp_invalid_method(self, invalid_method):
+ s = Series([1, 3, np.nan, 12, np.nan, 25])
+
+ msg = "method must be one of.* Got '{}' instead".format(invalid_method)
+ with pytest.raises(ValueError, match=msg):
+ s.interpolate(method=invalid_method)
+
+ # When an invalid method and invalid limit (such as -1) are
+ # provided, the error message reflects the invalid method.
+ with pytest.raises(ValueError, match=msg):
+ s.interpolate(method=invalid_method, limit=-1)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
@@ -1276,11 +1305,20 @@ def test_interp_limit_no_nans(self):
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ['polynomial', 'spline'])
def test_no_order(self, method):
+ # see GH-10633, GH-24014
s = Series([0, 1, np.nan, 3])
- msg = "invalid method '{}' to interpolate".format(method)
+ msg = "You must specify the order of the spline or polynomial"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method)
+ @td.skip_if_no_scipy
+ @pytest.mark.parametrize('order', [-1, -1.0, 0, 0.0, np.nan])
+ def test_interpolate_spline_invalid_order(self, order):
+ s = Series([0, 1, np.nan, 3])
+ msg = "order needs to be specified and greater than 0"
+ with pytest.raises(ValueError, match=msg):
+ s.interpolate(method='spline', order=order)
+
@td.skip_if_no_scipy
def test_spline(self):
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
@@ -1313,19 +1351,6 @@ def test_spline_interpolation(self):
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
- @td.skip_if_no_scipy
- def test_spline_error(self):
- # see gh-10633
- s = pd.Series(np.arange(10) ** 2)
- s[np.random.randint(0, 9, 3)] = np.nan
- msg = "invalid method 'spline' to interpolate"
- with pytest.raises(ValueError, match=msg):
- s.interpolate(method='spline')
-
- msg = "order needs to be specified and greater than 0"
- with pytest.raises(ValueError, match=msg):
- s.interpolate(method='spline', order=0)
-
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
| closes #24014
xref #10633
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Before this change, when the `Series.interpolate()` method was called with invalid arguments, exceptions with informative messages would be raised internally, but then caught and suppressed. An exception with a potentially misleading message would be raised instead.
For example, when interpolate is called with `method='spline'`, an `order` argument must also be supplied. This is checked internally, but when the `order` argument was missing, a confusing error would be raised:
```
>>> import pandas as pd
>>> s = pd.Series([0, 1, pd.np.nan, 3, 4])
>>> s.interpolate(method='spline')
Traceback (most recent call last):
...
ValueError: invalid method 'spline' to interpolate.
```
This problem was reported as issues #10633 and #24014.
After this change, a specific and correct exception (previously caught and discarded internally) is raised:
```
>>> import pandas as pd
>>> s = pd.Series([0, 1, pd.np.nan, 3, 4])
>>> s.interpolate(method='spline')
...
ValueError: You must specify the order of the spline or polynomial.
```
In addition, light validation is now performed on the `order` parameter before it is passed to a `scipy` class. Previously, `pandas` would check if `order` was truthy in the Python sense. If `order` was non-zero and invalid, a scipy error would propagate to the user:
```
>>> s.interpolate(method='spline', order=-1)
Traceback (most recent call last):
...
dfitpack.error: (1<=k && k<=5) failed for 3rd argument k: fpcurf0:k=-1
```
After this change, a more understandable exception is raised in this case:
```
>>> s.interpolate(method='spline', order=-1)
Traceback (most recent call last):
...
ValueError: order needs to be specified and greater than 0
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/25246 | 2019-02-09T18:33:57Z | 2019-02-11T16:08:48Z | 2019-02-11T16:08:48Z | 2019-02-11T16:08:55Z |
Backport PR #25230 on branch 0.24.x (BUG: Fix regression in DataFrame.apply causing RecursionError) | diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index 73df504c89d5b..23d993923e697 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -21,8 +21,8 @@ Fixed Regressions
^^^^^^^^^^^^^^^^^
- Fixed regression in :meth:`DataFrame.all` and :meth:`DataFrame.any` where ``bool_only=True`` was ignored (:issue:`25101`)
-
- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`)
+- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`)
.. _whatsnew_0242.enhancements:
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index b11542622451c..dd05e2022f066 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -397,12 +397,15 @@ def is_dict_like(obj):
True
>>> is_dict_like([1, 2, 3])
False
+ >>> is_dict_like(dict)
+ False
+ >>> is_dict_like(dict())
+ True
"""
- for attr in ("__getitem__", "keys", "__contains__"):
- if not hasattr(obj, attr):
- return False
-
- return True
+ dict_like_attrs = ("__getitem__", "keys", "__contains__")
+ return (all(hasattr(obj, attr) for attr in dict_like_attrs)
+ # [GH 25196] exclude classes
+ and not isinstance(obj, type))
def is_named_tuple(obj):
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 89662b70a39ad..49a66efaffc11 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -159,13 +159,15 @@ def test_is_nested_list_like_fails(obj):
@pytest.mark.parametrize(
- "ll", [{}, {'A': 1}, Series([1])])
+ "ll", [{}, {'A': 1}, Series([1]), collections.defaultdict()])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
-@pytest.mark.parametrize(
- "ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
+@pytest.mark.parametrize("ll", [
+ '1', 1, [1, 2], (1, 2), range(2), Index([1]),
+ dict, collections.defaultdict, Series
+])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index ade527a16c902..a4cd1aa3bacb6 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -318,6 +318,13 @@ def test_apply_reduce_Series(self, float_frame):
result = float_frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
+ def test_apply_reduce_rows_to_dict(self):
+ # GH 25196
+ data = pd.DataFrame([[1, 2], [3, 4]])
+ expected = pd.Series([{0: 1, 1: 3}, {0: 2, 1: 4}])
+ result = data.apply(dict)
+ assert_series_equal(result, expected)
+
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
| Backport PR #25230: BUG: Fix regression in DataFrame.apply causing RecursionError | https://api.github.com/repos/pandas-dev/pandas/pulls/25245 | 2019-02-09T16:54:17Z | 2019-02-09T17:59:26Z | 2019-02-09T17:59:26Z | 2019-02-09T17:59:26Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.