title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Fix 20599 overflow error when int in json is too large | diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 1627b2f4d3ec3..17492f5d5cac9 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -19,7 +19,18 @@
from .table_schema import build_table_schema, parse_table_schema
from pandas.core.dtypes.common import is_period_dtype
-loads = json.loads
+
+def loads(*args, **kwargs):
+ try:
+ return json.loads(*args, **kwargs)
+ except ValueError as err:
+ # if ValueError is from too large aa value return []
+ if err.args[0] == 'Value is too big':
+ return []
+ else:
+ # in case of something like '{"key":b:a:d}' re raise
+ raise
+
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 7e497c395266f..0e72e5d3abd84 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1222,3 +1222,10 @@ def test_index_false_error_to_json(self, orient):
"valid when 'orient' is "
"'split' or 'table'"):
df.to_json(orient=orient, index=False)
+
+ @pytest.mark.parametrize('orient', [
+ 'records', 'index', 'columns', 'values'
+ ])
+ def test_int_overflow(self, orient):
+ bar = json.dumps({'foo': 2**100000})
+ read_json(bar, orient=orient)
| - [ ] closes #20599
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21410 | 2018-06-10T15:18:12Z | 2018-06-10T15:32:50Z | null | 2018-06-10T15:32:50Z |
Two tests didn't properly assert an exception was raised. Fixed. | diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index dd192db4b0eb3..8cffa035721b0 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -583,7 +583,6 @@ def test_get_indexer(self):
def test_reasonable_keyerror(self):
# GH#1062
index = DatetimeIndex(['1/3/2000'])
- try:
+ with pytest.raises(KeyError) as excinfo:
index.get_loc('1/1/2000')
- except KeyError as e:
- assert '2000' in str(e)
+ assert '2000' in str(excinfo.value)
diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py
index e0422249289b7..9dc7b070f889d 100644
--- a/pandas/tests/io/parser/c_parser_only.py
+++ b/pandas/tests/io/parser/c_parser_only.py
@@ -23,21 +23,19 @@
class CParserTests(object):
- def test_buffer_overflow(self):
+ @pytest.mark.parametrize(
+ 'malf',
+ ['1\r1\r1\r 1\r 1\r',
+ '1\r1\r1\r 1\r 1\r11\r',
+ '1\r1\r1\r 1\r 1\r11\r1\r'],
+ ids=['words pointer', 'stream pointer', 'lines pointer'])
+ def test_buffer_overflow(self, malf):
# see gh-9205: test certain malformed input files that cause
# buffer overflows in tokenizer.c
-
- malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
- malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
- malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
-
cperr = 'Buffer overflow caught - possible malformed input file.'
-
- for malf in (malfw, malfs, malfl):
- try:
- self.read_table(StringIO(malf))
- except Exception as err:
- assert cperr in str(err)
+ with pytest.raises(pd.errors.ParserError) as excinfo:
+ self.read_table(StringIO(malf))
+ assert cperr in str(excinfo.value)
def test_buffer_rd_bytes(self):
# see gh-12098: src->buffer in the C parser can be freed twice leading
| - [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21409 | 2018-06-10T12:28:49Z | 2018-06-12T11:37:01Z | 2018-06-12T11:37:01Z | 2018-06-17T12:58:45Z |
BUG : read_json with table='orient' causes unexpected type coercion | diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 1627b2f4d3ec3..641265a95e93b 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -706,10 +706,10 @@ def _try_convert_data(self, name, data, use_dtypes=True,
except (TypeError, ValueError):
pass
- # do't coerce 0-len data
- if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
+ # don't coerce 0-len data
+ if len(data) and (data.dtype == 'object'):
- # coerce ints if we can
+ # coerce int if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
@@ -721,13 +721,23 @@ def _try_convert_data(self, name, data, use_dtypes=True,
# coerce ints to 64
if data.dtype == 'int':
- # coerce floats to 64
+ # coerce ints to 64
try:
data = data.astype('int64')
result = True
except (TypeError, ValueError):
pass
+ # coerce floats to 64
+ if data.dtype == 'float':
+
+ # coerce floats to 64
+ try:
+ data = data.astype('float64')
+ result = True
+ except (TypeError, ValueError):
+ pass
+
return data, result
def _try_convert_to_date(self, data):
| BUG : read_json with table='orient' causes unexpected type coercion #21345
- [x] closes #21345
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Changed `.astype` parameters according to the comments. Should work now
| https://api.github.com/repos/pandas-dev/pandas/pulls/21408 | 2018-06-10T09:45:57Z | 2018-06-19T17:23:51Z | null | 2018-06-19T17:26:00Z |
BUG: Construct Timestamp with tz correctly near DST border | diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index c636e73fbd6c2..1de44ffeb4160 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -73,6 +73,10 @@ Bug Fixes
-
+**Timezones**
+- Bug in :class:`Timestamp` and :class:`DatetimeIndex` where passing a :class:`Timestamp` localized after a DST transition would return a datetime before the DST transition (:issue:`20854`)
+- Bug in comparing :class:`DataFrame`s with tz-aware :class:`DatetimeIndex` columns with a DST transition that raised a ``KeyError`` (:issue:`19970`)
+
**Other**
-
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index f4841e6abb7e8..3cbef82437544 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -347,25 +347,11 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz,
if tz is not None:
tz = maybe_get_tz(tz)
- # sort of a temporary hack
if ts.tzinfo is not None:
- if hasattr(tz, 'normalize') and hasattr(ts.tzinfo, '_utcoffset'):
- ts = tz.normalize(ts)
- obj.value = pydatetime_to_dt64(ts, &obj.dts)
- obj.tzinfo = ts.tzinfo
- else:
- # tzoffset
- try:
- tz = ts.astimezone(tz).tzinfo
- except:
- pass
- obj.value = pydatetime_to_dt64(ts, &obj.dts)
- ts_offset = get_utcoffset(ts.tzinfo, ts)
- obj.value -= int(ts_offset.total_seconds() * 1e9)
- tz_offset = get_utcoffset(tz, ts)
- obj.value += int(tz_offset.total_seconds() * 1e9)
- dt64_to_dtstruct(obj.value, &obj.dts)
- obj.tzinfo = tz
+ # Convert the current timezone to the passed timezone
+ ts = ts.astimezone(tz)
+ obj.value = pydatetime_to_dt64(ts, &obj.dts)
+ obj.tzinfo = ts.tzinfo
elif not is_utc(tz):
ts = _localize_pydatetime(ts, tz)
obj.value = pydatetime_to_dt64(ts, &obj.dts)
diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py
index fa589a0aa4817..3956968173070 100644
--- a/pandas/tests/frame/test_timezones.py
+++ b/pandas/tests/frame/test_timezones.py
@@ -133,3 +133,13 @@ def test_frame_reset_index(self, tz):
xp = df.index.tz
rs = roundtripped.index.tz
assert xp == rs
+
+ @pytest.mark.parametrize('tz', [None, 'America/New_York'])
+ def test_boolean_compare_transpose_tzindex_with_dst(self, tz):
+ # GH 19970
+ idx = date_range('20161101', '20161130', freq='4H', tz=tz)
+ df = DataFrame({'a': range(len(idx)), 'b': range(len(idx))},
+ index=idx)
+ result = df.T == df.T
+ expected = DataFrame(True, index=list('ab'), columns=idx)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index dae69a86910af..b138b79caac76 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -469,6 +469,15 @@ def test_constructor_with_non_normalized_pytz(self, tz):
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
+ def test_constructor_timestamp_near_dst(self):
+ # GH 20854
+ ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
+ Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
+ result = DatetimeIndex(ts)
+ expected = DatetimeIndex([ts[0].to_pydatetime(),
+ ts[1].to_pydatetime()])
+ tm.assert_index_equal(result, expected)
+
class TestTimeSeries(object):
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 193804b66395b..ec37bbbcb6c02 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -278,6 +278,20 @@ def test_wom_len(self, periods):
res = date_range(start='20110101', periods=periods, freq='WOM-1MON')
assert len(res) == periods
+ def test_construct_over_dst(self):
+ # GH 20854
+ pre_dst = Timestamp('2010-11-07 01:00:00').tz_localize('US/Pacific',
+ ambiguous=True)
+ pst_dst = Timestamp('2010-11-07 01:00:00').tz_localize('US/Pacific',
+ ambiguous=False)
+ expect_data = [Timestamp('2010-11-07 00:00:00', tz='US/Pacific'),
+ pre_dst,
+ pst_dst]
+ expected = DatetimeIndex(expect_data)
+ result = date_range(start='2010-11-7', periods=3,
+ freq='H', tz='US/Pacific')
+ tm.assert_index_equal(result, expected)
+
class TestGenRangeGeneration(object):
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index ab87d98fca8eb..4689c7bea626f 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -528,6 +528,14 @@ def test_disallow_setting_tz(self, tz):
with pytest.raises(AttributeError):
ts.tz = tz
+ @pytest.mark.parametrize('offset', ['+0300', '+0200'])
+ def test_construct_timestamp_near_dst(self, offset):
+ # GH 20854
+ expected = Timestamp('2016-10-30 03:00:00{}'.format(offset),
+ tz='Europe/Helsinki')
+ result = Timestamp(expected, tz='Europe/Helsinki')
+ assert result == expected
+
class TestTimestamp(object):
| closes #19970
closes #20854
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
So I _think_ the reason we need to convert a `Timestamp` to `datetime` branch is because of how `Timestamp` behave with `normalize` explained by this https://github.com/pandas-dev/pandas/pull/18618#issuecomment-352271183 | https://api.github.com/repos/pandas-dev/pandas/pulls/21407 | 2018-06-10T07:45:52Z | 2018-06-13T10:51:43Z | 2018-06-13T10:51:42Z | 2018-06-29T14:47:48Z |
BUG: `line terminator` and '\n to \r\n' problem in Windows(Issue #20353) | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 9159f7c8056ec..1940f22fd0661 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -235,6 +235,97 @@ If installed, we now require:
| scipy | 0.18.1 | |
+-----------------+-----------------+----------+
+.. _whatsnew_0240.api_breaking.csv_line_terminator:
+
+`os.linesep` is used for ``line_terminator`` of ``DataFrame.to_csv``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:func:`DataFrame.to_csv` now uses :func:`os.linesep` rather than ``'\n'``
+ for the default line terminator (:issue:`20353`).
+This change only affects when running on Windows, where ``'\r\n'`` was used for line terminator
+even when ``'\n'`` was passed in ``line_terminator``.
+
+Previous Behavior on Windows:
+
+.. code-block:: ipython
+
+In [1]: data = pd.DataFrame({
+ ...: "string_with_lf": ["a\nbc"],
+ ...: "string_with_crlf": ["a\r\nbc"]
+ ...: })
+
+In [2]: # When passing file PATH to to_csv, line_terminator does not work, and csv is saved with '\r\n'.
+ ...: # Also, this converts all '\n's in the data to '\r\n'.
+ ...: data.to_csv("test.csv", index=False, line_terminator='\n')
+
+In [3]: with open("test.csv", mode='rb') as f:
+ ...: print(f.read())
+b'string_with_lf,string_with_crlf\r\n"a\r\nbc","a\r\r\nbc"\r\n'
+
+In [4]: # When passing file OBJECT with newline option to to_csv, line_terminator works.
+ ...: with open("test2.csv", mode='w', newline='\n') as f:
+ ...: data.to_csv(f, index=False, line_terminator='\n')
+
+In [5]: with open("test2.csv", mode='rb') as f:
+ ...: print(f.read())
+b'string_with_lf,string_with_crlf\n"a\nbc","a\r\nbc"\n'
+
+
+New Behavior on Windows:
+
+- By passing ``line_terminator`` explicitly, line terminator is set to that character.
+- The value of ``line_terminator`` only affects the line terminator of CSV,
+ so it does not change the value inside the data.
+
+.. code-block:: ipython
+
+In [1]: data = pd.DataFrame({
+ ...: "string_with_lf": ["a\nbc"],
+ ...: "string_with_crlf": ["a\r\nbc"]
+ ...: })
+
+In [2]: data.to_csv("test.csv", index=False, line_terminator='\n')
+
+In [3]: with open("test.csv", mode='rb') as f:
+ ...: print(f.read())
+b'string_with_lf,string_with_crlf\n"a\nbc","a\r\nbc"\n'
+
+
+- On Windows, the value of ``os.linesep`` is ``'\r\n'``,
+ so if ``line_terminator`` is not set, ``'\r\n'`` is used for line terminator.
+- Again, it does not affect the value inside the data.
+
+.. code-block:: ipython
+
+In [1]: data = pd.DataFrame({
+ ...: "string_with_lf": ["a\nbc"],
+ ...: "string_with_crlf": ["a\r\nbc"]
+ ...: })
+
+In [2]: data.to_csv("test.csv", index=False)
+
+In [3]: with open("test.csv", mode='rb') as f:
+ ...: print(f.read())
+b'string_with_lf,string_with_crlf\r\n"a\nbc","a\r\nbc"\r\n'
+
+
+- For files objects, specifying ``newline`` is not sufficient to set the line terminator.
+ You must pass in the ``line_terminator`` explicitly, even in this case.
+
+.. code-block:: ipython
+
+In [1]: data = pd.DataFrame({
+ ...: "string_with_lf": ["a\nbc"],
+ ...: "string_with_crlf": ["a\r\nbc"]
+ ...: })
+
+In [2]: with open("test2.csv", mode='w', newline='\n') as f:
+ ...: data.to_csv(f, index=False)
+
+In [3]: with open("test2.csv", mode='rb') as f:
+ ...: print(f.read())
+b'string_with_lf,string_with_crlf\r\n"a\nbc","a\r\nbc"\r\n'
+
.. _whatsnew_0240.api_breaking.interval_values:
``IntervalIndex.values`` is now an ``IntervalArray``
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ba050bfc8db77..e12a3f0d225eb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9518,7 +9518,7 @@ def last_valid_index(self):
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression='infer', quoting=None,
- quotechar='"', line_terminator='\n', chunksize=None,
+ quotechar='"', line_terminator=None, chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""
@@ -9583,9 +9583,12 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
- line_terminator : string, default ``'\n'``
+ line_terminator : string, optional
The newline character or character sequence to use in the output
- file.
+ file. Defaults to `os.linesep`, which depends on the OS in which
+ this method is called ('\n' for linux, '\r\n' for Windows, i.e.).
+
+ .. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
tupleize_cols : bool, default False
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 9bf7c5af2cd3a..2056c25ddc5f4 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -417,13 +417,14 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None,
elif is_path:
if compat.PY2:
# Python 2
+ mode = "wb" if mode == "w" else mode
f = open(path_or_buf, mode)
elif encoding:
# Python 3 and encoding
- f = open(path_or_buf, mode, encoding=encoding)
+ f = open(path_or_buf, mode, encoding=encoding, newline="")
elif is_text:
# Python 3 and no explicit encoding
- f = open(path_or_buf, mode, errors='replace')
+ f = open(path_or_buf, mode, errors='replace', newline="")
else:
# Python 3 and binary mode
f = open(path_or_buf, mode)
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 0344689183dbb..115e885a23b96 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -11,6 +11,7 @@
from zipfile import ZipFile
import numpy as np
+import os
from pandas._libs import writers as libwriters
@@ -73,7 +74,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
self.doublequote = doublequote
self.escapechar = escapechar
- self.line_terminator = line_terminator
+ self.line_terminator = line_terminator or os.linesep
self.date_format = date_format
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index e1c3c29ef2846..aa91b7510a2b5 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -2,6 +2,7 @@
from __future__ import print_function
+import os
import csv
import pytest
@@ -841,11 +842,11 @@ def test_to_csv_unicodewriter_quoting(self):
encoding='utf-8')
result = buf.getvalue()
- expected = ('"A","B"\n'
- '1,"foo"\n'
- '2,"bar"\n'
- '3,"baz"\n')
-
+ expected_rows = ['"A","B"',
+ '1,"foo"',
+ '2,"bar"',
+ '3,"baz"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
@@ -855,8 +856,12 @@ def test_to_csv_quote_none(self):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
+
result = buf.getvalue()
- expected = 'A\nhello\n{"hello"}\n'
+ expected_rows = ['A',
+ 'hello',
+ '{"hello"}']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
@@ -865,31 +870,44 @@ def test_to_csv_index_no_leading_comma(self):
buf = StringIO()
df.to_csv(buf, index_label=False)
- expected = ('A,B\n'
- 'one,1,4\n'
- 'two,2,5\n'
- 'three,3,6\n')
+
+ expected_rows = ['A,B',
+ 'one,1,4',
+ 'two,2,5',
+ 'three,3,6']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
+ # see gh-20353
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
- buf = StringIO()
- df.to_csv(buf, line_terminator='\r\n')
- expected = (',A,B\r\n'
- 'one,1,4\r\n'
- 'two,2,5\r\n'
- 'three,3,6\r\n')
- assert buf.getvalue() == expected
+ with ensure_clean() as path:
+ # case 1: CRLF as line terminator
+ df.to_csv(path, line_terminator='\r\n')
+ expected = b',A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n'
- buf = StringIO()
- df.to_csv(buf) # The default line terminator remains \n
- expected = (',A,B\n'
- 'one,1,4\n'
- 'two,2,5\n'
- 'three,3,6\n')
- assert buf.getvalue() == expected
+ with open(path, mode='rb') as f:
+ assert f.read() == expected
+
+ with ensure_clean() as path:
+ # case 2: LF as line terminator
+ df.to_csv(path, line_terminator='\n')
+ expected = b',A,B\none,1,4\ntwo,2,5\nthree,3,6\n'
+
+ with open(path, mode='rb') as f:
+ assert f.read() == expected
+
+ with ensure_clean() as path:
+ # case 3: The default line terminator(=os.linesep)(gh-21406)
+ df.to_csv(path)
+ os_linesep = os.linesep.encode('utf-8')
+ expected = (b',A,B' + os_linesep + b'one,1,4' + os_linesep +
+ b'two,2,5' + os_linesep + b'three,3,6' + os_linesep)
+
+ with open(path, mode='rb') as f:
+ assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
@@ -1069,35 +1087,39 @@ def test_to_csv_quoting(self):
'c_string': ['a', 'b,c'],
})
- expected = """\
-,c_bool,c_float,c_int,c_string
-0,True,1.0,42.0,a
-1,False,3.2,,"b,c"
-"""
+ expected_rows = [',c_bool,c_float,c_int,c_string',
+ '0,True,1.0,42.0,a',
+ '1,False,3.2,,"b,c"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
+ expected_rows = [',c_bool,c_float,c_int,c_string',
+ '0,True,1.0,42.0,a',
+ '1,False,3.2,,"b,c"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
- expected = """\
-"","c_bool","c_float","c_int","c_string"
-"0","True","1.0","42.0","a"
-"1","False","3.2","","b,c"
-"""
+ expected_rows = ['"","c_bool","c_float","c_int","c_string"',
+ '"0","True","1.0","42.0","a"',
+ '"1","False","3.2","","b,c"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
- expected = """\
-"","c_bool","c_float","c_int","c_string"
-0,True,1.0,42.0,"a"
-1,False,3.2,"","b,c"
-"""
+ expected_rows = ['"","c_bool","c_float","c_int","c_string"',
+ '0,True,1.0,42.0,"a"',
+ '1,False,3.2,"","b,c"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
@@ -1108,28 +1130,29 @@ def test_to_csv_quoting(self):
quoting=csv.QUOTE_NONE,
escapechar=None)
- expected = """\
-,c_bool,c_float,c_int,c_string
-0,True,1.0,42.0,a
-1,False,3.2,,b!,c
-"""
+ expected_rows = [',c_bool,c_float,c_int,c_string',
+ '0,True,1.0,42.0,a',
+ '1,False,3.2,,b!,c']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='!')
assert result == expected
- expected = """\
-,c_bool,c_ffloat,c_int,c_string
-0,True,1.0,42.0,a
-1,False,3.2,,bf,c
-"""
+ expected_rows = [',c_bool,c_ffloat,c_int,c_string',
+ '0,True,1.0,42.0,a',
+ '1,False,3.2,,bf,c']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='f')
assert result == expected
# see gh-3503: quoting Windows line terminators
# presents with encoding?
- text = 'a,b,c\n1,"test \r\n",3\n'
+ text_rows = ['a,b,c',
+ '1,"test \r\n",3']
+ text = tm.convert_rows_list_to_csv_str(text_rows)
df = pd.read_csv(StringIO(text))
+
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
assert buf.getvalue() == text
@@ -1138,7 +1161,11 @@ def test_to_csv_quoting(self):
# with multi-indexes
df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
df = df.set_index(['a', 'b'])
- expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
+
+ expected_rows = ['"a","b","c"',
+ '"1","3","5"',
+ '"2","4","6"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
@@ -1150,13 +1177,21 @@ def test_period_index_date_overflow(self):
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
- expected = ',0\n1990-01-01,4\n2000-01-01,5\n3005-01-01,6\n'
+ expected_rows = [',0',
+ '1990-01-01,4',
+ '2000-01-01,5',
+ '3005-01-01,6']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
- expected = ',0\n01-01-1990,4\n01-01-2000,5\n01-01-3005,6\n'
+ expected_rows = [',0',
+ '01-01-1990,4',
+ '01-01-2000,5',
+ '01-01-3005,6']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
# Overflow with pd.NaT
@@ -1166,7 +1201,11 @@ def test_period_index_date_overflow(self):
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
- expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n'
+ expected_rows = [',0',
+ '1990-01-01,4',
+ ',5',
+ '3005-01-01,6']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_multi_index_header(self):
@@ -1179,5 +1218,8 @@ def test_multi_index_header(self):
header = ["a", "b", "c", "d"]
result = df.to_csv(header=header)
- expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n"
+ expected_rows = [',a,b,c,d',
+ '0,1,2,3,4',
+ '1,5,6,7,8']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index ea0b5f5cc0c66..7042cae526207 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -4,10 +4,11 @@
import pytest
+import os
import numpy as np
import pandas as pd
-from pandas import DataFrame
+from pandas import DataFrame, compat
from pandas.util import testing as tm
@@ -132,29 +133,46 @@ def test_to_csv_escapechar(self):
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
- expected = ',col\n0,1\n1,2\n'
+ expected_rows = [',col',
+ '0,1',
+ '1,2']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected
def test_to_csv_decimal(self):
- # GH 781
+ # see gh-781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
- expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
+ expected_rows = [',col1,col2,col3',
+ '0,1,a,10.1']
+ expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected_default
- expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
+ expected_rows = [';col1;col2;col3',
+ '0;1;a;10,1']
+ expected_european_excel = tm.convert_rows_list_to_csv_str(
+ expected_rows)
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
- expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
+ expected_rows = [',col1,col2,col3',
+ '0,1,a,10.10']
+ expected_float_format_default = tm.convert_rows_list_to_csv_str(
+ expected_rows)
assert df.to_csv(float_format='%.2f') == expected_float_format_default
- expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
+ expected_rows = [';col1;col2;col3',
+ '0;1;a;10,10']
+ expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
- # GH 11553: testing if decimal is taken into account for '0.0'
+ # see gh-11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
- expected = 'a,b,c\n0^0,2^2,1\n1^1,3^3,1\n'
+
+ expected_rows = ['a,b,c',
+ '0^0,2^2,1',
+ '1^1,3^3,1']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
@@ -167,7 +185,11 @@ def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
- expected = 'a,b,c\n0,2.20,1\n1,3.30,1\n'
+
+ expected_rows = ['a,b,c',
+ '0,2.20,1',
+ '1,3.30,1']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
@@ -175,22 +197,35 @@ def test_to_csv_float_format(self):
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
- # testing if NaN values are correctly represented in the index
- # GH 11553
+ # see gh-11553
+ #
+ # Testing if NaN values are correctly represented in the index.
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
- expected = "a,b,c\n0.0,0,2\n_,1,3\n"
+ expected_rows = ['a,b,c',
+ '0.0,0,2',
+ '_,1,3']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
- expected = "a,b,c\n_,0,2\n_,1,3\n"
+ expected_rows = ['a,b,c',
+ '_,0,2',
+ '_,1,3']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
- expected = "a,b,c\n0,0,2\n0,1,3\n"
+ expected_rows = ['a,b,c',
+ '0,0,2',
+ '0,1,3']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
@@ -201,63 +236,93 @@ def test_to_csv_date_format(self):
df_day = DataFrame({'A': pd.date_range('20130101', periods=5, freq='d')
})
- expected_default_sec = (',A\n0,2013-01-01 00:00:00\n1,'
- '2013-01-01 00:00:01\n2,2013-01-01 00:00:02'
- '\n3,2013-01-01 00:00:03\n4,'
- '2013-01-01 00:00:04\n')
+ expected_rows = [',A',
+ '0,2013-01-01 00:00:00',
+ '1,2013-01-01 00:00:01',
+ '2,2013-01-01 00:00:02',
+ '3,2013-01-01 00:00:03',
+ '4,2013-01-01 00:00:04']
+ expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv() == expected_default_sec
- expected_ymdhms_day = (',A\n0,2013-01-01 00:00:00\n1,'
- '2013-01-02 00:00:00\n2,2013-01-03 00:00:00'
- '\n3,2013-01-04 00:00:00\n4,'
- '2013-01-05 00:00:00\n')
+ expected_rows = [',A',
+ '0,2013-01-01 00:00:00',
+ '1,2013-01-02 00:00:00',
+ '2,2013-01-03 00:00:00',
+ '3,2013-01-04 00:00:00',
+ '4,2013-01-05 00:00:00']
+ expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert (df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S') ==
expected_ymdhms_day)
- expected_ymd_sec = (',A\n0,2013-01-01\n1,2013-01-01\n2,'
- '2013-01-01\n3,2013-01-01\n4,2013-01-01\n')
+ expected_rows = [',A',
+ '0,2013-01-01',
+ '1,2013-01-01',
+ '2,2013-01-01',
+ '3,2013-01-01',
+ '4,2013-01-01']
+ expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv(date_format='%Y-%m-%d') == expected_ymd_sec
- expected_default_day = (',A\n0,2013-01-01\n1,2013-01-02\n2,'
- '2013-01-03\n3,2013-01-04\n4,2013-01-05\n')
+ expected_rows = [',A',
+ '0,2013-01-01',
+ '1,2013-01-02',
+ '2,2013-01-03',
+ '3,2013-01-04',
+ '4,2013-01-05']
+ expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_day.to_csv() == expected_default_day
assert df_day.to_csv(date_format='%Y-%m-%d') == expected_default_day
- # testing if date_format parameter is taken into account for
- # multi-indexed dataframes (GH 7791)
+ # see gh-7791
+ #
+ # Testing if date_format parameter is taken into account
+ # for multi-indexed DataFrames.
df_sec['B'] = 0
df_sec['C'] = 1
- expected_ymd_sec = 'A,B,C\n2013-01-01,0,1\n'
+
+ expected_rows = ['A,B,C',
+ '2013-01-01,0,1']
+ expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
assert (df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d') ==
expected_ymd_sec)
def test_to_csv_multi_index(self):
- # GH 6618
+ # see gh-6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
- exp = ",1\n,2\n0,1\n"
+ exp_rows = [',1',
+ ',2',
+ '0,1']
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
- exp = "1\n2\n1\n"
+ exp_rows = ['1', '2', '1']
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]),
index=pd.MultiIndex.from_arrays([[1], [2]]))
- exp = ",,1\n,,2\n1,2,1\n"
+ exp_rows = [',,1', ',,2', '1,2,1']
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
- exp = "1\n2\n1\n"
+ exp_rows = ['1', '2', '1']
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
df = DataFrame(
[1], columns=pd.MultiIndex.from_arrays([['foo'], ['bar']]))
- exp = ",foo\n,bar\n0,1\n"
+ exp_rows = [',foo', ',bar', '0,1']
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
- exp = "foo\nbar\n1\n"
+ exp_rows = ['foo', 'bar', '1']
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
def test_to_csv_string_array_ascii(self):
@@ -289,21 +354,113 @@ def test_to_csv_string_array_utf8(self):
with open(path, 'r') as f:
assert f.read() == expected_utf8
+ def test_to_csv_string_with_lf(self):
+ # GH 20353
+ data = {
+ 'int': [1, 2, 3],
+ 'str_lf': ['abc', 'd\nef', 'g\nh\n\ni']
+ }
+ df = pd.DataFrame(data)
+ with tm.ensure_clean('lf_test.csv') as path:
+ # case 1: The default line terminator(=os.linesep)(PR 21406)
+ os_linesep = os.linesep.encode('utf-8')
+ expected_noarg = (
+ b'int,str_lf' + os_linesep +
+ b'1,abc' + os_linesep +
+ b'2,"d\nef"' + os_linesep +
+ b'3,"g\nh\n\ni"' + os_linesep
+ )
+ df.to_csv(path, index=False)
+ with open(path, 'rb') as f:
+ assert f.read() == expected_noarg
+ with tm.ensure_clean('lf_test.csv') as path:
+ # case 2: LF as line terminator
+ expected_lf = (
+ b'int,str_lf\n'
+ b'1,abc\n'
+ b'2,"d\nef"\n'
+ b'3,"g\nh\n\ni"\n'
+ )
+ df.to_csv(path, line_terminator='\n', index=False)
+ with open(path, 'rb') as f:
+ assert f.read() == expected_lf
+ with tm.ensure_clean('lf_test.csv') as path:
+ # case 3: CRLF as line terminator
+ # 'line_terminator' should not change inner element
+ expected_crlf = (
+ b'int,str_lf\r\n'
+ b'1,abc\r\n'
+ b'2,"d\nef"\r\n'
+ b'3,"g\nh\n\ni"\r\n'
+ )
+ df.to_csv(path, line_terminator='\r\n', index=False)
+ with open(path, 'rb') as f:
+ assert f.read() == expected_crlf
+
+ def test_to_csv_string_with_crlf(self):
+ # GH 20353
+ data = {
+ 'int': [1, 2, 3],
+ 'str_crlf': ['abc', 'd\r\nef', 'g\r\nh\r\n\r\ni']
+ }
+ df = pd.DataFrame(data)
+ with tm.ensure_clean('crlf_test.csv') as path:
+ # case 1: The default line terminator(=os.linesep)(PR 21406)
+ os_linesep = os.linesep.encode('utf-8')
+ expected_noarg = (
+ b'int,str_crlf' + os_linesep +
+ b'1,abc' + os_linesep +
+ b'2,"d\r\nef"' + os_linesep +
+ b'3,"g\r\nh\r\n\r\ni"' + os_linesep
+ )
+ df.to_csv(path, index=False)
+ with open(path, 'rb') as f:
+ assert f.read() == expected_noarg
+ with tm.ensure_clean('crlf_test.csv') as path:
+ # case 2: LF as line terminator
+ expected_lf = (
+ b'int,str_crlf\n'
+ b'1,abc\n'
+ b'2,"d\r\nef"\n'
+ b'3,"g\r\nh\r\n\r\ni"\n'
+ )
+ df.to_csv(path, line_terminator='\n', index=False)
+ with open(path, 'rb') as f:
+ assert f.read() == expected_lf
+ with tm.ensure_clean('crlf_test.csv') as path:
+ # case 3: CRLF as line terminator
+ # 'line_terminator' should not change inner element
+ expected_crlf = (
+ b'int,str_crlf\r\n'
+ b'1,abc\r\n'
+ b'2,"d\r\nef"\r\n'
+ b'3,"g\r\nh\r\n\r\ni"\r\n'
+ )
+ df.to_csv(path, line_terminator='\r\n', index=False)
+ with open(path, 'rb') as f:
+ assert f.read() == expected_crlf
+
@tm.capture_stdout
def test_to_csv_stdout_file(self):
# GH 21561
df = pd.DataFrame([['foo', 'bar'], ['baz', 'qux']],
columns=['name_1', 'name_2'])
- expected_ascii = '''\
-,name_1,name_2
-0,foo,bar
-1,baz,qux
-'''
+ expected_rows = [',name_1,name_2',
+ '0,foo,bar',
+ '1,baz,qux']
+ expected_ascii = tm.convert_rows_list_to_csv_str(expected_rows)
+
df.to_csv(sys.stdout, encoding='ascii')
output = sys.stdout.getvalue()
+
assert output == expected_ascii
assert not sys.stdout.closed
+ @pytest.mark.xfail(
+ compat.is_platform_windows(),
+ reason=("Especially in Windows, file stream should not be passed"
+ "to csv writer without newline='' option."
+ "(https://docs.python.org/3.6/library/csv.html#csv.writer)"))
def test_to_csv_write_to_open_file(self):
# GH 21696
df = pd.DataFrame({'a': ['x', 'y', 'z']})
@@ -320,6 +477,42 @@ def test_to_csv_write_to_open_file(self):
with open(path, 'r') as f:
assert f.read() == expected
+ @pytest.mark.skipif(compat.PY2, reason="Test case for python3")
+ def test_to_csv_write_to_open_file_with_newline_py3(self):
+ # see gh-21696
+ # see gh-20353
+ df = pd.DataFrame({'a': ['x', 'y', 'z']})
+ expected_rows = ["x",
+ "y",
+ "z"]
+ expected = ("manual header\n" +
+ tm.convert_rows_list_to_csv_str(expected_rows))
+ with tm.ensure_clean('test.txt') as path:
+ with open(path, 'w', newline='') as f:
+ f.write('manual header\n')
+ df.to_csv(f, header=None, index=None)
+
+ with open(path, 'rb') as f:
+ assert f.read() == bytes(expected, 'utf-8')
+
+ @pytest.mark.skipif(compat.PY3, reason="Test case for python2")
+ def test_to_csv_write_to_open_file_with_newline_py2(self):
+ # see gh-21696
+ # see gh-20353
+ df = pd.DataFrame({'a': ['x', 'y', 'z']})
+ expected_rows = ["x",
+ "y",
+ "z"]
+ expected = ("manual header\n" +
+ tm.convert_rows_list_to_csv_str(expected_rows))
+ with tm.ensure_clean('test.txt') as path:
+ with open(path, 'wb') as f:
+ f.write('manual header\n')
+ df.to_csv(f, header=None, index=None)
+
+ with open(path, 'rb') as f:
+ assert f.read() == expected
+
@pytest.mark.parametrize("to_infer", [True, False])
@pytest.mark.parametrize("read_infer", [True, False])
def test_to_csv_compression(self, compression_only,
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index e08899a03d2d7..b748e9aa5ef5b 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -270,7 +270,7 @@ def test_invalid_url(self):
self.read_html('http://www.a23950sdfa908sd.com',
match='.*Water.*')
except ValueError as e:
- assert str(e) == 'No tables found'
+ assert 'No tables found' in str(e)
@pytest.mark.slow
def test_file_url(self):
diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py
index da84973274933..d968005a25006 100644
--- a/pandas/tests/util/test_testing.py
+++ b/pandas/tests/util/test_testing.py
@@ -12,6 +12,7 @@
assert_index_equal, assert_series_equal,
assert_frame_equal, assert_numpy_array_equal,
RNGContext)
+from pandas import compat
class TestAssertAlmostEqual(object):
@@ -164,6 +165,17 @@ def test_raise_with_traceback(self):
_, _, traceback = sys.exc_info()
raise_with_traceback(e, traceback)
+ def test_convert_rows_list_to_csv_str(self):
+ rows_list = ["aaa", "bbb", "ccc"]
+ ret = tm.convert_rows_list_to_csv_str(rows_list)
+
+ if compat.is_platform_windows():
+ expected = "aaa\r\nbbb\r\nccc\r\n"
+ else:
+ expected = "aaa\nbbb\nccc\n"
+
+ assert ret == expected
+
class TestAssertNumpyArrayEqual(object):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 1bd9043f42634..b5ec0912c5c26 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2983,3 +2983,24 @@ def skipna_wrapper(x):
return alternative(nona)
return skipna_wrapper
+
+
+def convert_rows_list_to_csv_str(rows_list):
+ """
+ Convert list of CSV rows to single CSV-formatted string for current OS.
+
+ This method is used for creating expected value of to_csv() method.
+
+ Parameters
+ ----------
+ rows_list : list
+ The list of string. Each element represents the row of csv.
+
+ Returns
+ -------
+ expected : string
+ Expected output of to_csv() in current OS
+ """
+ sep = os.linesep
+ expected = sep.join(rows_list) + sep
+ return expected
| - [ ] closes #20353
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21406 | 2018-06-10T07:01:37Z | 2018-10-19T12:55:28Z | 2018-10-19T12:55:27Z | 2018-10-19T12:55:35Z |
parametrize tests, unify repeated tests | diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 5369b1a94a956..e310feba9f700 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -89,6 +89,7 @@ def test_to_m8():
class Base(object):
_offset = None
+ d = Timestamp(datetime(2008, 1, 2))
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
@@ -148,6 +149,56 @@ def test_apply_out_of_range(self, tz):
# so ignore
pass
+ def test_offsets_compare_equal(self):
+ # root cause of GH#456: __ne__ was not implemented
+ if self._offset is None:
+ return
+ offset1 = self._offset()
+ offset2 = self._offset()
+ assert not offset1 != offset2
+ assert offset1 == offset2
+
+ def test_rsub(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ assert self.d - self.offset2 == (-self.offset2).apply(self.d)
+
+ def test_radd(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ assert self.d + self.offset2 == self.offset2 + self.d
+
+ def test_sub(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ off = self.offset2
+ with pytest.raises(Exception):
+ off - self.d
+
+ assert 2 * off - off == off
+ assert self.d - self.offset2 == self.d + self._offset(-2)
+ assert self.d - self.offset2 == self.d - (2 * off - off)
+
+ def testMult1(self):
+ if self._offset is None or not hasattr(self, "offset1"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset1 attr
+ return
+ assert self.d + 10 * self.offset1 == self.d + self._offset(10)
+ assert self.d + 5 * self.offset1 == self.d + self._offset(5)
+
+ def testMult2(self):
+ if self._offset is None:
+ return
+ assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
+ assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
+
class TestCommon(Base):
# exected value created by Base._get_offset
@@ -515,6 +566,7 @@ def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
+ self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
@@ -536,7 +588,7 @@ def test_with_offset(self):
assert (self.d + offset) == datetime(2008, 1, 2, 2)
- def testEQ(self):
+ def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
@@ -545,28 +597,9 @@ def test_mul(self):
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
- def testCall(self):
+ def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
- def testRAdd(self):
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def testSub(self):
- off = self.offset2
- pytest.raises(Exception, off.__sub__, self.d)
- assert 2 * off - off == off
-
- assert self.d - self.offset2 == self.d + BDay(-2)
-
- def testRSub(self):
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def testMult1(self):
- assert self.d + 10 * self.offset == self.d + BDay(10)
-
- def testMult2(self):
- assert self.d + (-5 * BDay(-10)) == self.d + BDay(50)
-
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
@@ -678,12 +711,6 @@ def test_apply_large_n(self):
def test_apply_corner(self):
pytest.raises(TypeError, BDay().apply, BMonthEnd())
- def test_offsets_compare_equal(self):
- # root cause of #456
- offset1 = BDay()
- offset2 = BDay()
- assert not offset1 != offset2
-
class TestBusinessHour(Base):
_offset = BusinessHour
@@ -735,7 +762,7 @@ def test_with_offset(self):
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
- def testEQ(self):
+ def test_eq(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
@@ -749,31 +776,22 @@ def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
- def testCall(self):
+ def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
- def testRAdd(self):
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def testSub(self):
+ def test_sub(self):
+ # we have to override test_sub here becasue self.offset2 is not
+ # defined as self._offset(2)
off = self.offset2
- pytest.raises(Exception, off.__sub__, self.d)
+ with pytest.raises(Exception):
+ off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
- def testRSub(self):
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def testMult1(self):
- assert self.d + 5 * self.offset1 == self.d + self._offset(5)
-
- def testMult2(self):
- assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
-
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
@@ -1323,12 +1341,6 @@ def test_apply_nanoseconds(self):
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
- def test_offsets_compare_equal(self):
- # root cause of #456
- offset1 = self._offset()
- offset2 = self._offset()
- assert not offset1 != offset2
-
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
@@ -1367,6 +1379,8 @@ def test_datetimeindex(self):
class TestCustomBusinessHour(Base):
_offset = CustomBusinessHour
+ holidays = ['2014-06-27', datetime(2014, 6, 30),
+ np.datetime64('2014-07-02')]
def setup_method(self, method):
# 2014 Calendar to check custom holidays
@@ -1377,8 +1391,6 @@ def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
- self.holidays = ['2014-06-27', datetime(2014, 6, 30),
- np.datetime64('2014-07-02')]
self.offset2 = CustomBusinessHour(holidays=self.holidays)
def test_constructor_errors(self):
@@ -1407,7 +1419,7 @@ def test_with_offset(self):
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
- def testEQ(self):
+ def test_eq(self):
for offset in [self.offset1, self.offset2]:
assert offset == offset
@@ -1424,33 +1436,19 @@ def testEQ(self):
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
+ def test_sub(self):
+ # override the Base.test_sub implementation because self.offset2 is
+ # defined differently in this class than the test expects
+ pass
+
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
- def testCall(self):
+ def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
- def testRAdd(self):
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def testSub(self):
- off = self.offset2
- pytest.raises(Exception, off.__sub__, self.d)
- assert 2 * off - off == off
-
- assert self.d - self.offset2 == self.d - (2 * off - off)
-
- def testRSub(self):
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def testMult1(self):
- assert self.d + 5 * self.offset1 == self.d + self._offset(5)
-
- def testMult2(self):
- assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
-
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
@@ -1490,49 +1488,51 @@ def test_roll_date_object(self):
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
- def test_normalize(self):
- tests = []
-
- tests.append((CustomBusinessHour(normalize=True,
- holidays=self.holidays),
- {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
- datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
-
- tests.append((CustomBusinessHour(-1, normalize=True,
- holidays=self.holidays),
- {datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
- datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
- datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
- datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
-
- tests.append((CustomBusinessHour(1, normalize=True, start='17:00',
- end='04:00', holidays=self.holidays),
- {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
- datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
- datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
- datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
-
- for offset, cases in tests:
- for dt, expected in compat.iteritems(cases):
- assert offset.apply(dt) == expected
+ normalize_cases = []
+ normalize_cases.append((
+ CustomBusinessHour(normalize=True, holidays=holidays),
+ {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
+
+ normalize_cases.append((
+ CustomBusinessHour(-1, normalize=True, holidays=holidays),
+ {datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
+
+ normalize_cases.append((
+ CustomBusinessHour(1, normalize=True,
+ start='17:00', end='04:00',
+ holidays=holidays),
+ {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
+
+ @pytest.mark.parametrize('norm_cases', normalize_cases)
+ def test_normalize(self, norm_cases):
+ offset, cases = norm_cases
+ for dt, expected in compat.iteritems(cases):
+ assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
@@ -1550,75 +1550,75 @@ def test_onOffset(self):
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
- def test_apply(self):
- tests = []
-
- tests.append((
- CustomBusinessHour(holidays=self.holidays),
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30,
- 30)}))
-
- tests.append((
- CustomBusinessHour(4, holidays=self.holidays),
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30,
- 30)}))
-
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_apply_nanoseconds(self):
- tests = []
-
- tests.append((CustomBusinessHour(holidays=self.holidays),
- {Timestamp('2014-07-01 15:00') + Nano(5): Timestamp(
- '2014-07-01 16:00') + Nano(5),
- Timestamp('2014-07-01 16:00') + Nano(5): Timestamp(
- '2014-07-03 09:00') + Nano(5),
- Timestamp('2014-07-01 16:00') - Nano(5): Timestamp(
- '2014-07-01 17:00') - Nano(5)}))
-
- tests.append((CustomBusinessHour(-1, holidays=self.holidays),
- {Timestamp('2014-07-01 15:00') + Nano(5): Timestamp(
- '2014-07-01 14:00') + Nano(5),
- Timestamp('2014-07-01 10:00') + Nano(5): Timestamp(
- '2014-07-01 09:00') + Nano(5),
- Timestamp('2014-07-01 10:00') - Nano(5): Timestamp(
- '2014-06-26 17:00') - Nano(5), }))
+ apply_cases = []
+ apply_cases.append((
+ CustomBusinessHour(holidays=holidays),
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
+
+ apply_cases.append((
+ CustomBusinessHour(4, holidays=holidays),
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
+
+ @pytest.mark.parametrize('apply_case', apply_cases)
+ def test_apply(self, apply_case):
+ offset, cases = apply_case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ nano_cases = []
+ nano_cases.append(
+ (CustomBusinessHour(holidays=holidays),
+ {Timestamp('2014-07-01 15:00') + Nano(5):
+ Timestamp('2014-07-01 16:00') + Nano(5),
+ Timestamp('2014-07-01 16:00') + Nano(5):
+ Timestamp('2014-07-03 09:00') + Nano(5),
+ Timestamp('2014-07-01 16:00') - Nano(5):
+ Timestamp('2014-07-01 17:00') - Nano(5)}))
+
+ nano_cases.append(
+ (CustomBusinessHour(-1, holidays=holidays),
+ {Timestamp('2014-07-01 15:00') + Nano(5):
+ Timestamp('2014-07-01 14:00') + Nano(5),
+ Timestamp('2014-07-01 10:00') + Nano(5):
+ Timestamp('2014-07-01 09:00') + Nano(5),
+ Timestamp('2014-07-01 10:00') - Nano(5):
+ Timestamp('2014-06-26 17:00') - Nano(5)}))
+
+ @pytest.mark.parametrize('nano_case', nano_cases)
+ def test_apply_nanoseconds(self, nano_case):
+ offset, cases = nano_case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
class TestCustomBusinessDay(Base):
@@ -1629,6 +1629,7 @@ def setup_method(self, method):
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
+ self.offset1 = self.offset
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
@@ -1650,7 +1651,7 @@ def test_with_offset(self):
assert (self.d + offset) == datetime(2008, 1, 2, 2)
- def testEQ(self):
+ def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
@@ -1659,29 +1660,10 @@ def test_mul(self):
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
- def testCall(self):
+ def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
- def testRAdd(self):
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def testSub(self):
- off = self.offset2
- pytest.raises(Exception, off.__sub__, self.d)
- assert 2 * off - off == off
-
- assert self.d - self.offset2 == self.d + CDay(-2)
-
- def testRSub(self):
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def testMult1(self):
- assert self.d + 10 * self.offset == self.d + CDay(10)
-
- def testMult2(self):
- assert self.d + (-5 * CDay(-10)) == self.d + CDay(50)
-
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
@@ -1789,12 +1771,6 @@ def test_apply_large_n(self):
def test_apply_corner(self):
pytest.raises(Exception, CDay().apply, BMonthEnd())
- def test_offsets_compare_equal(self):
- # root cause of #456
- offset1 = CDay()
- offset2 = CDay()
- assert not offset1 != offset2
-
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
@@ -1863,10 +1839,11 @@ class CustomBusinessMonthBase(object):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
- self.offset = self._object()
- self.offset2 = self._object(2)
+ self.offset = self._offset()
+ self.offset1 = self.offset
+ self.offset2 = self._offset(2)
- def testEQ(self):
+ def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
@@ -1875,47 +1852,23 @@ def test_mul(self):
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
- def testRAdd(self):
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def testSub(self):
- off = self.offset2
- pytest.raises(Exception, off.__sub__, self.d)
- assert 2 * off - off == off
-
- assert self.d - self.offset2 == self.d + self._object(-2)
-
- def testRSub(self):
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def testMult1(self):
- assert self.d + 10 * self.offset == self.d + self._object(10)
-
- def testMult2(self):
- assert self.d + (-5 * self._object(-10)) == self.d + self._object(50)
-
- def test_offsets_compare_equal(self):
- offset1 = self._object()
- offset2 = self._object()
- assert not offset1 != offset2
-
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
- _check_roundtrip(self._object())
- _check_roundtrip(self._object(2))
- _check_roundtrip(self._object() * 2)
+ _check_roundtrip(self._offset())
+ _check_roundtrip(self._offset(2))
+ _check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
- off = self._object(weekmask='Mon Wed Fri')
+ off = self._offset(weekmask='Mon Wed Fri')
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
- _object = CBMonthEnd
+ _offset = CBMonthEnd
def test_different_normalize_equals(self):
# equivalent in this special case
@@ -2032,7 +1985,7 @@ def test_datetimeindex(self):
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
- _object = CBMonthBegin
+ _offset = CBMonthBegin
def test_different_normalize_equals(self):
# equivalent in this special case
@@ -2150,6 +2103,9 @@ def test_datetimeindex(self):
class TestWeek(Base):
_offset = Week
+ d = Timestamp(datetime(2008, 1, 2))
+ offset1 = _offset()
+ offset2 = _offset(2)
def test_repr(self):
assert repr(Week(weekday=0)) == "<Week: weekday=0>"
@@ -2157,9 +2113,11 @@ def test_repr(self):
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
- pytest.raises(ValueError, Week, weekday=7)
- tm.assert_raises_regex(
- ValueError, "Day must be", Week, weekday=-1)
+ with pytest.raises(ValueError):
+ Week(weekday=7)
+
+ with pytest.raises(ValueError, match="Day must be"):
+ Week(weekday=-1)
def test_isAnchored(self):
assert Week(weekday=0).isAnchored()
@@ -2204,38 +2162,37 @@ def test_offset(self, case):
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
- def test_onOffset(self):
- for weekday in range(7):
- offset = Week(weekday=weekday)
-
- for day in range(1, 8):
- date = datetime(2008, 1, day)
+ @pytest.mark.parametrize('weekday', range(7))
+ def test_onOffset(self, weekday):
+ offset = Week(weekday=weekday)
- if day % 7 == weekday:
- expected = True
- else:
- expected = False
- assert_onOffset(offset, date, expected)
+ for day in range(1, 8):
+ date = datetime(2008, 1, day)
- def test_offsets_compare_equal(self):
- # root cause of #456
- offset1 = Week()
- offset2 = Week()
- assert not offset1 != offset2
+ if day % 7 == weekday:
+ expected = True
+ else:
+ expected = False
+ assert_onOffset(offset, date, expected)
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
+ offset1 = _offset()
+ offset2 = _offset(2)
def test_constructor(self):
- tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth,
- n=1, week=4, weekday=0)
- tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth,
- n=1, week=-1, weekday=0)
- tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth,
- n=1, week=0, weekday=-1)
- tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth,
- n=1, week=0, weekday=7)
+ with pytest.raises(ValueError, match="^Week"):
+ WeekOfMonth(n=1, week=4, weekday=0)
+
+ with pytest.raises(ValueError, match="^Week"):
+ WeekOfMonth(n=1, week=-1, weekday=0)
+
+ with pytest.raises(ValueError, match="^Day"):
+ WeekOfMonth(n=1, week=0, weekday=-1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ WeekOfMonth(n=1, week=0, weekday=-7)
def test_repr(self):
assert (repr(WeekOfMonth(weekday=1, week=2)) ==
@@ -2322,15 +2279,18 @@ def test_onOffset(self, case):
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
+ offset1 = _offset()
+ offset2 = _offset(2)
def test_constructor(self):
- tm.assert_raises_regex(ValueError, "^N cannot be 0",
- LastWeekOfMonth, n=0, weekday=1)
+ with pytest.raises(ValueError, match="^N cannot be 0"):
+ LastWeekOfMonth(n=0, weekday=1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ LastWeekOfMonth(n=1, weekday=-1)
- tm.assert_raises_regex(ValueError, "^Day", LastWeekOfMonth, n=1,
- weekday=-1)
- tm.assert_raises_regex(
- ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7)
+ with pytest.raises(ValueError, match="^Day"):
+ LastWeekOfMonth(n=1, weekday=7)
def test_offset(self):
# Saturday
@@ -2396,6 +2356,8 @@ def test_onOffset(self, case):
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
+ offset1 = _offset()
+ offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
@@ -2566,6 +2528,8 @@ def test_vectorized_offset_addition(self, klass, assert_func):
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
+ offset1 = _offset()
+ offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
@@ -2773,9 +2737,9 @@ def test_get_offset_name(self):
def test_get_offset():
- with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
+ with pytest.raises(ValueError, match=_INVALID_FREQ_ERROR):
get_offset('gibberish')
- with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
+ with pytest.raises(ValueError, match=_INVALID_FREQ_ERROR):
get_offset('QS-JAN-B')
pairs = [
@@ -2793,7 +2757,7 @@ def test_get_offset():
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
- with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
+ with pytest.raises(ValueError, match=_INVALID_FREQ_ERROR):
get_offset(name)
| There's more to do, but the diff is already pretty big as it is.
There are a handful of test methods that are defined in 5-6 test classes. By moving those to the base class we a) get rid of duplicate code and b) run the tests for all (well, most) `DateOffset` subclasses. | https://api.github.com/repos/pandas-dev/pandas/pulls/21405 | 2018-06-10T02:57:10Z | 2018-06-13T10:54:56Z | 2018-06-13T10:54:56Z | 2018-06-22T03:27:40Z |
fix DateOffset eq to depend on normalize attr | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 68c1839221508..6d5e40d37c8df 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -91,7 +91,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
--
+- Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`)
-
-
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 8bf0d9f915d04..6fd525f02f55c 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -564,11 +564,10 @@ def setup_method(self, method):
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
- # equivalent in this special case
- offset = BDay()
- offset2 = BDay()
- offset2.normalize = True
- assert offset == offset2
+ # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<BusinessDay>'
@@ -734,11 +733,10 @@ def test_constructor_errors(self):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
- # equivalent in this special case
+ # GH#21404 changed __eq__ to return False when `normalize` doesnt match
offset = self._offset()
- offset2 = self._offset()
- offset2.normalize = True
- assert offset == offset2
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
@@ -1397,11 +1395,10 @@ def test_constructor_errors(self):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
- # equivalent in this special case
+ # GH#21404 changed __eq__ to return False when `normalize` doesnt match
offset = self._offset()
- offset2 = self._offset()
- offset2.normalize = True
- assert offset == offset2
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
@@ -1627,11 +1624,10 @@ def setup_method(self, method):
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
- # equivalent in this special case
- offset = CDay()
- offset2 = CDay()
- offset2.normalize = True
- assert offset == offset2
+ # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
@@ -1865,11 +1861,10 @@ class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
- # equivalent in this special case
- offset = CBMonthEnd()
- offset2 = CBMonthEnd()
- offset2.normalize = True
- assert offset == offset2
+ # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
@@ -1982,11 +1977,10 @@ class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
- # equivalent in this special case
- offset = CBMonthBegin()
- offset2 = CBMonthBegin()
- offset2.normalize = True
- assert offset == offset2
+ # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index a5a983bf94bb8..99f97d8fc7bc0 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -290,7 +290,7 @@ def _params(self):
all_paras = self.__dict__.copy()
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
- exclude = ['kwds', 'name', 'normalize', 'calendar']
+ exclude = ['kwds', 'name', 'calendar']
attrs = [(k, v) for k, v in all_paras.items()
if (k not in exclude) and (k[0] != '_')]
attrs = sorted(set(attrs))
| This has a bullet point in #18854 but doesn't appear to have its own issue.
Current:
```
now = pd.Timestamp.now()
off = pd.offsets.Week(n=1, normalize=False)
normed = pd.offsets.Week(n=1, normalize=True)
>>> now + off == now + normed
False
>>> off == normed # PR changes this comparison to False
True
```
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21404 | 2018-06-10T02:09:11Z | 2018-06-13T19:59:20Z | 2018-06-13T19:59:20Z | 2018-06-22T03:27:33Z |
DOC: isin() docstring change DataFrame to pd.DataFrame | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ca572e2e56b6c..21e3cfcdd195e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7283,11 +7283,11 @@ def isin(self, values):
When ``values`` is a Series or DataFrame:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
- >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
- >>> df.isin(other)
+ >>> df2 = pd.DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
+ >>> df.isin(df2)
A B
0 True False
- 1 False False # Column A in `other` has a 3, but not at index 1.
+ 1 False False # Column A in `df2` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
| isin() docstring change DataFrame to pd.DataFrame | https://api.github.com/repos/pandas-dev/pandas/pulls/21403 | 2018-06-09T20:49:25Z | 2018-06-13T12:12:38Z | 2018-06-13T12:12:38Z | 2018-06-13T12:13:27Z |
CI: Enable Spellcheck in dockbuild | diff --git a/ci/build_docs.sh b/ci/build_docs.sh
index 90a666dc34ed7..2bbc2ac2d4000 100755
--- a/ci/build_docs.sh
+++ b/ci/build_docs.sh
@@ -34,6 +34,9 @@ if [ "$DOC" ]; then
echo ./make.py
./make.py
+ echo "Running spell check on documentation"
+ ./make.py spellcheck
+
echo ########################
echo # Create and send docs #
echo ########################
@@ -69,6 +72,7 @@ if [ "$DOC" ]; then
pandas/core/reshape/reshape.py \
pandas/core/reshape/tile.py
+
fi
exit 0
diff --git a/doc/make.py b/doc/make.py
index 4d54a2415a194..c8e06cac502c9 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -307,6 +307,12 @@ def zip_html(self):
def spellcheck(self):
"""Spell check the documentation."""
+ try:
+ from scripts.announce import update_name_wordlist
+ update_name_wordlist()
+ except ImportError:
+ print("Unable to update name wordlist, run "
+ "scripts/announce.py to update the list.")
self._sphinx_build('spelling')
output_location = os.path.join('build', 'spelling', 'output.txt')
with open(output_location) as output:
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 29f947e1144ea..44a5b34383698 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -80,15 +80,17 @@
try:
import sphinxcontrib.spelling # noqa
except ImportError as err:
- logger.warn(('sphinxcontrib.spelling failed to import with error "{}". '
- '`spellcheck` command is not available.'.format(err)))
+ logger.warning(('sphinxcontrib.spelling failed to import with error "{}". '
+ '`spellcheck` command is not available.'.format(err)))
else:
extensions.append('sphinxcontrib.spelling')
+ spelling_ignore_pypi_package_names = True
+ spelling_word_list_filename = [
+ 'spelling_wordlist.txt',
+ 'names_wordlist.txt']
exclude_patterns = ['**.ipynb_checkpoints']
-spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt']
-spelling_ignore_pypi_package_names = True
with open("index.rst") as f:
index_rst_lines = f.readlines()
diff --git a/doc/source/names_wordlist.txt b/doc/source/names_wordlist.txt
index 032883b7febf6..956b87a77899c 100644
--- a/doc/source/names_wordlist.txt
+++ b/doc/source/names_wordlist.txt
@@ -1,1652 +1,1969 @@
-Critchley
-Villanova
-del
-Hohmann
-Rychyk
-Buchkovsky
-Lenail
-Schade
-datetimeindex
-Aly
-Sivji
-Költringer
-Bui
-András
-Novoszáth
-Anh
-Anil
-Pallekonda
-Pitrou
-Linde
-Quinonez
-Varshokar
-Artem
-Bogachev
-Avi
-Azeez
-Oluwafemi
-Auffarth
-Thiel
-Bhavesh
-Poddar
-Haffner
-Naul
-Guinta
-Moreira
-García
-Márquez
-Cheuk
-Chitrank
-Dixit
-Catalfo
-Mazzullo
-Chwala
-Cihan
-Ceyhan
-Brunner
-Riemenschneider
-Dixey
-Garrido
-Sakuma
-Hirschfeld
-Adrián
-Cañones
-Castellano
-Arcos
-Hoese
-Stansby
-Kamau
-Niederhut
-Dror
-Atariah
-Chea
-Kisslinger
-Retkowski
-Sar
-Maeztu
+joders
+Hopper
+Pal
+Lurie
+Deshpande
+Yicheng
+Yu
+Yuval
+O'Leary
+immerrr
+Warth
+Virtanen
+Ibrahim
+amuta
+Wiecki
+alinde
+Lasiman
+omtinez
+Pumar
+kernc
+chaimdemulder
+Gupta
+Camilo
+unknown
Gianpaolo
-Macario
-Giftlin
-Rajaiah
-Olimpio
-Gjelt
-Inggs
-Grzegorz
-Konefał
-Guilherme
-Beltramini
-Pitkeathly
-Mashkoor
-Ferchland
-Haochen
+dwkenefick
+waitingkuo
+Harris
Hissashi
-Sharaf
-Ignasi
-Fosch
-Alves
-Shelvinskyi
-Imanflow
-Ingolf
-Saeta
-Pérez
-Koevska
-Jakub
-Nowacki
-Werkmann
-Zoutkamp
-Bandlow
-Jaume
-Bonet
+Basso
+Pratap
+Hameed
+Arneson
+readyready
+Foti
+Liekezer
+Fox
+M
+Konge
+WillAyd
+Lucas
+Lasley
+Mittal
+Pietro
+Sadvilkar
+Frans
+tmnhat
+Drewrey
+Golubyev
+Reaver
+Craze
+Miholic
+Hayes
+Burns
+timmie
+Stone
+Ondrej
+Irv
+anomrake
+Yanovich
+watercrossing
+Pollastri
+hshimizu77
+Tobias
+Dav
Alammar
-Reback
-Jing
-Qiang
-Goh
-Miralles
-Nothman
-Joeun
-Metz
-Mease
-Schulze
-Jongwony
-Jordi
-Contestí
-Joris
-Bossche
-José
-Fonseca
-Jovixe
-Jörg
-Döpfert
-Ittoku
-Surta
-Kuhl
-Krzysztof
-Chomski
-Ksenia
-Ksenia
-Bobrova
-Kunal
-Gosar
-Kerstein
-Laksh
-Arora
-Geffert
-Licht
+Azari
+Laurent
Takeuchi
-Liudmila
+Gross
+Bark
+Thouis
+Hoegen
+Overmeire
+Jeremy
+Rohan
+Floris
+Pepe
+cel
+Fernando
+Sharaf
+Hyunjin
+robertzk
+Kramer
+Lind
+Schiratti
+O'Brien
+Serafim
+Yang
+xgdgsc
+RahulHP
+Gregory
+froessler
+Michaud
+step4me
+Nicholas
+priyankjain
+fdsi
+Mar
+ruiann
+Kluyver
+Via
+Mulligan
+Senthil
+onesandzeroes
+Levy
+Sales
+Smith
+Hausmann
+Coleman
+Hugo
+zhangjinjie
+Brenda
+Matthieu
+Hamamura
Villalba
-Manan
-Singh
-Manraj
-Singh
-Hemken
-Bibiloni
-Corchero
-Woodbridge
-Journois
-Gallo
-Heikkilä
-Braymer
-Maybeno
-Rocklin
-Roeschke
-Bussonnier
+Ondřej
+Jacques
+Cihan
+Anton
+Bob
+ETF
+nileracecrew
+Riel
+Eberhard
+Jean
+Will
+Jančauskas
+Lima
+Maxim
+Shannon
+Xbar
+Brian
+Jaehoon
+BielStela
+Paddy
+cmazzullo
+Rodolfo
+Pete
+Hughitt
+Tejero
+Mario
+dickreuter
+cgohlke
+Ernesto
+Iblis
+Kittredge
+Negusse
+Neil
+Kuhl
+Ringwalt
+luzpaz
Mikhaylov
-Veksler
-Roos
-Maximiliano
-Greco
-Penkov
-Röttger
-Selik
-Waskom
-Mie
-Kutzma
-Mitar
-Negus
-Münst
-Mortada
-Mehyar
-Braithwaite
-Chmura
-Karagiannakis
-Nipun
-Sadvilkar
-Martensen
-Noémi
-Éltető
-Bilodeau
-Ondrej
-Kokes
-Onno
-Ganssle
+PKEuS
+Du
+akielbowicz
+Mahmoud
+chebee7i
+Farnbauer
+Stahl
+gfr
+Safia
+Figueiredo
+Halen
+Spies
+Li
Mannino
-Reidy
-Oliveira
-Hoffmann
-Ngo
-Battiston
-Pranav
-Suri
-Priyanka
-Ojha
-Pulkit
-Maloo
-Magliocchetti
-Ridhwan
-Luthra
-Kiplang'at
-Rohan
-Pandit
-Rok
-Mihevc
-Rouz
-Azari
-Ryszard
-Kaleta
-Samir
-Musali
-Sinayoko
-Sangwoong
-Yoon
-Sharad
-Vijalapuram
-Shubham
+stephenwlin
+leo
+Zhang
+dieterv77
+Makait
+Bradley
+Tomczak
+manuels
+Jörg
+hshimizu
+Gaudecker
+Ram
+Aiyong
+Jacobowski
Chaudhary
-Sietse
-Brouwer
-Delprete
-Cianciulli
-Childs
-Stijn
-Hoey
-Talitha
-Pumar
-Tarbo
-Fukazawa
-Petrou
-Caswell
+Varun
+Bean
+Brandon
+Turzanska
+Elliott
+Schettino
+Garaud
Hoffmann
-Swast
-Augspurger
-Tulio
-Casagrande
-Tushar
-Tushar
-Mittal
-Upkar
-Lidder
-Vinícius
-Figueiredo
-Vipin
-WBare
-Wenhuan
+Garrett
+Piotr
+Read
+Facai
+Liam3851
+Qin
+Naveen
+Agustín
+Kyle
+Junya
+Chen
+Evripiotis
+Fernandez
+Kamal
+Hugues
+fding
+Martinez
Ayd
-Xbar
-Yaroslav
-Halchenko
-Yee
-Mey
-Yeongseon
-Choe
-Yian
-Yimeng
-Zhang
-Zihao
-Zhao
-adatasetaday
-akielbowicz
-akosel
-alinde
-amuta
-bolkedebruin
-cbertinato
-cgohlke
-charlie
-chris
-csfarkas
-dajcs
-deflatSOCO
-derestle
-htwg
-discort
-dmanikowski
-donK
-elrubio
-fivemok
-fjdiod
-fjetter
-froessler
-gabrielclow
-gfyoung
-ghasemnaddaf
-vetinari
-himanshu
-awasthi
-ignamv
-jayfoad
-jazzmuesli
-jbrockmendel
-jjames
-joaoavf
-joders
-jschendel
-juan
-huguet
-luzpaz
+Rubbert
+zachcp
+Schendel
+Thakral
+Gleave
+Antao
+hesham
+Fiore
+Jevnik
+Liudmila
+Fabio
+Peter
+Cornelius
+Burroughs
+Macario
+sinhrks
+someben
+Braithwaite
+Hsiaoming
+Hayden
+Abernot
+Henriksen
+Pawel
+Chitrank
+DSM
+kiwirob
+La
mdeboc
+Van
miguelmorin
-miker
-miquelcamprodon
-orereta
-ottiP
-peterpanmj
-rafarui
-raph
-readyready
-rmihael
-samghelms
-scriptomation
-sfoo
-stefansimik
-stonebig
-tmnhat
+jaredsnyder
+Ter
+Julia
+Bradish
+Gruen
+Sietse
+Schulze
+abarber
+James
+Carnevale
+yui
+András
+Cottrell
+Davison
+Cook
+Kamil
+Hwang
+F
+lloydkirk
+Juarez
tomneep
-tv
-verakai
-xpvpc
-zhanghui
-API
-Mazzullo
-Riemenschneider
-Hirschfeld
-Stansby
-Dror
-Atariah
-Kisslinger
-Ingolf
+Jacobsmühlen
+Kamau
+Roth
+Gagne
+Benedikt
+Tong
+HagaiHargil
+Fukazawa
+Geier
+Mendel
+Guy
+Waeber
+Casagrande
+Bryce
+Joerg
+Joeun
Werkmann
-Reback
-Joris
-Bossche
-Jörg
-Döpfert
-Kuhl
-Krzysztof
-Chomski
-Licht
-Takeuchi
-Manraj
-Singh
-Braymer
-Waskom
-Mie
-Hoffmann
-Sietse
-Brouwer
-Swast
-Augspurger
-Ayd
-Yee
-Mey
-bolkedebruin
-cgohlke
-derestle
-htwg
-fjdiod
-gabrielclow
-gfyoung
-ghasemnaddaf
-jbrockmendel
-jschendel
-miker
-pypy
-Gleave
-Liaw
-Velasco
-Yee
-Marchenko
-Amol
-Winkler
-亮
-André
-Jonasson
-Sweger
-Berkay
-Haffner
-Tu
-Chankey
-Pathak
-Billington
-Filo
-Gorgolewski
-Mazzullo
-Prinoth
-Stade
-Schuldt
-Moehl
-Himmelstein
-Willmer
-Niederhut
-Wieser
-Fredriksen
-Kint
-Giftlin
-Giftlin
-Rajaiah
-Guilherme
-Beltramini
-Guillem
-Borrell
-Hanmin
-Qin
-Makait
-Hussain
-Tamboli
-Miholic
-Novotný
-Helie
-Schiratti
-Deschenes
-Knupp
-Reback
-Tratner
-Nothman
-Crall
-Mease
-Helmus
-Joris
-Bossche
-Bochi
+Nisar
+mattip
+maxwasserman
+Aleksandr
Kuhlmann
-Brabandere
-Keeton
-Keiron
-Pizzey
-Kernc
-Licht
-Takeuchi
-Kushner
-Jelloul
-Makarov
-Malgorzata
-Turzanska
-Sy
-Roeschke
-Picus
-Mehmet
-Akmanalp
-Gasvoda
-Penkov
-Eubank
-Shteynbuk
-Tillmann
-Pankaj
-Pandey
-Luo
-O'Melveny
-Reidy
-Quackenbush
-Yanovich
-Haessig
-Battiston
-Pradyumna
-Reddy
-Chinthala
-Prasanjit
-Prakash
-Sangwoong
+Farnham
+Haseeb
+bertrandhaut
+broessli
+Scott
+juricast
+Airas
+Chainz
+Vijalapuram
+zach
+Ross
+Ivanov
+Kieran
+d10genes
+pandas
+Riccardo
+Ignasi
+Saumitra
+Jakub
+Woo
+fivemok
+jsexauer
+Åsmund
+John
+Cera
+D'Agostino
+benjamin
+Cantero
+Luthra
+Ittoku
+C
+Corlay
Yoon
-Sudeep
-Telt
-Caswell
-Swast
-Augspurger
-Tuan
-Utkarsh
-Upadhyay
-Vivek
-Aiyong
-WBare
-Yi
-Liu
-Yosuke
-Nakabayashi
-aaron
-abarber
-gh
-aernlund
-agustín
-méndez
-andymaheshw
-aviolov
-bpraggastis
+Fabian
+Morgan243
+Alvaro
+brian
+keitakurita
+Csizsek
+Benoît
+Zanini
+Cloud
+Hiebert
+Dieter
+Justin
+N
+Rudd
+Tucker
cbertinato
-cclauss
-chernrick
-chris
-dkamm
-dwkenefick
-faic
-fding
-gfyoung
-guygoldberg
-hhuuggoo
-huashuai
-ian
-iulia
-jaredsnyder
+Pandit
+Ho
+cel4
+Himmelstein
+Pastafarianist
+Katie
+rmihael
+Fosch
+Wes
+Yoav
+Prettenhofer
+Corchero
+andymaheshw
+azuranski
+Koevska
+Andreas
+Rémy
+Riddell
+Crall
+Taylor
+Maximilian
+Keith
+Catalfo
+Simmons
+Giacomo
+Conway
+Tu
+ojdo
+Jennings
+JanSchulz
+Rishipuri
+Damien
+Michelangelo
+elpres
+mcjcode
+Abdalla
+cr3
+Alessandro
+Experimenting
+Abramowitz
+Hjulstad
+Roman
+Bilodeau
+Rosenfeld
+Ming
+Curtis
+Kuznetsov
+ksanghai
+Haren
+carljv
+Brandt
+Mykola
+Hoyer
+lodagro
+Jordahl
+Brock
+Carol
+Donald
+Miroslav
+Iulius
+Ivan
+Pranav
+Deluz
+Haessig
+dsm
+Garvey
+Swast
+Mitar
+ante328
+Séguin
+Picus
+Höchenberger
+Chase
+Jelloul
jbrockmendel
-jdeschenes
-jebob
-jschendel
-keitakurita
-kernc
-kiwirob
-kjford
-linebp
-lloydkirk
-louispotok
-majiang
-manikbhandari
-matthiashuschle
-mattip
-maxwasserman
-mjlove
-nmartensen
-parchd
-philipphanemann
-rdk
-reidy
-ri
-ruiann
-rvernica
-weigand
-scotthavard
-skwbc
+Huang
+faic
+Bot
+Shapiro
+Santucci
+Gilbert
+Sweger
tobycheese
-tsdlovell
-ysau
-zzgao
-cov
-abaldenko
-adrian
-stepien
-Saxena
-Akash
-Tandon
-Aleksey
-Bilogur
-alexandercbooth
-Amol
-Kahat
-Winkler
-Kittredge
-Anthonios
-Partheniou
-Arco
-Ashish
-Singal
-atbd
-bastewart
-Baurzhan
-Muftakhidinov
-Kandel
-bmagnusson
-carlosdanielcsantos
-Souza
-chaimdemulder
-chris
-Aycock
-Gohlke
-Paulik
-Warth
-Brunner
-Himmelstein
-Willmer
-Krych
-dickreuter
-Dimitris
-Spathis
-discort
-Dmitry
-Suria
-Wijaya
-Stanczak
-dr
-leo
+vytas
+Dillon
dubourg
-dwkenefick
-Andrade
-Ennemoser
-Francesc
-Alted
-Fumito
-Hamamura
-funnycrab
-gfyoung
-Ferroni
-goldenbull
-Jeffries
-Guilherme
-Beltramini
-Guilherme
-Samora
-Hao
-Harshit
-Patni
-Ilya
-Schurov
-Iván
-Vallés
-Pérez
-Leng
-Jaehoon
-Hwang
-Goppert
-Santucci
-Reback
-Crist
-Jevnik
-Nothman
-Zwinck
jojomdt
-Whitmore
-Mease
-Mease
-Joost
-Kranendonk
-Joris
-Bossche
-Bradt
-Santander
-Julien
-Marrec
-Solinsky
-Kacawi
-Kamal
-Kamalaldin
-Shedden
+Konefał
+Parlar
+Contestí
+abarber4gh
Kernc
-Keshav
-Ramaswamy
-Ren
-linebp
-Pedersen
-Cestaro
-Scarabello
-Lukasz
-paramstyle
-Lababidi
-Unserialized
-manu
-manuels
-Roeschke
-mattip
-Picus
-Roeschke
-maxalbert
-Roos
-mcocdawc
-Lamparski
+Chong
+Filimonov
+Kirk
+Ken
+Jake
+Maoyuan
+Caswell
+Gábor
+Hoffman
+ri938
+Paulo
+Wignall
+Pölsterl
+Furnass
+Hohmann
+Le
+Bundock
+Sebastian
+Filo
+Staple
+J
+Parley
+Øystein
+tshauck
+Jon
+Nimmi
+van
+Chlebek
+Jimmy
+kaustuv
+Ferchland
+Rafal
+Yin
+Keeton
+Bjorn
+Charlton
+Lam
+Andrew
+Barr
+Wiktor
+Hudon
+Jöud
+Bussonnier
+Jay
+mschmohl
+Hoover
+Jun
+tworec
+Data
+dr
+Matthew
+Woodbridge
+Ngo
+O'Keeffe
+méndez
+Tuan
+rockg
+Erenrich
+Crystal
+Rychyk
+AdamShamlian
+Bochi
+Marcus
+Uddeshya
+Buitinck
+Sylvain
+Gura
+FAN
+Qiang
+jazzmuesli
+SplashDance
+Samir
+Vipin
+McNamara
+Bast
+Davar
+mjlove12
+Freeman
+Stefania
+ssikdar
+jebob
+Lars
+Jev
+TrigonaMinima
+htwg
+Marchenko
+Jesse
+alinde1
+Davis
+atbd
+Jonathan
+Sami
+W
+Aly
+Ray
+jjames34
+chapman
+Expert
+Yevgeniy
+Berisa
+Russell
+Keiron
+Kranendonk
+Andrade
+Rajaiah
+Batra
+Moreira
+Ali
+Partheniou
+chebee
+Brouwer
+miker985
+Kiplang'at
+Smirnov
+Dmitry
+ajenkins
+De
+zertrin
+Bell
+André
+Negus
+Miller
+Granger
+Dror
+Vikram
+Park
+Henning
+Wouter
+McNeil
+Leng
Michiel
+Kutzma
+parchd
+cclauss
+Patrick
+Ravi
+Suggit
Mikolaj
-Chwalisz
-Miroslav
+josham
+Flores
+Birken
+Greg
+Bray
+rdk1024
+Masurel
+Brandys
+Kerkez
+Drew
+Poulin
+Alexis
+Stefano
+3553x
+Tang
+journal
+Johnny
+Shen
+Henry
+Jan
+Költringer
+Chapman
+with
+Clearfield
+Münst
+manu
+Luis
+the
+Gong
+stepien
+springcoil
+Thompson
+Thierry
+hhuuggoo
+akosel
+Fustin
+Phil
Šedivý
-Mykola
-Golubyev
-Rud
-Halen
-Chmura
-nuffe
-Pankaj
-Pandey
-paul
-mannino
-Pawel
-Kordek
-pbreach
-Csizsek
-Petio
-Petrov
-Ruffwind
-Battiston
-Chromiec
-Prasanjit
-Prakash
-Forgione
-Rouz
-Azari
-Sahil
-Dua
-sakkemo
-Sami
-Salonen
-Sarma
-Tangirala
-scls
-Gsänger
-Sébastien
-Menten
-Heide
-Shyam
-Saladi
-sinhrks
-Sinhrks
-Rauch
-stijnvanhoey
-Adiseshan
-themrmax
-Thiago
-Serafim
+Bui
+c
+Bibiloni
+deolal
+guygoldberg
+Kerstein
+Kanter
+Schuldt
+kdiether
+Marczinowski
Thoralf
-Thrasibule
-Gustafsson
-Augspurger
-tomrod
-Shen
-tzinckgraf
-Uwe
-wandersoncferreira
-watercrossing
-wcwagner
-Wiktor
-Tomczak
-xgdgsc
-Yaroslav
-Halchenko
-Yimeng
-Zhang
-yui
-knk
-Saxena
-Kandel
-Aycock
-Himmelstein
-Willmer
-gfyoung
-hesham
-shabana
-Reback
-Jevnik
-Joris
-Bossche
-Santander
-Shedden
-Keshav
-Ramaswamy
+Holmgren
+Nikos
Scarabello
-Picus
-Roeschke
-Roos
-Mykola
-Golubyev
-Halen
-Pawel
+H
+Kelsey
+ZhuBaohe
+Mitch
+Aye
+Mabel
+Trent
+Utkarsh
+Cutting
+Webber
+Young
+Sarah
+Angelos
+Lamparski
+Bhavesh
+babakkeyvani
+surveymedia
+Artem
+Aycock
Kordek
-Battiston
-sinhrks
-Adiseshan
-Augspurger
-wandersoncferreira
-Yaroslav
-Halchenko
-Chainz
-Anthonios
-Partheniou
-Arash
+Rocha
+Pulkit
+McGuirk
+skwbc
+Marrec
+Muftakhidinov
+manikbhandari
+&
+Laksh
+Critchley
+Arcos
+Mullen
+Han
+Nicolas
+Bozonier
+Hammond
+Carlos
+Lim
+Weston
+Valois
+Welsh
+Tim
+de
+Diego
+ischwabacher
+Stein
+Mike
+Paula
+powers
+Younggun
+Rob
+silentquasar
+readyready15728
+Tamboli
+Mihevc
+1
+Langer
+Casbon
+Petio
+Kaneko
+Douglas
+Nis
+Vankerschaver
+dos
+AllenDowney
+Chromiec
+Mie~~~
+Green
+Olimpio
+boombard
+Grender
+Doug
Rouhani
-Kandel
-chris
-Warth
-Krych
-dubourg
-gfyoung
-Iván
-Vallés
-Pérez
-Reback
-Jevnik
-Mease
-Joris
+jeps
+Joost
+Hocking
+Wolever
+Shubham
+FragLegs
+Matias
+Kehoe
+samghelms
+Petchler
+Mark
+aaron
+Satya
+dsm054
+Musali
+agijsberts
+Dimitri
+Lorenzo
+Kurtis
+Buchkovsky
+d
+david
+cyrusmaher
Bossche
+lenolib
+Charlie
+Aman
+Gallo
+Aleksey
+Alejandro
+Wieser
+Christoph
+Joe
+Hemken
+Barron
+Roberto
+Schaf
+Brundu
+seales
+DrIrv
+Fonseca
+Gautier
+smith
Keshav
-Ramaswamy
-Ren
-mattrijk
-paul
-mannino
-Chromiec
-Sinhrks
-Thiago
-Serafim
-adneu
-agraboso
-Alekseyev
+Blair
+Matt
+Francis
+bmu
+Evans
+Castellano
+wandersoncferreira
+agustín
+Laurens
+Callin
+Tandon
+Schumacher
+Avi
+Whitmore
+pantano
+Christopher
+Ham
+Wu
+Delprete
+Vallés
+gfyoung
+goldenbull
+joaoavf
Vig
-Riddell
-Amol
-Amol
-Agrawal
-Anthonios
-Partheniou
-babakkeyvani
-Kandel
-Baxley
-Camilo
-Cota
-chris
-Grinolds
-Hudon
-Aycock
-Warth
-cmazzullo
-cr
+Velasco
+mpuels
+Pentreath
+Owen
+Priyanka
+zhanghui
+Zwinck
Siladji
-Drewrey
-Lupton
-dsm
-Blancas
-Marsden
-Marczinowski
-O'Donovan
-Gábor
-Lipták
+Singh
+Ro
+Chris
+ganego
+Peng
+Sen
+Maybeno
+Waller
+zquez
+juan
+Talitha
+aviolov
+Wilhelm
+Pitrou
+German
+Colvin
+Torcasso
+Ben
+Mac
+ranarag
+DataOmbudsman
+Luca
+lucas
+Berkay
+Ramaswamy
+Petrou
+scotthavard
+terrytangyuan
+Mehmet
+Agarwal
+Skolasinski
+engstrom
+Fumito
+Muhammad
+Sereger13
+Tony
+Felt
+hunterowens
+Akash
+Schaer,
+svaksha
+Constantine
+wcwagner
+R
+economy
+arsenovic
+Gomez
+Kunal
+Villanova
+Steven
+majiang
+颜发才
+Florian
+Philipp
+Paulik
+Kolchinsky
+louispotok
+funnycrab
+bastewart
+Lobo
+Reyes
+Rosen
+Estève
+Mazières
+Heide
+Allison
+gabrielclow
+Gerard
+dgram0
+Kim
+Schulz
+Yimeng
+anton
+Sten
+Herranz
+Kvam
+Vandenbussche
+Ilya
+Alex
+Forgione
+Braymer
+Lučanin
+Tariq
+Letson
+Sven
+Nate
+Sar
+Victor
+Tommy
+Terry
+MattRijk
+weigand
+Gabe
+Magnus
+Greenhall
+Mortada
+linebp
+Vladimir
+Leahy
+Arakaki
+charalampos
+flying
+Shamim
+tv
+Reynolds
+Tushar
+pijucha
+Niznan
+chris
+phaebz
+pbreach
+McKinney
+Ren
+Suria
+Eubank
+Shedden
+Cant
+rsamson
+ajcr
+Nassrat
Geraint
-gfyoung
-Ferroni
-Haleemur
-harshul
+Pinxing
+Filip
+Mellen
+Tjerk
+Asthana
+Hendrickson
+Prabhjot
+Liaw
+deflatSOCO
+Cavazos
+Hoese
+Prasanjit
+Requeijo
+Paul
+Amici
+Reback
+emilydolson
+Battiston
+BrenBarn
+O
+Myles
+maxalbert
+Da
+Santegoeds
+Klostermann
+del
+T
+Reddy
+Stuart
+Ford
+Jack
Hassan
-Shamim
-iamsimha
-Iulius
+MasonGallo
+Saeta
+elrubio
+Becky
+Grzegorz
+Julian
+Jim
+Jordan
+Drozd
+Fortunov
Nazarov
-jackieleng
-Reback
-Crist
-Jevnik
-Liekezer
-Zwinck
-Erenrich
-Joris
-Bossche
-Howes
-Brandys
-Kamil
-Sindi
-Ka
+Tangirala
+chromy
+Charalampos
+Patni
+I
+Wiebe
Wo
-Shedden
-Kernc
-Brucher
-Roos
-Scherer
-Mortada
-Mehyar
-mpuels
-Haseeb
-Tariq
-Bonnotte
-Virtanen
-Mestemaker
-Pawel
-Kordek
-Battiston
-pijucha
-Jucha
-priyankjain
-Nimmi
-Gieseke
-Keyes
-Sahil
-Dua
-Sanjiv
-Lobo
-Sašo
-Stanovnik
-Heide
-sinhrks
-Sinhrks
+y
+on
+Alted
+Carrucciu
+sl
+Morgan
+Sanjiv
+Armin
+Stansby
+Yuichiro
+Howard
+Lin
+Ni
+Clemens
+Leon
+(Terry)
+Renoud
+Prakash
+Donut
+Bobrova
+Krzysztof
+Uga
+Kushner
+csfarkas
+zur
+Chmura
+alex
+Childs
+m
+shabana@hotmail
+Toth
+Robert
+Poddar
+iamsimha
+ptype
+Shelvinskyi
Kappel
-Choi
+BorisVerk
+Jozef
+deWerd
+Baurzhan
+nuffe
+Manan
+ElDen
+Koch
+Toby
+Liam
+Yi
+Hyungtae
+Ashwini
+msund
+Lawrence
+Würl
+Seabold
+alcorn
+Maloo
+Atkinson
+Frank
+Karmel
+Jancauskas
+Travis
+Brucher
+paul
+Sinhrks
+sakkemo
+mcocdawc
+scls19fr
+TomAugspurger
+jreback
+Agrawal
+Sipos
+Heikkilä
+StephenVoland
+Simone
+Clark
+KOBAYASHI
+Lababidi
+Barber
+Stoafer
+Bayle
+Winterflower
+Storck
+Wijaya
+davidovitch
+Telt
+(EOL)
+Akmanalp
+Montana
+Jing
+Haenel
+Blais
+Piersall
+mannino
+Mehyar
+daydreamt
+Gregg
+README
+Chinthala
+Vyomkesh
+Andrey
+Riemenschneider
+Papaloizou
+Carey
+Doran
+David
+Saxena
+Jeff
+Jaume
+Lee
+helger
+Surta
+Henderson
+SimonBaron
+hsperr
+Buran
+Gianluca
+Draper
+Larry
+jjames
+huguet
+She
+Inggs
+Pankaj
+Singhal
+AbdealiJK
+davidjameshumphreys
+Novoszáth
+Thayer
Sudarshan
-Konge
-Caswell
-Augspurger
+lexual
+nose
+Jonasson
+zzgao
+Maximiliano
+ghasemnaddaf
+bolkedebruin
+Vázquez
+Hatem
+Kate
+SHEN
+Lecher
+Hans
Uwe
-Hoffmann
-wcwagner
-Xiang
-Zhang
-Yadunandan
-Yaroslav
-Halchenko
+Adiseshan
+Anthonios
+Pizzey
+Varshokar
+Kenneth
+Kelly
+Léone
+Bonnotte
+Joshua
+Zaslavsky
+Emery
+jayfoad
+Gilmer
+Baxley
+reef
+Mueller
YG
-Riku
-Yuichiro
-Kaneko
-yui
-knk
-zhangjinjie
-znmean
-颜发才
-Yan
-Facai
-Fiore
-Gartland
-Bastiaan
-Benoît
-Vinot
-Fustin
-Freitas
-Ter
-Livschitz
-Gábor
-Lipták
-Hassan
-Kibirige
-Iblis
-Saeta
-Pérez
-Wolosonovich
-Reback
-Jevnik
-Joris
-Bossche
-Storck
-Ka
-Wo
-Shedden
-Kieran
-O'Mahony
-Lababidi
-Maoyuan
-Liu
-Wittmann
-MaxU
+Razoumov
+fding253
+Baeza
+Reyfman
+Caleb
+Souza
+Imanflow
+Keyes
+Drapala
+bwignall
+Yian
+docs
+Polo
+Iain
+adatasetaday
+Hansen
+Sam
+bashtage
+Pyry
+Veksler
+Sauer
+jmorris0x0
+RenzoBertocchi
Roos
+Gina
+Lewis
+locojaydev
+themrmax
+Yosuke
+Pinger
Droettboom
-Eubank
-Bonnotte
-Virtanen
-Battiston
-Prabhjot
-Singh
-Augspurger
-Aiyong
-Winand
-Xbar
-Yan
-Facai
-adneu
-ajenkins
-cargometrics
-behzad
-nouri
-chinskiy
-gfyoung
-jeps
-jonaslb
-kotrfa
-nileracecrew
-onesandzeroes
-sinhrks
-tsdlovell
-Alekseyev
-Rosenfeld
-Anthonios
-Partheniou
-Sipos
-Carroux
-Aycock
+Wilfred
+Hamish
+hugo
+Choi
+Brunner
+McBride
+Sanders
+Stanovnik
+Choe
+Suri
Scanlin
-Da
-Dorozhko
+Jonas
+Sergio
+Wittmann
+Reeson
+Acanthostega
+Gilberto
+Kandel
+deCarvalho
+Cody
+Benjamin
+Artemy
+Chang
+Iván
+Khomenko
+Grant
+Martin
+miquelcamprodon
+Arel
+fjetter
+Cestaro
+Bourque
+Luo
+thauck
+gh
+Gibboni
+García
+clham
+Milo
+miker
+Guilherme
+Livschitz
+conquistador1492
+Pandey
+Azeez
+Zach
+Halchenko
+cargometrics
+awasthi
+Ferroni
+shabana
+Gutierrez
+Hall
+Ashish
+Chau
+Borrell
+Balaraman
+Prinoth
+thatneat
+Siu
+Taifi
+Oliveira
+ankostis
+Mandel
+fabriziop
+Shawn
+Sexauer
+Williams
+Stewart
+Antonio
+Israel
+Code
+Philip
+Dominik
+matthiashuschle
+Perez
+Shahapure
O'Donovan
-Cleary
-Gianluca
-Jeffries
-Horel
-Schwabacher
-Deschenes
-Reback
-Jevnik
-Fremlin
-Hoersch
-Joris
-Bossche
-Joris
-Vankerschaver
-Ka
-Wo
-Keming
-Zhang
-Shedden
-Farrugia
-Lurie
-Roos
-Mayank
-Asthana
-Mortada
-Mehyar
-Moussa
-Taifi
+bmagnusson
+Haochen
+Zeke
+RuiDC
+davidshinn
+jaimefrio
+ottiP
+Dixit
+Daniel
+Brabandere
+Fredriksen
+OXPHOS
+Gay
+Lutz
+bot
+Éltető
+Sangmin
+Roger
+Ryszard
+tom
+Waskom
+stefansimik
Navreet
-Bonnotte
-Reiners
-Gura
-Battiston
-Carnevale
+Pallekonda
+Slavitt
+Starr
+Kern
+Pérez
+Stock
+carlosdanielcsantos
+com
+Yoong
+alexandercbooth
+Marc
+Mashkoor
+tv3141
+José
+Anil
+Dražen
+Gaudio
+Burr
+Luc
+Kokes
+Gsänger
+Ortiz
+knk
+Aaron
+conmai
+Ajamian
+Nolan
+hack
+Upadhyay
+Rocklin
+Gerigk
+Sanghee
+Stefan
+GOD
+Gjelt
Rinoc
-Rishipuri
-Sangmin
-Lasley
+Baptiste
+Ursa
+123
+Russo
+Vardhan
+Forbidden
+Curt
+der
+Yulong
+Abraham
+Lidder
+nipunreddevil
+Stanczak
+Sangwoong
+Villas
+Bernhard
+Elliot
+michaelws
+ribonoous
+himanshu
+Niederhut
+aernlund
+DaanVanHauwermeiren
+abaldenko
+Grady
+Arco
Sereger
-Seabold
-Thierry
-Moisan
-Caswell
-Augspurger
-Hauck
-Varun
-Yoong
-Kang
-Lim
-Yoshiki
-Vázquez
-Baeza
-Joong
-Younggun
-Yuval
-Langer
-argunov
-behzad
-nouri
-boombard
-brian
-pantano
-chromy
-daniel
-dgram
-gfyoung
-hcontrast
-jfoo
-kaustuv
-deolal
-llllllllll
-ranarag
-rockg
-scls
-seales
-sinhrks
-srib
-surveymedia
-tworec
-Drozd
-Anthonios
-Partheniou
+Shahul
+Schiller
+Bandlow
+Fitzgerald
+Josh
+Francesco
+Steve
+Andy
+Jaidev
+Tillmann
+Lubbock
+Eduardo
+Winkler
+Zora
+William
+Farrugia
+stahlous
+Garrido
+Rudolph
+SleepingPills
+Jordi
+Olivier
+Crist
+Leif
+theandygross
Berendt
-Piersall
-Hamed
-Saljooghinejad
-Iblis
-Deschenes
-Reback
-Callin
-Joris
-Bossche
-Ka
-Wo
-Loïc
-Séguin
-Luo
-Yicheng
-Magnus
-Jöud
-Leonhardt
-Roos
-Bonnotte
-Pastafarianist
-Chong
-Schaf
-Philipp
-deCarvalho
-Khomenko
-Rémy
-Léone
-Thierry
-Moisan
-Augspurger
-Varun
-Hoffmann
-Winterflower
-Younggun
-ajcr
-azuranski
+Howes
+Ted
+Igor
+Illia
+/
+peterpanmj
+Reiners
+Grinolds
+znmean
+Meeren
+Gambogi
+Molina
+Quistorff
+knows
+Viktor
+Harshit
behzad
-nouri
-cel
-emilydolson
-hironow
-lexual
-llllllllll
-rockg
-silentquasar
-sinhrks
-taeold
-unparseable
-Rothberg
-Bedini
-Rosenfeld
-Anthonios
-Partheniou
-Artemy
-Kolchinsky
+Pekar
+Yaroslav
+donK
+Julius
+Cianciulli
+Kesters
+dajcs
+acorbe
+Anthony
+Brad
+Pathak
+MinRK
+Gasvoda
+颜发才(Yan
+Júlio
+Garcia
+danielballan
+Deschenes
+Bastiaan
+Giacometti
+Ruffwind
+JosephWagner
+Brett
+Mathieu
+rafarui
+Kerr
+Kang
+P
+thuske
+newaxis
+Spencer
+Spathis
+Quackenbush
+Ceyhan
+Joachim
+North
+Robin
+Pascual
+Schwabacher
+JennaVergeynst
+Mason
+Wasserman
+Jacob
+Sheppard
+Olsen
+Petrov
+Buglet
+huashuai
+Bogachev
+Thyreau
+Margaret
Willers
-Gohlke
-Clearfield
-Ringwalt
-Cottrell
-Gagne
-Schettino
-Panfilov
-Araujo
-Gianluca
-Poulin
-Nisar
-Henriksen
-Hoegen
-Jaidev
-Deshpande
-Swails
-Reback
-Buyl
-Joris
-Bossche
-Joris
-Vankerschaver
-Julien
-Danjou
+Adams
+kjford
+DaCoEx
+Roy
+Pinter
+Gaëtan
+sheep
+Santander
+Bilogur
+Fremlin
+wavedatalab
+Arfer
+Stijn
+Noémi
+Keming
+peadarcoyle
+Petra
+Lyon
+Rouz
+Tarbo
+Haffner
+Roeschke
+Ajay
+Ennemoser
+Gartland
+Stephens
+Moehl
+Billington
+(Ray)
+Cheuk
+chinskiy
+Sakuma
+Tiago
Ka
-Wo
-Kehoe
-Jordahl
-Shedden
-Buitinck
-Gambogi
-Savoie
-Roos
-D'Agostino
-Mortada
-Mehyar
-Eubank
-Nipun
-Batra
-Ondřej
-Čertík
-Pratap
-Vardhan
-Rafal
-Skolasinski
-Rinoc
-Gieseke
-Safia
-Abdalla
-Saumitra
-Shahapure
-Pölsterl
-Rubbert
-Sinhrks
-Siu
-Kwan
-Seabold
-Carrucciu
-Hoyer
+Riku
+Janelle
+Swails
+reidy
Pascoe
-Santegoeds
+ogiaquino
+tim
+Guinta
+Randy
+MHC
+Sharad
+Nichols
+Kisslinger
+Sinayoko
+Akinbo
+Bradshaw
+Saljooghinejad
+Adrien
+Chomski
+Ting
+monicaBee
+Martensen
+Kwan
Grainger
-Tjerk
-Santegoeds
-Augspurger
-Winterflower
-Yaroslav
-Halchenko
-agijsberts
-ajcr
-behzad
-nouri
-cel
-cyrusmaher
-davidovitch
-ganego
-jreback
-juricast
-larvian
-maximilianr
-msund
+Ver
+bkandel
+srib
+O'Melveny
+WANG
+Mazzullo
+Thrasibule
+Wang
+llllllllll
+Bill
+McFee
+papaloizou
+Retkowski
+Hammerbacher
+Goh
+Moisan
+Ehsan
+stijnvanhoey
+mjlove
+Rittinger
+Ridhwan
+George
+Whelan
+亮
+Nick
+Tom
+Karagiannakis
+Jason
+Loïc
+Journois
+Helmus
+Allen
+Shteynbuk
+Naul
+Epstein
+Hogman
+Damini
+Zoutkamp
+Nowacki
+Kint
+floydsoft
+Shirgur
+Nothman
+austinc
+Noah
+siu
+Kleynhans
+Nipun
+Alfonso
+Valentin
+Joris
+Pan
+agraboso
+MarsGuy
+Wieland
rekcahpassyla
-robertzk
-scls
-seth
-sinhrks
-springcoil
-terrytangyuan
-tzinckgraf
-Rosenfeld
-Artemy
-Kolchinsky
-Willers
+Gosar
+Evan
+Eric
+ysau
+jackieleng
+Walt
+roch
+Joan
+w
+Samora
+Glen
+Kelley
+Hooper
+Giftlin
+S
+Palanisami
+Winand
+Novotný
+Jackie
+Nicholaus
+charlie
+Hamed
+jdeschenes
Christer
-der
-Meeren
-Hudon
-Lasiman
-Brundu
-Gaëtan
-Menten
-Hiebert
-Reback
-Joris
-Bossche
-Ka
-Wo
-Mortada
-Mehyar
-Grainger
-Ajamian
-Augspurger
-Yoshiki
-Vázquez
-Baeza
-Younggun
-austinc
-behzad
+Cohan
nouri
-jreback
-lexual
-rekcahpassyla
-scls
-sinhrks
-Artemy
-Kolchinsky
-Gilmer
-Grinolds
-Birken
+Gustafsson
+Marsden
+E
+Antoine
+Rauch
+Rasch
+Manuel
+Kisiel
+daniel
+stonebig
+Bolla
+rvernica
+Colin
+Shane
+A
+Tripathi
+Cañones
+Ojha
+Rupert
+Lučanin
+LEBIGOT
+Helie
+Herrero
+Graham
+Matthias
+Wilson
+Ludovico
+Tulio
+Sahil
+mikebailey
+Julien
+Dr
+Gokhale
+Francesc
+dgram
+Dixey
+Simon
+dkamm
+Gouthaman
+Karel
+Allan
+mattrijk
+Felix
+Wagner
+Sylvia
+Erik
+vetinari
+Schurov
+ian
+Zihao
+Döpfert
+Kamalaldin
+iulia
+Max
+Skipper
+Dale
+Leonhardt
+Julio
+Menten
+s
+Fischer
+Thiago
+Alves
+Cobzarenco
+Grechka
+Sanderson
+Kahat
+sanguineturtle
+Luke
+scotthavard92
+larvian
+Stephan
+Arora
+Schade
+Oleg
+Mease
+Pitkeathly
+Kodi
+Dave
+Lenail
+Nathan
+Halecky
+Duck
+Dody
+scriptomation
+Michael
+Yoder
+Deng
+Reidy
+SarahJessica
+Salonen
+Kuan
+Kevin
+Matti
+jfoo
+Bonet
Hirschfeld
-Dunné
-Hatem
-Nassrat
-Sperr
+charlie0389
+Sivji
+Kovanen
+Rok
+Blancas
+Alexander
+Egor
+Bernard
+jschendel
+Vinot
+SiYoungOh
+Salgado
+Kalyan
+Manraj
+Conrado
+B
+Walsh
+Pauli
+ignamv
Herter
-Blackburne
-Reback
-Crist
-Abernot
-Joris
-Bossche
-Shedden
-Razoumov
-Riel
-Mortada
-Mehyar
-Eubank
-Grisel
-Battiston
-Hyunjin
-Zhang
-Hoyer
-Tiago
-Antao
-Ajamian
-Augspurger
-Tomaz
-Berisa
-Shirgur
-Filimonov
-Hogman
-Yasin
-Younggun
-behzad
-nouri
-dsm
-floydsoft
-gfr
-jnmclarty
-jreback
-ksanghai
-lucas
-mschmohl
-ptype
-rockg
-scls
-sinhrks
-Toth
-Amici
-Artemy
-Kolchinsky
-Ashwini
-Chaudhary
-Letson
-Chau
+Beltramini
+Mignon
+Panfilov
+dengemann
+Ksenia
+Mey
+h
+Wenhuan
+Phillip
+Foo
+sfoo
+ARF
+Terrel
+tsdlovell
+VanderPlas
+scls
+Tratner
Hoang
-Christer
-der
-Meeren
-Cottrell
-Ehsan
-Azarnasab
-Torcasso
-Sexauer
-Reback
-Joris
-Bossche
+Giulio
+Chwalisz
+Márquez
+Nakabayashi
+RobinFiveWords
+V
+Adrián
+Santos
+Shashank
+Magliocchetti
Joschka
-zur
-Jacobsmühlen
-Bochi
-Junya
-Hayashi
-Shedden
-Kieran
-O'Mahony
-Kodi
-Arfer
-Airas
-Mortada
-Mehyar
-Lasley
-Lasley
-Pascual
-Seabold
-Hoyer
-Grainger
-Augspurger
-Filimonov
-Vyomkesh
-Tripathi
-Holmgren
-Yulong
-behzad
-nouri
-bertrandhaut
-bjonen
-cel
-clham
-hsperr
-ischwabacher
-jnmclarty
-josham
-jreback
-omtinez
-roch
-sinhrks
-unutbu
-Angelos
-Evripiotis
-Artemy
-Kolchinsky
-Pointet
-Jacobowski
-Charalampos
-Papaloizou
-Warth
-Zanini
-Francesc
-Kleynhans
-Reback
-Tratner
-Joris
-Bossche
-Suggit
-Lasley
-Hoyer
-Sylvain
-Corlay
-Grainger
-Tiago
-Antao
-Hauck
-Chaves
-Salgado
-Bhandoh
-Aiyong
-Holmgren
-behzad
-nouri
-broessli
-charalampos
-papaloizou
-immerrr
-jnmclarty
-jreback
-mgilbert
-onesandzeroes
-peadarcoyle
-rockg
-seth
-sinhrks
-unutbu
-wavedatalab
-Åsmund
-Hjulstad
-Rosenfeld
-Sipos
-Artemy
-Kolchinsky
-Letson
-Horel
-Reback
-Joris
-Bossche
-Sanghee
-Hoyer
-Aiyong
-behzad
-nouri
-immerrr
-jnmclarty
-jreback
-pallav
-fdsi
-unutbu
-Greenhall
-Artemy
-Kolchinsky
-behzad
-nouri
-Sauer
-benjamin
-Thyreau
-bjonen
-Stoafer
-dlovell
-dsm
-Herrero
-Hsiaoming
-Huan
-hunterowens
-Hyungtae
-immerrr
-Slavitt
-ischwabacher
-Schaer
-Tratner
-Farnham
+Kaleta
+Kibirige
+Willing
+Mahdi
+Hughes
+Ingolf
+Vinícius
+Yuan
+Flaxman
+Cota
+Yan
+Alekseyev
+Makarov
jmorris
-jnmclarty
-Bradish
-Joerg
-Rittinger
-Joris
-Bossche
-jreback
-klonuo
-lexual
-mcjcode
+Hammad
+Ralph
+Samuel
+Moritz
+Klein
+Nathalie
+Jeffrey
+Freitas
+discort
+"Mali"
+Carroux
+Moon
+Sindi
+den
+Vincent
+Stade
+Castro
+Min
+tmnhat2001
+DeLuca
+akittredge
+nprad
+Hauck
+Onno
+Sudeep
+Knupp
+orereta
+Maeztu
+Fabrizio
+Upkar
+adrian
+Mestemaker
+Kerby
+Andrea
+Bradt
+hironow
+Harsh
+Shanks
+Richard
+D
+Wolosonovich
+Juraj
+tomrod
+Fer
+Grisel
+yelite
+stas
+jen
+Willmer
+Roch
Schatzow
-Mortada
-Mehyar
-mtrbean
+Haleemur
+Pradyumna
Typanski
-onesandzeroes
-Masurel
-Battiston
-rockg
-Petchler
+Xiang
+Low
+Jung
+Dan
+Chea
+Hao
+Kassandra
+Alok
+Greco
+Gill
+Kacawi
+Azarnasab
+Line
+Johnson
+Hilboll
+raph
+Rud
+Singal
+Tara
+Vytautas
+Todd
+Geffert
+JimStearns206
+Huan
+Hendrik
+Dylan
+Bedini
+Carter
seth
-Shahul
-Hameed
-Shashank
-Agarwal
-sinhrks
-someben
-stahlous
-stas
-sl
-Hoyer
-thatneat
-alcorn
-Augspurger
-unutbu
-Yevgeniy
-Grechka
-Yoshiki
-VÃ
-zquez
-Baeza
-zachcp
-Rosenfeld
-Quistorff
-Wignall
-bwignall
-clham
-Waeber
-Bew
-dsm
-helger
-immerrr
-Schaer
-jaimefrio
-Reaver
-Joris
-Bossche
-jreback
-Julien
-Danjou
-lexual
-Wittmann
-Mortada
-Mehyar
-onesandzeroes
-rockg
-sanguineturtle
-Schaer
-seth
-sinhrks
-Hoyer
-Kluyver
-yelite
-hexbin
-Acanthostega
-agijsberts
-akittredge
-Gaudio
-Rothberg
-Rosenfeld
-ankostis
-anomrake
-Mazières
-anton
-bashtage
-Sauer
-benjamin
-Buran
-bwignall
-cgohlke
-chebee
-clham
-Birken
-danielballan
-Waeber
-Drapala
-Gouthaman
-Balaraman
-Poulin
-hshimizu
-hugo
-immerrr
-ischwabacher
-Schaer
-jaimefrio
-Sexauer
-Reback
-Tratner
-Reaver
-Joris
-Bossche
-jreback
-jsexauer
-Júlio
-kdiether
-Jordahl
-Wittmann
-Grender
-Gruen
-michaelws
-mikebailey
-Nipun
-Batra
-ojdo
-onesandzeroes
-phaebz
-Battiston
-Carnevale
-ribonoous
-Gibboni
-rockg
-sinhrks
-Seabold
-Hoyer
-Cera
-Augspurger
-unutbu
-westurner
-Yaroslav
-Halchenko
-lexual
-danbirken
-travis
-Billington
-Cobzarenco
-Gamboa
-Cavazos
-Gaudecker
-Gerigk
-Yaroslav
-Halchenko
-sharey
-Vytautas
-Jancauskas
-Hammerbacher
-Hilboll
-Luc
-Kesters
-JanSchulz
-Negusse
-Wouter
-Overmeire
-Reeson
-Aman
-Thakral
-Uga
-Vandenbussche
-Pinxing
-astype
-Buglet
-Beltrame
-Hilboll
-Jev
-Kuznetsov
-Wouter
-Overmeire
-Reyfman
-Joon
-Ro
-Uga
-Vandenbussche
-setupegg
-Hammerbacher
-Jev
-Kuznetsov
-Wouter
-Overmeire
-Aman
-Thakral
-Uga
-Vandenbussche
-carljv
-rsamson
-newaxis
-Fortunov
-Aman
-Thakral
+Jenn
Beltrame
-Wouter
-Overmeire
-rsamson
+Joel
+lgautier
+jnmclarty
+Vaught
+Moral
+Mie
+Chaves
+Shyam
+Dua
+fjdiod
+O'Mahony
Laserson
-Pentreath
+Dunné
+Gieseke
+argunov
+mtrbean
+Lipták
+Dennis
+Horel
+Marco
+Taavi
+Metz
+adneu
+Dorozhko
+Ryan
+chernrick
+Pierre
+Guillaume
+jniznan
+Yadunandan
+Saladi
+Rhodes
+Buyl
+Albert
+RK
+VÃ
+Haaland
+Becker
+Vince
+Sébastien
+Jovixe
+derestle
+Bran
+dlovell
+prossahl
+Chankey
+Anh
Joon
-Ro
-Uga
-Fortunov
+l736x
+Iva
+Moussa
+Chalmer
+Stephen
+Čertík
+Oluwafemi
+Ian
+b1
+Goyo
+K
+hcontrast
+Chwala
+xpvpc
+Solinsky
+Dwiel
+Hoersch
Berka
-Vandenbussche
-krogh
-akima
-BPoly
-isna
-kurt
+Gohlke
+harshul1610
+Burrows
+Zhao
+Cleary
+westurner
+Scherer
+claudiobertoldi
+jonaslb
+tzinckgraf
+Adrian
+kotrfa
+ejnens
+Quinonez
+unutbu
+Krych
+Joong
+mgilbert
+verakai
+dmanikowski
+Atariah
+L
+Thomas
+Turner
+Mayank
+Guillem
+Hussain
+Marshall
+Benoit
+Arash
+Röttger
+harshul
+Yee
+Lupton
+bpraggastis
+topper
+mwaskom
+p
+Sašo
+Wright
+nmartensen
+Tux1
+ca
+Bank
+Jones
+Pedersen
+Amol
+Gorgolewski
+Bruin
+Penkov
+chappers
+bjonen
+Bew
+Auffarth
+Yeongseon
+Hannah
+Reda
+Araujo
+c123w
+Dražen
+Miralles
+taeold
+Rothberg
+Gabi
+Sheppard,
+Morton
+duozhang
+WBare
+Tomaz
+Isaac
+Selik
+Gamboa
+Malgorzata
+Gibbons
+Linde
+Sperr
+MaxU
+Facai)
+Karrie
+Thiel
+Takafumi
+Kumar
+aaron315
+Licht
+Gwynne
+joshuaar
+Pointet
+Hoey
+Blackburne
+Alan
+Jucha
+Hayashi
+maximilianr
+donK23
+Savoie
+philipphanemann
+Sarma
+setupegg
+Dimitris
+Hanmin
+Denny
+Adam
+Danjou
+Christian
+Chambers
+Jeffries
+Sy
+Yasin
+Withers
+Maria
+herrfz
+Bird
+Rossi
+Vivek
+XF
+klonuo
+rs2
+Jongwony
+Augspurger
+Liu
+Yoshiki
+Kuhn
+Leo
+Gabriel
+Bhandoh
+Ganssle
+gliptak
+Obeng
+Schaer
+McPherson
+Goppert
+pallav
+Lukasz
+Polosukhin
+Meyer
+BROCHART
diff --git a/doc/source/spelling_wordlist.txt b/doc/source/spelling_wordlist.txt
index be93cdad083e9..950f55b5f0fa9 100644
--- a/doc/source/spelling_wordlist.txt
+++ b/doc/source/spelling_wordlist.txt
@@ -5,7 +5,16 @@ NumPy
Reindexing
reindexing
ga
+pypy
+internet
fe
+astype
+hexbin
+unparseable
+paramstyle
+ec
+Featuretools
+Hexbin
reindexed
automagic
closedness
@@ -74,6 +83,11 @@ Groupby
Unioning
csv
Upcase
+Unserialized
+rdk
+ri
+cr
+multivalue
resampling
Upcase
Lowcase
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index af4eeffd87d01..b3f200a619a78 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -18,7 +18,7 @@ Fixed Regressions
**Comparing Series with datetime.date**
We've reverted a 0.23.0 change to comparing a :class:`Series` holding datetimes and a ``datetime.date`` object (:issue:`21152`).
-In pandas 0.22 and earlier, comparing a Series holding datetimes and ``datetime.date`` objects would coerce the ``datetime.date`` to a datetime before comapring.
+In pandas 0.22 and earlier, comparing a Series holding datetimes and ``datetime.date`` objects would coerce the ``datetime.date`` to a datetime before comparing.
This was inconsistent with Python, NumPy, and :class:`DatetimeIndex`, which never consider a datetime and ``datetime.date`` equal.
In 0.23.0, we unified operations between DatetimeIndex and Series, and in the process changed comparisons between a Series of datetimes and ``datetime.date`` without warning.
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 90fc579ae69e5..5e5b908b92c68 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -128,10 +128,10 @@ Removal of prior version deprecations/changes
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
-- Improved performance of :func:`Series.describe` in case of numeric dtpyes (:issue:`21274`)
+- Improved performance of :func:`Series.describe` in case of numeric dtypes (:issue:`21274`)
- Improved performance of :func:`pandas.core.groupby.GroupBy.rank` when dealing with tied rankings (:issue:`21237`)
- Improved performance of :func:`DataFrame.set_index` with columns consisting of :class:`Period` objects (:issue:`21582`)
--
+
.. _whatsnew_0240.docs:
diff --git a/scripts/announce.py b/scripts/announce.py
index 7b7933eba54dd..466e099a1d6b4 100755
--- a/scripts/announce.py
+++ b/scripts/announce.py
@@ -73,6 +73,31 @@ def get_authors(revision_range):
return authors
+def update_name_wordlist():
+ # Update doc/source/names_wordlist.txt with the names of every author
+ all_names = set()
+
+ path = os.path.dirname(os.path.dirname(__file__))
+ wordlist_path = os.path.join(path, 'doc', 'source', 'names_wordlist.txt')
+
+ # Get all names from wordlist and add them to the set
+ with open(wordlist_path, encoding='utf-8') as namelist:
+ for name in namelist:
+ all_names.add(name.strip())
+
+ latest_tag = sorted(this_repo.tags,
+ key=lambda t: t.commit.committed_datetime)[-1]
+ authors = get_authors('0.3.0..{}'.format(latest_tag))
+
+ # Add author names to set from version 0.3.0 until latest version
+ for author in authors:
+ all_names = all_names.union(re.sub('[.+\-!?]', ' ', author).split())
+
+ with open(wordlist_path, 'w', encoding='utf-8') as wordlist:
+ for name in all_names:
+ wordlist.write('{}\n'.format(name))
+
+
def get_pull_requests(repo, revision_range):
prnums = []
@@ -108,6 +133,7 @@ def main(revision_range, repo):
print(heading)
print(u"=" * len(heading))
print(author_msg % len(authors))
+ update_name_wordlist()
for s in authors:
print(u'* ' + s)
| This is the initial PR to try and fix the issue #21354 the sphinx spelling extension needs every name to be divided and added to each line in order to get the name marked as spelt correctly. The file `names_wordlist.txt` is updated on every run of the function `get_authors()` located inside`scripts/announce.py`.
Since the issue #21396 is related to my previous PR I attempted to resolve this issue here as well, I followed the suggestion of @jorisvandenbossche and added a try/except to the `conf.py` if the dependency doesn't exist then nothing will happen.
@datapythonista since you submitted a PR(#21397) to try and fix the issue, perhaps you have an opinion about this bit of code. Also, if you would like to do the changes on your own PR I will just delete this bit and use your solution 😄 👍
I would like some feedback on this first attempt, the generator `[names.extend(re.sub('\W+', ' ', x).split()) for x in cur.union(pre)]` seems a bit smelly but does the trick, should this be refactored to use a long form and make it more readable? I extended the list to names because split() was creating a list inside the main list and this makes every name to be split and added to the same list (this is probably very memory inefficient though)
I tried to run the script from my machine but I got an issue with it, probably I'm doing something wrong but when I run `./scripts/announce.py $GITHUB v0.15.1..v0.23.0` I get the error message:
```
usage: announce.py [-h] [--repo REPO] revision_range
announce.py: error: unrecognized arguments: v0.15.1..v0.23.0
```
Not sure what I'm doing wrong, to be honest. If I run just the `get_authors()` function everything seems to be fine (including the v0.15.1..v0.23.0 format) and the names_wordlist is updated successfully.
I will look forward to read your opinions about this PR 👍
----
- [ ] closes #21396 #21354
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/21402 | 2018-06-09T16:31:45Z | 2018-11-03T13:22:57Z | null | 2018-11-03T13:22:58Z |
ENH: to_sql() add parameter "method" to control insertions method (#8… | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 9aff1e54d8e98..fa6a8b1d01530 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4989,6 +4989,54 @@ with respect to the timezone.
timezone aware or naive. When reading ``TIMESTAMP WITH TIME ZONE`` types, pandas
will convert the data to UTC.
+.. _io.sql.method:
+
+Insertion Method
+++++++++++++++++
+
+.. versionadded:: 0.24.0
+
+The parameter ``method`` controls the SQL insertion clause used.
+Possible values are:
+
+- ``None``: Uses standard SQL ``INSERT`` clause (one per row).
+- ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
+ It uses a *special* SQL syntax not supported by all backends.
+ This usually provides better performance for analytic databases
+ like *Presto* and *Redshift*, but has worse performance for
+ traditional SQL backend if the table contains many columns.
+ For more information check the SQLAlchemy `documention
+ <http://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.Insert.values.params.*args>`__.
+- callable with signature ``(pd_table, conn, keys, data_iter)``:
+ This can be used to implement a more performant insertion method based on
+ specific backend dialect features.
+
+Example of a callable using PostgreSQL `COPY clause
+<https://www.postgresql.org/docs/current/static/sql-copy.html>`__::
+
+ # Alternative to_sql() *method* for DBs that support COPY FROM
+ import csv
+ from io import StringIO
+
+ def psql_insert_copy(table, conn, keys, data_iter):
+ # gets a DBAPI connection that can provide a cursor
+ dbapi_conn = conn.connection
+ with dbapi_conn.cursor() as cur:
+ s_buf = StringIO()
+ writer = csv.writer(s_buf)
+ writer.writerows(data_iter)
+ s_buf.seek(0)
+
+ columns = ', '.join('"{}"'.format(k) for k in keys)
+ if table.schema:
+ table_name = '{}.{}'.format(table.schema, table.name)
+ else:
+ table_name = table.name
+
+ sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
+ table_name, columns)
+ cur.copy_expert(sql=sql, file=s_buf)
+
Reading Tables
''''''''''''''
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 018820b09dba4..6cb08afadec31 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -373,6 +373,7 @@ Other Enhancements
- :meth:`DataFrame.between_time` and :meth:`DataFrame.at_time` have gained the an ``axis`` parameter (:issue:`8839`)
- The ``scatter_matrix``, ``andrews_curves``, ``parallel_coordinates``, ``lag_plot``, ``autocorrelation_plot``, ``bootstrap_plot``, and ``radviz`` plots from the ``pandas.plotting`` module are now accessible from calling :meth:`DataFrame.plot` (:issue:`11978`)
- :class:`IntervalIndex` has gained the :attr:`~IntervalIndex.is_overlapping` attribute to indicate if the ``IntervalIndex`` contains any overlapping intervals (:issue:`23309`)
+- :func:`pandas.DataFrame.to_sql` has gained the ``method`` argument to control SQL insertion clause. See the :ref:`insertion method <io.sql.method>` section in the documentation. (:issue:`8953`)
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 63d9b5265cdc7..3c28fef024b79 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2386,7 +2386,7 @@ def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
**kwargs)
def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
- index_label=None, chunksize=None, dtype=None):
+ index_label=None, chunksize=None, dtype=None, method=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -2424,6 +2424,17 @@ def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
Specifying the datatype for columns. The keys should be the column
names and the values should be the SQLAlchemy types or strings for
the sqlite3 legacy mode.
+ method : {None, 'multi', callable}, default None
+ Controls the SQL insertion clause used:
+
+ * None : Uses standard SQL ``INSERT`` clause (one per row).
+ * 'multi': Pass multiple values in a single ``INSERT`` clause.
+ * callable with signature ``(pd_table, conn, keys, data_iter)``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method <io.sql.method>`.
+
+ .. versionadded:: 0.24.0
Raises
------
@@ -2505,7 +2516,7 @@ def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
from pandas.io import sql
sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,
index=index, index_label=index_label, chunksize=chunksize,
- dtype=dtype)
+ dtype=dtype, method=method)
def to_pickle(self, path, compression='infer',
protocol=pkl.HIGHEST_PROTOCOL):
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index e54d29148c6d0..6093c6c3fd0fc 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -8,6 +8,7 @@
from contextlib import contextmanager
from datetime import date, datetime, time
+from functools import partial
import re
import warnings
@@ -395,7 +396,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
def to_sql(frame, name, con, schema=None, if_exists='fail', index=True,
- index_label=None, chunksize=None, dtype=None):
+ index_label=None, chunksize=None, dtype=None, method=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -429,6 +430,17 @@ def to_sql(frame, name, con, schema=None, if_exists='fail', index=True,
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
+ method : {None, 'multi', callable}, default None
+ Controls the SQL insertion clause used:
+
+ - None : Uses standard SQL ``INSERT`` clause (one per row).
+ - 'multi': Pass multiple values in a single ``INSERT`` clause.
+ - callable with signature ``(pd_table, conn, keys, data_iter)``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method <io.sql.method>`.
+
+ .. versionadded:: 0.24.0
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
@@ -443,7 +455,7 @@ def to_sql(frame, name, con, schema=None, if_exists='fail', index=True,
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
- chunksize=chunksize, dtype=dtype)
+ chunksize=chunksize, dtype=dtype, method=method)
def has_table(table_name, con, schema=None):
@@ -568,8 +580,29 @@ def create(self):
else:
self._execute_create()
- def insert_statement(self):
- return self.table.insert()
+ def _execute_insert(self, conn, keys, data_iter):
+ """Execute SQL statement inserting data
+
+ Parameters
+ ----------
+ conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
+ keys : list of str
+ Column names
+ data_iter : generator of list
+ Each item contains a list of values to be inserted
+ """
+ data = [dict(zip(keys, row)) for row in data_iter]
+ conn.execute(self.table.insert(), data)
+
+ def _execute_insert_multi(self, conn, keys, data_iter):
+ """Alternative to _execute_insert for DBs support multivalue INSERT.
+
+ Note: multi-value insert is usually faster for analytics DBs
+ and tables containing a few columns
+ but performance degrades quickly with increase of columns.
+ """
+ data = [dict(zip(keys, row)) for row in data_iter]
+ conn.execute(self.table.insert(data))
def insert_data(self):
if self.index is not None:
@@ -612,11 +645,18 @@ def insert_data(self):
return column_names, data_list
- def _execute_insert(self, conn, keys, data_iter):
- data = [dict(zip(keys, row)) for row in data_iter]
- conn.execute(self.insert_statement(), data)
+ def insert(self, chunksize=None, method=None):
+
+ # set insert method
+ if method is None:
+ exec_insert = self._execute_insert
+ elif method == 'multi':
+ exec_insert = self._execute_insert_multi
+ elif callable(method):
+ exec_insert = partial(method, self)
+ else:
+ raise ValueError('Invalid parameter `method`: {}'.format(method))
- def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
@@ -639,7 +679,7 @@ def insert(self, chunksize=None):
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
- self._execute_insert(conn, keys, chunk_iter)
+ exec_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
@@ -1085,7 +1125,8 @@ def read_query(self, sql, index_col=None, coerce_float=True,
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
- index_label=None, schema=None, chunksize=None, dtype=None):
+ index_label=None, schema=None, chunksize=None, dtype=None,
+ method=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -1115,7 +1156,17 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
+ method : {None', 'multi', callable}, default None
+ Controls the SQL insertion clause used:
+
+ * None : Uses standard SQL ``INSERT`` clause (one per row).
+ * 'multi': Pass multiple values in a single ``INSERT`` clause.
+ * callable with signature ``(pd_table, conn, keys, data_iter)``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method <io.sql.method>`.
+ .. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
@@ -1131,7 +1182,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
- table.insert(chunksize)
+ table.insert(chunksize, method=method)
if (not name.isdigit() and not name.islower()):
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
@@ -1442,7 +1493,8 @@ def _fetchall_as_list(self, cur):
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
- index_label=None, schema=None, chunksize=None, dtype=None):
+ index_label=None, schema=None, chunksize=None, dtype=None,
+ method=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -1471,7 +1523,17 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
Optional specifying the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
+ method : {None, 'multi', callable}, default None
+ Controls the SQL insertion clause used:
+
+ * None : Uses standard SQL ``INSERT`` clause (one per row).
+ * 'multi': Pass multiple values in a single ``INSERT`` clause.
+ * callable with signature ``(pd_table, conn, keys, data_iter)``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method <io.sql.method>`.
+ .. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
@@ -1486,7 +1548,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
- table.insert(chunksize)
+ table.insert(chunksize, method)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index eeeb55cb8e70c..c346103a70c98 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -375,12 +375,16 @@ def _read_sql_iris_named_parameter(self):
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
- def _to_sql(self):
+ def _to_sql(self, method=None):
self.drop_table('test_frame1')
- self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
+ self.pandasSQL.to_sql(self.test_frame1, 'test_frame1', method=method)
assert self.pandasSQL.has_table('test_frame1')
+ num_entries = len(self.test_frame1)
+ num_rows = self._count_rows('test_frame1')
+ assert num_rows == num_entries
+
# Nuke table
self.drop_table('test_frame1')
@@ -434,6 +438,25 @@ def _to_sql_append(self):
assert num_rows == num_entries
self.drop_table('test_frame1')
+ def _to_sql_method_callable(self):
+ check = [] # used to double check function below is really being used
+
+ def sample(pd_table, conn, keys, data_iter):
+ check.append(1)
+ data = [dict(zip(keys, row)) for row in data_iter]
+ conn.execute(pd_table.table.insert(), data)
+ self.drop_table('test_frame1')
+
+ self.pandasSQL.to_sql(self.test_frame1, 'test_frame1', method=sample)
+ assert self.pandasSQL.has_table('test_frame1')
+
+ assert check == [1]
+ num_entries = len(self.test_frame1)
+ num_rows = self._count_rows('test_frame1')
+ assert num_rows == num_entries
+ # Nuke table
+ self.drop_table('test_frame1')
+
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
@@ -1193,7 +1216,7 @@ def setup_connect(self):
pytest.skip(
"Can't connect to {0} server".format(self.flavor))
- def test_aread_sql(self):
+ def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
@@ -1217,6 +1240,12 @@ def test_to_sql_replace(self):
def test_to_sql_append(self):
self._to_sql_append()
+ def test_to_sql_method_multi(self):
+ self._to_sql(method='multi')
+
+ def test_to_sql_method_callable(self):
+ self._to_sql_method_callable()
+
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
@@ -1930,6 +1959,36 @@ def test_schema_support(self):
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
+ def test_copy_from_callable_insertion_method(self):
+ # GH 8953
+ # Example in io.rst found under _io.sql.method
+ # not available in sqlite, mysql
+ def psql_insert_copy(table, conn, keys, data_iter):
+ # gets a DBAPI connection that can provide a cursor
+ dbapi_conn = conn.connection
+ with dbapi_conn.cursor() as cur:
+ s_buf = compat.StringIO()
+ writer = csv.writer(s_buf)
+ writer.writerows(data_iter)
+ s_buf.seek(0)
+
+ columns = ', '.join('"{}"'.format(k) for k in keys)
+ if table.schema:
+ table_name = '{}.{}'.format(table.schema, table.name)
+ else:
+ table_name = table.name
+
+ sql_query = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
+ table_name, columns)
+ cur.copy_expert(sql=sql_query, file=s_buf)
+
+ expected = DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2],
+ 'col3': ['a', 'n']})
+ expected.to_sql('test_copy_insert', self.conn, index=False,
+ method=psql_insert_copy)
+ result = sql.read_sql_table('test_copy_insert', self.conn)
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.single
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
| …953)
- [x] closes #8953
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21401 | 2018-06-09T11:01:04Z | 2018-12-28T21:03:13Z | 2018-12-28T21:03:13Z | 2019-01-04T11:20:54Z |
MAINT: Deprecate encoding from stata reader/writer | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index de985d4db5fa3..68c1839221508 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -45,7 +45,7 @@ Other API Changes
Deprecations
~~~~~~~~~~~~
--
+- :meth:`DataFrame.to_stata`, :meth:`read_stata`, :class:`StataReader` and :class:`StataWriter` have deprecated the ``encoding`` argument. The encoding of a Stata dta file is determined by the file type and cannot be changed (:issue:`21244`).
-
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ca572e2e56b6c..0985de3126c5a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -80,7 +80,8 @@
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, Substitution,
- rewrite_axis_style_signature)
+ rewrite_axis_style_signature,
+ deprecate_kwarg)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
@@ -1764,6 +1765,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
+ @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
@@ -1869,9 +1871,8 @@ def to_stata(self, fname, convert_dates=None, write_index=True,
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
- encoding=encoding, byteorder=byteorder,
- time_stamp=time_stamp, data_label=data_label,
- write_index=write_index,
+ byteorder=byteorder, time_stamp=time_stamp,
+ data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file()
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 8584e1f0e3f14..b2a5bec2a4837 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -33,11 +33,7 @@
from pandas.core.series import Series
from pandas.io.common import (get_filepath_or_buffer, BaseIterator,
_stringify_path)
-from pandas.util._decorators import Appender
-from pandas.util._decorators import deprecate_kwarg
-
-VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
- 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
+from pandas.util._decorators import Appender, deprecate_kwarg
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
@@ -169,6 +165,7 @@
@Appender(_read_stata_doc)
+@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index_col=None,
@@ -952,6 +949,7 @@ def __init__(self):
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
+ @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index_col=None,
@@ -970,7 +968,7 @@ def __init__(self, path_or_buf, convert_dates=True,
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
- self._encoding = encoding
+ self._encoding = None
self._chunksize = chunksize
# State variables for the file
@@ -1962,17 +1960,14 @@ class StataWriter(StataParser):
_max_string_length = 244
+ @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
- if encoding is not None:
- if encoding not in VALID_ENCODINGS:
- raise ValueError('Unknown encoding. Only latin-1 and ascii '
- 'supported.')
- self._encoding = encoding
+ self._encoding = 'latin-1'
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
@@ -2731,6 +2726,7 @@ class StataWriter117(StataWriter):
_max_string_length = 2045
+ @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, convert_strl=None):
@@ -2738,9 +2734,10 @@ def __init__(self, fname, data, convert_dates=None, write_index=True,
self._convert_strl = [] if convert_strl is None else convert_strl[:]
super(StataWriter117, self).__init__(fname, data, convert_dates,
- write_index, encoding, byteorder,
- time_stamp, data_label,
- variable_labels)
+ write_index, byteorder=byteorder,
+ time_stamp=time_stamp,
+ data_label=data_label,
+ variable_labels=variable_labels)
self._map = None
self._strl_blob = None
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index e5585902a9dd6..bfb72be80400e 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -361,7 +361,8 @@ def test_encoding(self, version):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
- encoded = read_stata(self.dta_encoding, encoding="latin-1")
+ with tm.assert_produces_warning(FutureWarning):
+ encoded = read_stata(self.dta_encoding, encoding='latin-1')
result = encoded.kreis1849[0]
expected = raw.kreis1849[0]
@@ -369,8 +370,9 @@ def test_encoding(self, version):
assert isinstance(result, compat.string_types)
with tm.ensure_clean() as path:
- encoded.to_stata(path, encoding='latin-1',
- write_index=False, version=version)
+ with tm.assert_produces_warning(FutureWarning):
+ encoded.to_stata(path, write_index=False, version=version,
+ encoding='latin-1')
reread_encoded = read_stata(path)
tm.assert_frame_equal(encoded, reread_encoded)
@@ -1349,13 +1351,6 @@ def test_out_of_range_float(self):
assert 'ColumnTooBig' in cm.exception
assert 'infinity' in cm.exception
- def test_invalid_encoding(self):
- # GH15723, validate encoding
- original = self.read_csv(self.csv3)
- with pytest.raises(ValueError):
- with tm.ensure_clean() as path:
- original.to_stata(path, encoding='utf-8')
-
def test_path_pathlib(self):
df = tm.makeDataFrame()
df.index.name = 'index'
| Deprecate the encoding parameter from all Stata reading and writing
methods and classes. The encoding depends only on the file format and
cannot be changed by users.
- [X] closes #21244
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21400 | 2018-06-09T10:22:35Z | 2018-06-12T00:05:45Z | 2018-06-12T00:05:45Z | 2018-09-20T15:49:38Z |
BUG to_clipboard passes the wrong sep to to_csv | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index de985d4db5fa3..26478601fcde8 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -147,7 +147,7 @@ MultiIndex
I/O
^^^
--
+- Bug in :func:`to_clipboard` which passes r'\t' instead of '\t' to to_csv (:issue:`21385`)
-
-
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index dcc221ce978b3..21c627c49b074 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -99,7 +99,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover
if excel:
try:
if sep is None:
- sep = r'\t'
+ sep = '\t'
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
| Originally, the sep is `\t` as to_csv expects. A commit changes the sep to `r'\t',` causing an exception (because `csv.writer` expects the delimiter to be a one-character string). The exception is suppressed (with `pass` in the `except` block), causing the data frame to be passed to `to_string` downstream (even though it should be handled by `to_csv`)
Changing `r't'` back to `\t` is a quick fix, but just `pass` in the `except` block seems like the more problematic root cause (that I'm not handling here).
- [x ] closes #21385
- [ ] tests added / passed
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21398 | 2018-06-09T02:57:37Z | 2018-06-09T16:04:22Z | null | 2023-05-11T01:17:56Z |
DOC: Loading sphinxcontrib.spelling to sphinx only if it's available (#21396) | diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml
index f9f9208519d61..5733857b55dd4 100644
--- a/ci/environment-dev.yaml
+++ b/ci/environment-dev.yaml
@@ -13,3 +13,4 @@ dependencies:
- pytz
- setuptools>=24.2.0
- sphinx
+ - sphinxcontrib-spelling
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index 3430e778a4573..83ee30b52071d 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -9,3 +9,4 @@ python-dateutil>=2.5.0
pytz
setuptools>=24.2.0
sphinx
+sphinxcontrib-spelling
\ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 97081bec863b7..909bd5a80b76e 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -16,9 +16,11 @@
import re
import inspect
import importlib
-from sphinx.ext.autosummary import _import_by_name
+import logging
import warnings
+from sphinx.ext.autosummary import _import_by_name
+logger = logging.getLogger(__name__)
try:
raw_input # Python 2
@@ -73,9 +75,16 @@
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode',
'nbsphinx',
- 'sphinxcontrib.spelling'
]
+try:
+ import sphinxcontrib.spelling
+except ImportError as err:
+ logger.warn(('sphinxcontrib.spelling failed to import with error "{}". '
+ '`spellcheck` command is not available.'.format(err)))
+else:
+ extensions.append('sphinxcontrib.spelling')
+
exclude_patterns = ['**.ipynb_checkpoints']
spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt']
| - [X] closes #21396
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I think the optional dependencies go directly into the `.txt` files for pip and conda, and not into the yaml file. Please correct me if I'm wrong.
Seems like besides `sphinxcontrib-spelling`, a binary program `enchant` is required, and it's not available in conda for what I've seen. Not sure what's the right approach for it. | https://api.github.com/repos/pandas-dev/pandas/pulls/21397 | 2018-06-08T23:11:07Z | 2018-06-12T07:57:04Z | 2018-06-12T07:57:04Z | 2018-06-20T15:22:33Z |
Bugfix timedelta notimplemented eq | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 7a10e8d1073d0..308748a1856d8 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -193,6 +193,7 @@ Other Enhancements
- :func:`~DataFrame.to_csv`, :func:`~Series.to_csv`, :func:`~DataFrame.to_json`, and :func:`~Series.to_json` now support ``compression='infer'`` to infer compression based on filename extension (:issue:`15008`).
The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`).
- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`)
+- Comparing :class:`Timedelta` with unknown types now return ``NotImplemented`` instead of ``False`` (:issue:`20829`)
- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`)
- :class:`DatetimeIndex` gained :attr:`DatetimeIndex.timetz` attribute. Returns local time with timezone information. (:issue:`21358`)
- :meth:`round`, :meth:`ceil`, and meth:`floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`)
@@ -1009,7 +1010,7 @@ Timedelta
- Bug in :class:`TimedeltaIndex` incorrectly allowing indexing with ``Timestamp`` object (:issue:`20464`)
- Fixed bug where subtracting :class:`Timedelta` from an object-dtyped array would raise ``TypeError`` (:issue:`21980`)
- Fixed bug in adding a :class:`DataFrame` with all-`timedelta64[ns]` dtypes to a :class:`DataFrame` with all-integer dtypes returning incorrect results instead of raising ``TypeError`` (:issue:`22696`)
--
+
Timezones
^^^^^^^^^
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 9c8be1901d1dc..b5b3abd01328c 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -724,27 +724,12 @@ cdef class _Timedelta(timedelta):
if is_timedelta64_object(other):
other = Timedelta(other)
else:
- if op == Py_EQ:
- return False
- elif op == Py_NE:
- return True
-
- # only allow ==, != ops
- raise TypeError('Cannot compare type {cls} with '
- 'type {other}'
- .format(cls=type(self).__name__,
- other=type(other).__name__))
+ return NotImplemented
if util.is_array(other):
return PyObject_RichCompare(np.array([self]), other, op)
return PyObject_RichCompare(other, self, reverse_ops[op])
else:
- if op == Py_EQ:
- return False
- elif op == Py_NE:
- return True
- raise TypeError('Cannot compare type {cls} with type {other}'
- .format(cls=type(self).__name__,
- other=type(other).__name__))
+ return NotImplemented
return cmp_scalar(self.value, ots.value, op)
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 017606dc42d59..0cac1119f76b5 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -42,8 +42,10 @@ def test_ops_error_str(self):
with pytest.raises(TypeError):
left + right
- with pytest.raises(TypeError):
- left > right
+ # GH 20829: python 2 comparison naturally does not raise TypeError
+ if compat.PY3:
+ with pytest.raises(TypeError):
+ left > right
assert not left == right
assert left != right
@@ -103,6 +105,55 @@ def test_compare_timedelta_ndarray(self):
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
+ def test_compare_custom_object(self):
+ """Make sure non supported operations on Timedelta returns NonImplemented
+ and yields to other operand (GH20829)."""
+ class CustomClass(object):
+
+ def __init__(self, cmp_result=None):
+ self.cmp_result = cmp_result
+
+ def generic_result(self):
+ if self.cmp_result is None:
+ return NotImplemented
+ else:
+ return self.cmp_result
+
+ def __eq__(self, other):
+ return self.generic_result()
+
+ def __gt__(self, other):
+ return self.generic_result()
+
+ t = Timedelta('1s')
+
+ assert not (t == "string")
+ assert not (t == 1)
+ assert not (t == CustomClass())
+ assert not (t == CustomClass(cmp_result=False))
+
+ assert t < CustomClass(cmp_result=True)
+ assert not (t < CustomClass(cmp_result=False))
+
+ assert t == CustomClass(cmp_result=True)
+
+ @pytest.mark.skipif(compat.PY2,
+ reason="python 2 does not raise TypeError for \
+ comparisons of different types")
+ @pytest.mark.parametrize("val", [
+ "string", 1])
+ def test_compare_unknown_type(self, val):
+ # GH20829
+ t = Timedelta('1s')
+ with pytest.raises(TypeError):
+ t >= val
+ with pytest.raises(TypeError):
+ t > val
+ with pytest.raises(TypeError):
+ t <= val
+ with pytest.raises(TypeError):
+ t < val
+
class TestTimedeltas(object):
| - [x] closes #20829
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21394 | 2018-06-08T20:21:47Z | 2018-10-24T16:25:39Z | 2018-10-24T16:25:38Z | 2018-10-26T08:34:41Z |
TST: adding test cases for verifying correct values shown by pivot_table() #21378 | diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 3ec60d50f2792..ca95dde1a20c9 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -161,6 +161,24 @@ def test_pivot_with_non_observable_dropna(self, dropna):
tm.assert_frame_equal(result, expected)
+ # gh-21378
+ df = pd.DataFrame(
+ {'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
+ categories=['low', 'high', 'left'],
+ ordered=True),
+ 'B': range(5)})
+
+ result = df.pivot_table(index='A', values='B', dropna=dropna)
+ expected = pd.DataFrame(
+ {'B': [2, 3, 0]},
+ index=pd.Index(
+ pd.Categorical.from_codes([0, 1, 2],
+ categories=['low', 'high', 'left'],
+ ordered=True),
+ name='A'))
+
+ tm.assert_frame_equal(result, expected)
+
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
| BUG: Incorrect values shown by pivot_table() #21378
- [x] closes #21378
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Just added a new test to verify the included patch in `master` branch | https://api.github.com/repos/pandas-dev/pandas/pulls/21393 | 2018-06-08T19:31:00Z | 2018-06-15T17:33:36Z | 2018-06-15T17:33:35Z | 2018-06-15T17:33:43Z |
DOC: Whatsnew for string coercion fix [ci skip] | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 07120e26b4ecd..2d50235885042 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -100,6 +100,8 @@ Bug Fixes
- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`)
- Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`)
- Bug in :func:`pandas.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
+- Bug in :class:`DataFrame` and :class:`Series` constructors not coercing values to strings when ``dtype=str`` is passed (:issue:`21083`)
+
**Sparse**
| xref https://github.com/pandas-dev/pandas/pull/21366/files/a94d3995160a07a09693a883ceb0641e82ca7bdd#r194103331
That PR had
tests (https://github.com/pandas-dev/pandas/pull/21366/files/a94d3995160a07a09693a883ceb0641e82ca7bdd#diff-10e7ab03eb0363417c9b042860d3ba73R814)
but no release note for this specifically, since I didn't know it was a bug.
[ci skip] | https://api.github.com/repos/pandas-dev/pandas/pulls/21392 | 2018-06-08T19:12:50Z | 2018-06-08T20:10:21Z | null | 2023-05-11T01:17:56Z |
Release 0.23.1 backports part I | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index ae1d7029217a4..5464e7cba22c3 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -173,3 +173,23 @@ def setup(self, dtype):
def time_isin_categorical(self, dtype):
self.series.isin(self.sample)
+
+
+class IsMonotonic(object):
+
+ def setup(self):
+ N = 1000
+ self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))
+ self.s = pd.Series(self.c)
+
+ def time_categorical_index_is_monotonic_increasing(self):
+ self.c.is_monotonic_increasing
+
+ def time_categorical_index_is_monotonic_decreasing(self):
+ self.c.is_monotonic_decreasing
+
+ def time_categorical_series_is_monotonic_increasing(self):
+ self.s.is_monotonic_increasing
+
+ def time_categorical_series_is_monotonic_decreasing(self):
+ self.s.is_monotonic_decreasing
diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml
index fe057e714761e..006276ba1a65f 100644
--- a/ci/travis-36.yaml
+++ b/ci/travis-36.yaml
@@ -18,12 +18,10 @@ dependencies:
- numexpr
- numpy
- openpyxl
- - pandas-datareader
- psycopg2
- pyarrow
- pymysql
- pytables
- - python-dateutil
- python-snappy
- python=3.6*
- pytz
@@ -45,3 +43,5 @@ dependencies:
- pip:
- brotlipy
- coverage
+ - pandas-datareader
+ - python-dateutil
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index c81842d3d9212..ec517d3e07bdf 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -924,6 +924,55 @@ bins, with ``NaN`` representing a missing value similar to other dtypes.
pd.cut([0, 3, 5, 1], bins=c.categories)
+
+Generating Ranges of Intervals
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If we need intervals on a regular frequency, we can use the :func:`interval_range` function
+to create an ``IntervalIndex`` using various combinations of ``start``, ``end``, and ``periods``.
+The default frequency for ``interval_range`` is a 1 for numeric intervals, and calendar day for
+datetime-like intervals:
+
+.. ipython:: python
+
+ pd.interval_range(start=0, end=5)
+
+ pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4)
+
+ pd.interval_range(end=pd.Timedelta('3 days'), periods=3)
+
+The ``freq`` parameter can used to specify non-default frequencies, and can utilize a variety
+of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like intervals:
+
+.. ipython:: python
+
+ pd.interval_range(start=0, periods=5, freq=1.5)
+
+ pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W')
+
+ pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H')
+
+Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals
+are closed on. Intervals are closed on the right side by default.
+
+.. ipython:: python
+
+ pd.interval_range(start=0, end=4, closed='both')
+
+ pd.interval_range(start=0, end=4, closed='neither')
+
+.. versionadded:: 0.23.0
+
+Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced
+intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements
+in the resulting ``IntervalIndex``:
+
+.. ipython:: python
+
+ pd.interval_range(start=0, end=6, periods=4)
+
+ pd.interval_range(pd.Timestamp('2018-01-01'), pd.Timestamp('2018-02-28'), periods=3)
+
Miscellaneous indexing FAQ
--------------------------
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index 5f3a01f0725d4..745810704f665 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -352,8 +352,8 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the
TimedeltaIndex
--------------
-To generate an index with time delta, you can use either the ``TimedeltaIndex`` or
-the ``timedelta_range`` constructor.
+To generate an index with time delta, you can use either the :class:`TimedeltaIndex` or
+the :func:`timedelta_range` constructor.
Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``,
or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent missing values.
@@ -363,13 +363,47 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss
pd.TimedeltaIndex(['1 days', '1 days, 00:00:05',
np.timedelta64(2,'D'), datetime.timedelta(days=2,seconds=2)])
-Similarly to ``date_range``, you can construct regular ranges of a ``TimedeltaIndex``:
+Generating Ranges of Time Deltas
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Similar to :func:`date_range`, you can construct regular ranges of a ``TimedeltaIndex``
+using :func:`timedelta_range`. The default frequency for ``timedelta_range`` is
+calendar day:
+
+.. ipython:: python
+
+ pd.timedelta_range(start='1 days', periods=5)
+
+Various combinations of ``start``, ``end``, and ``periods`` can be used with
+``timedelta_range``:
+
+.. ipython:: python
+
+ pd.timedelta_range(start='1 days', end='5 days')
+
+ pd.timedelta_range(end='10 days', periods=4)
+
+The ``freq`` parameter can passed a variety of :ref:`frequency aliases <timeseries.offset_aliases>`:
.. ipython:: python
- pd.timedelta_range(start='1 days', periods=5, freq='D')
pd.timedelta_range(start='1 days', end='2 days', freq='30T')
+ pd.timedelta_range(start='1 days', periods=5, freq='2D5H')
+
+
+.. versionadded:: 0.23.0
+
+Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced
+timedeltas from ``start`` to ``end`` inclusively, with ``periods`` number of elements
+in the resulting ``TimedeltaIndex``:
+
+.. ipython:: python
+
+ pd.timedelta_range('0 days', '4 days', periods=5)
+
+ pd.timedelta_range('0 days', '4 days', periods=10)
+
Using the TimedeltaIndex
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 73e3e721aad71..1b0cf86995a39 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -393,6 +393,18 @@ of those specified will not be generated:
pd.bdate_range(start=start, periods=20)
+.. versionadded:: 0.23.0
+
+Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced
+dates from ``start`` to ``end`` inclusively, with ``periods`` number of elements in the
+resulting ``DatetimeIndex``:
+
+.. ipython:: python
+
+ pd.date_range('2018-01-01', '2018-01-05', periods=5)
+
+ pd.date_range('2018-01-01', '2018-01-05', periods=10)
+
.. _timeseries.custom-freq-ranges:
Custom Frequency Ranges
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 3f89de1dc22d8..feba9d856789b 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1,6 +1,6 @@
.. _whatsnew_0230:
-v0.23.0 (May 15, 2017)
+v0.23.0 (May 15, 2018)
----------------------
This is a major release from 0.22.0 and includes a number of API changes,
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
new file mode 100644
index 0000000000000..b3c1dbc86525d
--- /dev/null
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -0,0 +1,111 @@
+.. _whatsnew_0231:
+
+v0.23.1
+-------
+
+This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes
+and bug fixes. We recommend that all users upgrade to this version.
+
+.. contents:: What's new in v0.23.1
+ :local:
+ :backlinks: none
+
+.. _whatsnew_0231.enhancements:
+
+New features
+~~~~~~~~~~~~
+
+
+.. _whatsnew_0231.deprecations:
+
+Deprecations
+~~~~~~~~~~~~
+
+-
+-
+
+.. _whatsnew_0231.performance:
+
+Performance Improvements
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`)
+-
+-
+
+Documentation Changes
+~~~~~~~~~~~~~~~~~~~~~
+
+-
+-
+
+.. _whatsnew_0231.bug_fixes:
+
+Bug Fixes
+~~~~~~~~~
+
+Groupby/Resample/Rolling
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
+- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
+- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True``
+
+Strings
+^^^^^^^
+
+- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`)
+
+Timedelta
+^^^^^^^^^
+- Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`)
+
+Categorical
+^^^^^^^^^^^
+
+- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
+- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
+
+Sparse
+^^^^^^
+
+- Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`)
+
+Conversion
+^^^^^^^^^^
+
+-
+-
+
+Indexing
+^^^^^^^^
+
+- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
+- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
+- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
+-
+
+I/O
+^^^
+
+- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`)
+- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`)
+-
+
+Plotting
+^^^^^^^^
+
+-
+-
+
+Reshaping
+^^^^^^^^^
+
+- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`)
+-
+
+Other
+^^^^^
+
+- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`)
+- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 43afd1e0f5969..a6dbaff17e543 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -297,7 +297,8 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
# Make sure all arrays are the same size
assert N == len(labels) == len(mask)
- sorted_labels = np.argsort(labels).astype(np.int64, copy=False)
+ sorted_labels = np.argsort(labels, kind='mergesort').astype(
+ np.int64, copy=False)
if direction == 'bfill':
sorted_labels = sorted_labels[::-1]
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 6a33e4a09476d..b3e9b7c9e69ee 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -418,7 +418,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
bint is_datetimelike, object ties_method,
bint ascending, bint pct, object na_option):
"""
- Provides the rank of values within each group.
+ Provides the rank of values within each group.
Parameters
----------
@@ -451,8 +451,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
"""
cdef:
TiebreakEnumType tiebreak
- Py_ssize_t i, j, N, K, val_start=0, grp_start=0, dups=0, sum_ranks=0
- Py_ssize_t grp_vals_seen=1, grp_na_count=0
+ Py_ssize_t i, j, N, K, grp_start=0, dups=0, sum_ranks=0
+ Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0
ndarray[int64_t] _as
ndarray[float64_t, ndim=2] grp_sizes
ndarray[{{c_type}}] masked_vals
@@ -563,6 +563,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
dups = sum_ranks = 0
val_start = i
grp_vals_seen += 1
+ grp_tie_count +=1
# Similar to the previous conditional, check now if we are moving
# to a new group. If so, keep track of the index where the new
@@ -571,11 +572,16 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
# (used by pct calculations later). also be sure to reset any of
# the items helping to calculate dups
if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]:
- for j in range(grp_start, i + 1):
- grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
+ if tiebreak != TIEBREAK_DENSE:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
+ else:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = (grp_tie_count -
+ (grp_na_count > 0))
dups = sum_ranks = 0
grp_na_count = 0
- val_start = i + 1
+ grp_tie_count = 0
grp_start = i + 1
grp_vals_seen = 1
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index d17d4e7139d72..e2b0b33053f83 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -202,22 +202,22 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
if unit == 'D' or unit == 'd':
m = 1000000000L * 86400
- p = 6
+ p = 9
elif unit == 'h':
m = 1000000000L * 3600
- p = 6
+ p = 9
elif unit == 'm':
m = 1000000000L * 60
- p = 6
+ p = 9
elif unit == 's':
m = 1000000000L
- p = 6
+ p = 9
elif unit == 'ms':
m = 1000000L
- p = 3
+ p = 6
elif unit == 'us':
m = 1000L
- p = 0
+ p = 3
elif unit == 'ns' or unit is None:
m = 1L
p = 0
@@ -231,10 +231,10 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
# cast the unit, multiply base/frace separately
# to avoid precision issues from float -> int
base = <int64_t> ts
- frac = ts -base
+ frac = ts - base
if p:
frac = round(frac, p)
- return <int64_t> (base *m) + <int64_t> (frac *m)
+ return <int64_t> (base * m) + <int64_t> (frac * m)
cdef inline _decode_if_necessary(object ts):
@@ -760,7 +760,32 @@ cdef class _Timedelta(timedelta):
@property
def delta(self):
- """ return out delta in ns (for internal compat) """
+ """
+ Return the timedelta in nanoseconds (ns), for internal compatibility.
+
+ Returns
+ -------
+ int
+ Timedelta in nanoseconds.
+
+ Examples
+ --------
+ >>> td = pd.Timedelta('1 days 42 ns')
+ >>> td.delta
+ 86400000000042
+
+ >>> td = pd.Timedelta('3 s')
+ >>> td.delta
+ 3000000000
+
+ >>> td = pd.Timedelta('3 ms 5 us')
+ >>> td.delta
+ 3005000
+
+ >>> td = pd.Timedelta(42, unit='ns')
+ >>> td.delta
+ 42
+ """
return self.value
@property
@@ -791,9 +816,32 @@ cdef class _Timedelta(timedelta):
@property
def nanoseconds(self):
"""
- Number of nanoseconds (>= 0 and less than 1 microsecond).
+ Return the number of nanoseconds (n), where 0 <= n < 1 microsecond.
+
+ Returns
+ -------
+ int
+ Number of nanoseconds.
+
+ See Also
+ --------
+ Timedelta.components : Return all attributes with assigned values
+ (i.e. days, hours, minutes, seconds, milliseconds, microseconds,
+ nanoseconds).
+
+ Examples
+ --------
+ **Using string input**
+
+ >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns')
+ >>> td.nanoseconds
+ 42
+
+ **Using integer input**
- .components will return the shown components
+ >>> td = pd.Timedelta(42, unit='ns')
+ >>> td.nanoseconds
+ 42
"""
self._ensure_components()
return self._ns
@@ -1198,7 +1246,7 @@ class Timedelta(_Timedelta):
deprecated. Use 'array // timedelta.value' instead.
If you want to obtain epochs from an array of timestamps,
you can rather use
- 'array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'.
+ '(array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'.
""")
warnings.warn(msg, FutureWarning)
return other // self.value
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 12517372fedd1..5ae22694d0da7 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -425,7 +425,7 @@ def raise_with_traceback(exc, traceback=Ellipsis):
# In Python 3.7, the private re._pattern_type is removed.
# Python 3.5+ have typing.re.Pattern
-if PY35:
+if PY36:
import typing
re_type = typing.re.Pattern
else:
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index c638b9e4ea117..7a853d575aa69 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -12,7 +12,8 @@
class DirNamesMixin(object):
_accessors = frozenset([])
- _deprecations = frozenset(['asobject'])
+ _deprecations = frozenset(
+ ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides'])
def _dir_deletions(self):
""" delete unwanted __dir__ for this object """
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index abcb9ae3494b5..a1a8f098b582e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -12,6 +12,7 @@
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
@@ -1751,7 +1752,7 @@ def fillna(self, value=None, method=None, limit=None):
values[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
- elif is_scalar(value):
+ elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
diff --git a/pandas/core/base.py b/pandas/core/base.py
index fa78c89ed4ee7..c331ead8d2fef 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -114,7 +114,7 @@ def _reset_cache(self, key=None):
def __sizeof__(self):
"""
- Generates the total memory usage for a object that returns
+ Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
@@ -590,9 +590,10 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
# multiples
else:
- for col in obj:
+ for index, col in enumerate(obj):
try:
- colg = self._gotitem(col, ndim=1, subset=obj[col])
+ colg = self._gotitem(col, ndim=1,
+ subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
@@ -675,7 +676,6 @@ def _gotitem(self, key, ndim, subset=None):
subset : object, default None
subset to act on
"""
-
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
diff --git a/pandas/core/common.py b/pandas/core/common.py
index b9182bfd2cbe2..1de8269c9a0c6 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -55,8 +55,11 @@ def flatten(l):
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
- if obj.name != name:
- return None
+ try:
+ if obj.name != name:
+ name = None
+ except ValueError:
+ name = None
return name
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index dccc840f5affd..9f6e834f0a25f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1774,8 +1774,11 @@ def to_stata(self, fname, convert_dates=None, write_index=True,
Parameters
----------
- fname : str or buffer
- String path of file-like object.
+ fname : path (string), buffer or path object
+ string, path object (pathlib.Path or py._path.local.LocalPath) or
+ object implementing a binary write() functions. If using a buffer
+ then the buffer will not be automatically closed after the file
+ data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
@@ -3718,7 +3721,7 @@ def rename(self, *args, **kwargs):
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
- Whether to return a new %(klass)s. If True then value of copy is
+ Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
@@ -4454,7 +4457,10 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
- if level:
+ # make sure that the axis is lexsorted to start
+ # if not we need to reconstruct to get the correct indexer
+ labels = labels._sort_levels_monotonic()
+ if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
@@ -4462,9 +4468,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
- # make sure that the axis is lexsorted to start
- # if not we need to reconstruct to get the correct indexer
- labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
@@ -5731,7 +5734,12 @@ def diff(self, periods=1, axis=0):
# ----------------------------------------------------------------------
# Function application
- def _gotitem(self, key, ndim, subset=None):
+ def _gotitem(self,
+ key, # type: Union[str, List[str]]
+ ndim, # type: int
+ subset=None # type: Union[Series, DataFrame, None]
+ ):
+ # type: (...) -> Union[Series, DataFrame]
"""
sub-classes to define
return a sliced object
@@ -5746,9 +5754,11 @@ def _gotitem(self, key, ndim, subset=None):
"""
if subset is None:
subset = self
+ elif subset.ndim == 1: # is Series
+ return subset
# TODO: _shallow_copy(subset)?
- return self[key]
+ return subset[key]
_agg_doc = dedent("""
The aggregation operations are always performed over an axis, either the
@@ -7079,6 +7089,9 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ numeric_only : boolean, default True
+ If False, the quantile of datetime and timedelta data will be
+ computed as well
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
@@ -7106,7 +7119,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
- columns=['a', 'b'])
+ columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
@@ -7116,6 +7129,20 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
0.1 1.3 3.7
0.5 2.5 55.0
+ Specifying `numeric_only=False` will also compute the quantile of
+ datetime and timedelta data.
+
+ >>> df = pd.DataFrame({'A': [1, 2],
+ 'B': [pd.Timestamp('2010'),
+ pd.Timestamp('2011')],
+ 'C': [pd.Timedelta('1 days'),
+ pd.Timedelta('2 days')]})
+ >>> df.quantile(0.5, numeric_only=False)
+ A 1.5
+ B 2010-07-02 12:00:00
+ C 1 days 12:00:00
+ Name: 0.5, dtype: object
+
See Also
--------
pandas.core.window.Rolling.quantile
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index df39eb5fd8312..90238af9b3632 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1384,7 +1384,8 @@ def set_names(self, names, level=None, inplace=False):
names=[u'baz', u'bar'])
"""
- if level is not None and self.nlevels == 1:
+ from .multi import MultiIndex
+ if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 3ffef5804acf7..78b7ae7054248 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -382,11 +382,11 @@ def is_unique(self):
@property
def is_monotonic_increasing(self):
- return Index(self.codes).is_monotonic_increasing
+ return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
- return Index(self.codes).is_monotonic_decreasing
+ return self._engine.is_monotonic_decreasing
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 408a8cc435b63..8f8d8760583ce 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1572,6 +1572,10 @@ def interval_range(start=None, end=None, periods=None, freq=None,
periods += 1
if is_number(endpoint):
+ # force consistency between start/end/freq (lower end if freq skips it)
+ if com._all_not_none(start, end, freq):
+ end -= (end - start) % freq
+
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
@@ -1580,10 +1584,6 @@ def interval_range(start=None, end=None, periods=None, freq=None,
elif end is None:
end = start + (periods - 1) * freq
- # force end to be consistent with freq (lower if freq skips end)
- if freq is not None:
- end -= end % freq
-
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com._not_none(start, end, freq)):
# np.linspace always produces float output
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 0829aa8f5a509..2757e0797a410 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -725,7 +725,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
- String to append DataFrame column names
+ String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0e2ae22f35af7..c5caafa07fb8e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1195,12 +1195,13 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
new_index = com._default_index(len(self))
- if level is not None and isinstance(self.index, MultiIndex):
+ if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
- if len(level) < len(self.index.levels):
- new_index = self.index.droplevel(level)
+ if isinstance(self.index, MultiIndex):
+ if len(level) < self.index.nlevels:
+ new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
@@ -2616,7 +2617,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
axis = self._get_axis_number(axis)
index = self.index
- if level:
+ if level is not None:
new_index, indexer = index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(index, MultiIndex):
@@ -3268,7 +3269,7 @@ def rename(self, index=None, **kwargs):
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
- Whether to return a new %(klass)s. If True then value of copy is
+ Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 5532d7522cd2d..ff58f7d104ff9 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -290,6 +290,7 @@ def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
+ object_state[2] = self.sp_values.__reduce__()[2]
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
@@ -339,6 +340,10 @@ def values(self):
output.put(int_index.indices, self)
return output
+ @property
+ def shape(self):
+ return (len(self),)
+
@property
def sp_values(self):
# caching not an option, leaks memory
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 81d775157cf62..5d50c45fe7eca 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -241,7 +241,7 @@ def str_count(arr, pat, flags=0):
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
- >>> s.str.count('\$')
+ >>> s.str.count('\\$')
0 1
1 0
2 1
@@ -358,7 +358,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
Returning any digit using regular expression.
- >>> s1.str.contains('\d', regex=True)
+ >>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 0827216975f15..a492b7c0b8e8e 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -5,7 +5,7 @@
import codecs
import mmap
from contextlib import contextmanager, closing
-from zipfile import ZipFile
+import zipfile
from pandas.compat import StringIO, BytesIO, string_types, text_type
from pandas import compat
@@ -428,7 +428,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None,
return f, handles
-class BytesZipFile(ZipFile, BytesIO):
+class BytesZipFile(zipfile.ZipFile, BytesIO):
"""
Wrapper for standard library class ZipFile and allow the returned file-like
handle to accept byte strings via `write` method.
@@ -437,10 +437,10 @@ class BytesZipFile(ZipFile, BytesIO):
bytes strings into a member of the archive.
"""
# GH 17778
- def __init__(self, file, mode='r', **kwargs):
+ def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs):
if mode in ['wb', 'rb']:
mode = mode.replace('b', '')
- super(BytesZipFile, self).__init__(file, mode, **kwargs)
+ super(BytesZipFile, self).__init__(file, mode, compression, **kwargs)
def write(self, data):
super(BytesZipFile, self).writestr(self.filename, data)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 8f91c7a497e2d..2797924985c70 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1758,11 +1758,25 @@ def value_labels(self):
return self.value_label_dict
-def _open_file_binary_write(fname, encoding):
+def _open_file_binary_write(fname):
+ """
+ Open a binary file or no-op if file-like
+
+ Parameters
+ ----------
+ fname : string path, path object or buffer
+
+ Returns
+ -------
+ file : file-like object
+ File object supporting write
+ own : bool
+ True if the file was created, otherwise False
+ """
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
- return fname
- return open(fname, "wb")
+ return fname, False
+ return open(fname, "wb"), True
def _set_endianness(endianness):
@@ -1899,7 +1913,9 @@ class StataWriter(StataParser):
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
- object implementing a binary write() functions.
+        object implementing a binary write() function. If using a buffer
+ then the buffer will not be automatically closed after the file
+ is written.
.. versionadded:: 0.23.0 support for pathlib, py.path.
@@ -1970,6 +1986,7 @@ def __init__(self, fname, data, convert_dates=None, write_index=True,
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
+ self._own_file = True
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
@@ -2183,9 +2200,7 @@ def _prepare_pandas(self, data):
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
- self._file = _open_file_binary_write(
- self._fname, self._encoding or self._default_encoding
- )
+ self._file, self._own_file = _open_file_binary_write(self._fname)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
@@ -2205,6 +2220,23 @@ def write_file(self):
self._write_file_close_tag()
self._write_map()
finally:
+ self._close()
+
+ def _close(self):
+ """
+ Close the file if it was created by the writer.
+
+ If a buffer or file-like object was passed in, for example a GzipFile,
+ then leave this file open for the caller to close. In either case,
+ attempt to flush the file contents to ensure they are written to disk
+ (if supported)
+ """
+ # Some file-like objects might not support flush
+ try:
+ self._file.flush()
+ except AttributeError:
+ pass
+ if self._own_file:
self._file.close()
def _write_map(self):
@@ -2374,7 +2406,7 @@ def _prepare_data(self):
def _write_data(self):
data = self.data
- data.tofile(self._file)
+ self._file.write(data.tobytes())
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
@@ -2641,7 +2673,9 @@ class StataWriter117(StataWriter):
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
- object implementing a binary write() functions.
+        object implementing a binary write() function. If using a buffer
+ then the buffer will not be automatically closed after the file
+ is written.
data : DataFrame
Input to save
convert_dates : dict
@@ -2879,7 +2913,7 @@ def _write_data(self):
self._update_map('data')
data = self.data
self._file.write(b'<data>')
- data.tofile(self._file)
+ self._file.write(data.tobytes())
self._file.write(b'</data>')
def _write_strls(self):
diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py
index 5133c97d8b590..c78f02245a5b4 100644
--- a/pandas/tests/categorical/test_missing.py
+++ b/pandas/tests/categorical/test_missing.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+import collections
+
import numpy as np
import pytest
@@ -68,3 +70,16 @@ def test_fillna_raises(self, fillna_kwargs, msg):
with tm.assert_raises_regex(ValueError, msg):
cat.fillna(**fillna_kwargs)
+
+ @pytest.mark.parametrize("named", [True, False])
+ def test_fillna_iterable_category(self, named):
+ # https://github.com/pandas-dev/pandas/issues/21097
+ if named:
+ Point = collections.namedtuple("Point", "x y")
+ else:
+ Point = lambda *args: args # tuple
+ cat = Categorical([Point(0, 0), Point(0, 1), None])
+ result = cat.fillna(Point(0, 0))
+ expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
+
+ tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index 32cf29818e069..af26d83df3fe2 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -18,6 +18,11 @@ def test_isna(self, data_missing):
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
+ # GH 21189
+ result = pd.Series(data_missing).drop([0, 1]).isna()
+ expected = pd.Series([], dtype=bool)
+ self.assert_series_equal(result, expected)
+
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index e9431bd0c233c..90f0181beab0d 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -90,7 +90,7 @@ def nbytes(self):
return 0
def isna(self):
- return np.array([x.is_nan() for x in self._data])
+ return np.array([x.is_nan() for x in self._data], dtype=bool)
@property
def _na_value(self):
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 88bb66f38b35c..10be7836cb8d7 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -108,7 +108,8 @@ def nbytes(self):
return sys.getsizeof(self.data)
def isna(self):
- return np.array([x == self.dtype.na_value for x in self.data])
+ return np.array([x == self.dtype.na_value for x in self.data],
+ dtype=bool)
def take(self, indexer, allow_fill=False, fill_value=None):
# re-implement here, since NumPy has trouble setting
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index ac46f02d00773..dfb2961befe35 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -554,6 +554,14 @@ def test_apply_non_numpy_dtype(self):
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
+ def test_apply_dup_names_multi_agg(self):
+ # GH 21063
+ df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
+ expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
+ result = df.agg(['min'])
+
+ tm.assert_frame_equal(result, expected)
+
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index d89731dc09044..d05321abefca6 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -861,6 +861,23 @@ def test_stack_preserve_categorical_dtype(self):
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize("level", [0, 'baz'])
+ def test_unstack_swaplevel_sortlevel(self, level):
+ # GH 20994
+ mi = pd.MultiIndex.from_product([[0], ['d', 'c']],
+ names=['bar', 'baz'])
+ df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=['B', 'A'])
+ df.columns.name = 'foo'
+
+ expected = pd.DataFrame([
+ [3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples([
+ ('c', 'A'), ('c', 'B'), ('d', 'A'), ('d', 'B')], names=[
+ 'baz', 'foo']))
+ expected.index.name = 'bar'
+
+ result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
+ tm.assert_frame_equal(result, expected)
+
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index b60eb89e87da5..599ae683f914b 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -550,18 +550,36 @@ def test_sort_index(self):
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
- def test_sort_index_multiindex(self):
+ @pytest.mark.parametrize("level", ['A', 0]) # GH 21052
+ def test_sort_index_multiindex(self, level):
# GH13496
# sort rows by specified level of multi-index
- mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC'))
- df = DataFrame([[1, 2], [3, 4]], mi)
+ mi = MultiIndex.from_tuples([
+ [2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC'))
+ df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
+
+ expected_mi = MultiIndex.from_tuples([
+ [1, 1, 1],
+ [2, 1, 2],
+ [2, 1, 3]], names=list('ABC'))
+ expected = pd.DataFrame([
+ [5, 6],
+ [3, 4],
+ [1, 2]], index=expected_mi)
+ result = df.sort_index(level=level)
+ assert_frame_equal(result, expected)
- # MI sort, but no level: sort_level has no effect
- mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
- df = DataFrame([[1, 2], [3, 4]], mi)
- result = df.sort_index(sort_remaining=False)
- expected = df.sort_index()
+ # sort_remaining=False
+ expected_mi = MultiIndex.from_tuples([
+ [1, 1, 1],
+ [2, 1, 3],
+ [2, 1, 2]], names=list('ABC'))
+ expected = pd.DataFrame([
+ [5, 6],
+ [1, 2],
+ [3, 4]], index=expected_mi)
+ result = df.sort_index(level=level, sort_remaining=False)
assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index 6ad8b4905abff..203c3c73bec94 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -59,9 +59,9 @@ def test_rank_apply():
('first', False, False, [3., 4., 1., 5., 2.]),
('first', False, True, [.6, .8, .2, 1., .4]),
('dense', True, False, [1., 1., 3., 1., 2.]),
- ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]),
+ ('dense', True, True, [1. / 3., 1. / 3., 3. / 3., 1. / 3., 2. / 3.]),
('dense', False, False, [3., 3., 1., 3., 2.]),
- ('dense', False, True, [.6, .6, .2, .6, .4]),
+ ('dense', False, True, [3. / 3., 3. / 3., 1. / 3., 3. / 3., 2. / 3.]),
])
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
@@ -126,7 +126,7 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
- [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats
+ [2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06'), np.nan, np.nan]
@@ -167,11 +167,11 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
('dense', True, 'keep', False,
[1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
('dense', True, 'keep', True,
- [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]),
+ [1. / 3., 1. / 3., np.nan, 3. / 3., 1. / 3., 2. / 3., np.nan, np.nan]),
('dense', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('dense', False, 'keep', True,
- [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
+ [3. / 3., 3. / 3., np.nan, 1. / 3., 3. / 3., 2. / 3., np.nan, np.nan]),
('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
('average', True, 'no_na', True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
@@ -198,10 +198,10 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
('dense', True, 'no_na', True,
- [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]),
+ [0.25, 0.25, 1., 0.75, 0.25, 0.5, 1., 1.]),
('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
('dense', False, 'no_na', True,
- [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5])
+ [0.75, 0.75, 1., 0.25, 0.75, 0.5, 1., 1.])
])
def test_rank_args_missing(grps, vals, ties_method, ascending,
na_option, pct, exp):
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 626057c1ea760..7fccf1f57a886 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -721,6 +721,23 @@ def interweave(list_obj):
assert_frame_equal(result, exp)
+@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
+def test_pad_stable_sorting(fill_method):
+ # GH 21207
+ x = [0] * 20
+ y = [np.nan] * 10 + [1] * 10
+
+ if fill_method == 'bfill':
+ y = y[::-1]
+
+ df = pd.DataFrame({'x': x, 'y': y})
+ expected = df.copy()
+
+ result = getattr(df.groupby('x'), fill_method)()
+
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 45be3974dad63..8b0514764b0c0 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -650,6 +650,14 @@ def test_unit_mixed(self, cache):
with pytest.raises(ValueError):
pd.to_datetime(arr, errors='raise', cache=cache)
+ @pytest.mark.parametrize('cache', [True, False])
+ def test_unit_rounding(self, cache):
+ # GH 14156: argument will incur floating point errors but no
+ # premature rounding
+ result = pd.to_datetime(1434743731.8770001, unit='s', cache=cache)
+ expected = pd.Timestamp('2015-06-19 19:55:31.877000093')
+ assert result == expected
+
@pytest.mark.parametrize('cache', [True, False])
def test_dataframe(self, cache):
diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py
index 0fadfcf0c7f28..29fe2b0185662 100644
--- a/pandas/tests/indexes/interval/test_interval_range.py
+++ b/pandas/tests/indexes/interval/test_interval_range.py
@@ -110,6 +110,8 @@ def test_constructor_timedelta(self, closed, name, freq, periods):
@pytest.mark.parametrize('start, end, freq, expected_endpoint', [
(0, 10, 3, 9),
+ (0, 10, 1.5, 9),
+ (0.5, 10, 3, 9.5),
(Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')),
(Timestamp('2018-01-01'),
Timestamp('2018-02-09'),
@@ -125,6 +127,22 @@ def test_early_truncation(self, start, end, freq, expected_endpoint):
result_endpoint = result.right[-1]
assert result_endpoint == expected_endpoint
+ @pytest.mark.parametrize('start, end, freq', [
+ (0.5, None, None),
+ (None, 4.5, None),
+ (0.5, None, 1.5),
+ (None, 6.5, 1.5)])
+ def test_no_invalid_float_truncation(self, start, end, freq):
+ # GH 21161
+ if freq is None:
+ breaks = [0.5, 1.5, 2.5, 3.5, 4.5]
+ else:
+ breaks = [0.5, 2.0, 3.5, 5.0, 6.5]
+ expected = IntervalIndex.from_breaks(breaks)
+
+ result = interval_range(start=start, end=end, periods=4, freq=freq)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize('start, mid, end', [
(Timestamp('2018-03-10', tz='US/Eastern'),
Timestamp('2018-03-10 23:30:00', tz='US/Eastern'),
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f4fa547574b9e..1e4dd2921b3f5 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2088,6 +2088,17 @@ def test_get_duplicates_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
index.get_duplicates()
+ def test_tab_complete_warning(self, ip):
+ # https://github.com/pandas-dev/pandas/issues/16409
+ pytest.importorskip('IPython', minversion="6.0.0")
+ from IPython.core.completer import provisionalcompleter
+
+ code = "import pandas as pd; idx = pd.Index([1, 2])"
+ ip.run_code(code)
+ with tm.assert_produces_warning(None):
+ with provisionalcompleter('ignore'):
+ list(ip.Completer.completions('idx.', 4))
+
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 6a1a1a5bdba4f..0e630f69b1a32 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -543,35 +543,41 @@ def test_reindex_empty_index(self):
tm.assert_numpy_array_equal(indexer,
np.array([-1, -1], dtype=np.intp))
- def test_is_monotonic(self):
- c = CategoricalIndex([1, 2, 3])
+ @pytest.mark.parametrize('data, non_lexsorted_data', [
+ [[1, 2, 3], [9, 0, 1, 2, 3]],
+ [list('abc'), list('fabcd')],
+ ])
+ def test_is_monotonic(self, data, non_lexsorted_data):
+ c = CategoricalIndex(data)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
- c = CategoricalIndex([1, 2, 3], ordered=True)
+ c = CategoricalIndex(data, ordered=True)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
- c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
+ c = CategoricalIndex(data, categories=reversed(data))
assert not c.is_monotonic_increasing
assert c.is_monotonic_decreasing
- c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])
+ c = CategoricalIndex(data, categories=reversed(data), ordered=True)
assert not c.is_monotonic_increasing
- assert not c.is_monotonic_decreasing
+ assert c.is_monotonic_decreasing
- c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)
+ # test when data is neither monotonic increasing nor decreasing
+ reordered_data = [data[0], data[2], data[1]]
+ c = CategoricalIndex(reordered_data, categories=reversed(data))
assert not c.is_monotonic_increasing
- assert c.is_monotonic_decreasing
+ assert not c.is_monotonic_decreasing
# non lexsorted categories
- categories = [9, 0, 1, 2, 3]
+ categories = non_lexsorted_data
- c = CategoricalIndex([9, 0], categories=categories)
+ c = CategoricalIndex(categories[:2], categories=categories)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
- c = CategoricalIndex([0, 1], categories=categories)
+ c = CategoricalIndex(categories[1:3], categories=categories)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 37f70090c179f..182dbdf2cf4e4 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -164,6 +164,22 @@ def test_set_name_methods(self):
assert res is None
assert ind.names == new_names2
+ @pytest.mark.parametrize('inplace', [True, False])
+ def test_set_names_with_nlevel_1(self, inplace):
+ # GH 21149
+ # Ensure that .set_names for MultiIndex with
+ # nlevels == 1 does not raise any errors
+ expected = pd.MultiIndex(levels=[[0, 1]],
+ labels=[[0, 1]],
+ names=['first'])
+ m = pd.MultiIndex.from_product([[0, 1]])
+ result = m.set_names('first', level=0, inplace=inplace)
+
+ if inplace:
+ result = m
+
+ tm.assert_index_equal(result, expected)
+
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 2423ddcd9a1a0..2b7ff1f5a9879 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -54,20 +54,21 @@ def test_bad_stream_exception(self):
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
- handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
- # stream must be binary UTF8
- stream = codecs.StreamRecoder(
- handle, utf8.encode, utf8.decode, codec.streamreader,
- codec.streamwriter)
+
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
- with tm.assert_raises_regex(UnicodeDecodeError, msg):
- self.read_csv(stream)
- stream.close()
+
+ # stream must be binary UTF8
+ with open(self.csv_shiftjs, "rb") as handle, codecs.StreamRecoder(
+ handle, utf8.encode, utf8.decode, codec.streamreader,
+ codec.streamwriter) as stream:
+
+ with tm.assert_raises_regex(UnicodeDecodeError, msg):
+ self.read_csv(stream)
def test_read_csv(self):
if not compat.PY3:
diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py
index 01c6620e50d37..e84db66561c49 100644
--- a/pandas/tests/io/parser/compression.py
+++ b/pandas/tests/io/parser/compression.py
@@ -110,16 +110,15 @@ def test_read_csv_infer_compression(self):
# see gh-9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
- inputs = [self.csv1, self.csv1 + '.gz',
- self.csv1 + '.bz2', open(self.csv1)]
+ with open(self.csv1) as f:
+ inputs = [self.csv1, self.csv1 + '.gz',
+ self.csv1 + '.bz2', f]
- for f in inputs:
- df = self.read_csv(f, index_col=0, parse_dates=True,
- compression='infer')
-
- tm.assert_frame_equal(expected, df)
+ for inp in inputs:
+ df = self.read_csv(inp, index_col=0, parse_dates=True,
+ compression='infer')
- inputs[3].close()
+ tm.assert_frame_equal(expected, df)
def test_read_csv_compressed_utf16_example(self):
# GH18071
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index ab4c14034cd20..e8d9d8b52164b 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -35,24 +35,18 @@ def setup_method(self, method):
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
- try:
- f = open(self.csv1, 'rb')
+ with open(self.csv1, 'rb') as f:
reader = TextReader(f)
- result = reader.read() # noqa
- finally:
- f.close()
+ reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
- try:
- f = open(self.csv1, 'rb')
+ with open(self.csv1, 'rb') as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
- finally:
- f.close()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 5da347e47957c..b80263021c269 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -182,6 +182,8 @@ def test_date_time():
fname = os.path.join(dirpath, "datetime.csv")
df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime',
'DateTimeHi', 'Taiw'])
+ # GH 19732: Timestamps imported from sas will incur floating point errors
+ df.iloc[:, 3] = df.iloc[:, 3].dt.round('us')
tm.assert_frame_equal(df, df0)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 110b790a65037..f3a465da4e87f 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -2,6 +2,8 @@
# pylint: disable=E1101
import datetime as dt
+import io
+import gzip
import os
import struct
import warnings
@@ -1473,3 +1475,28 @@ def test_invalid_date_conversion(self):
with pytest.raises(ValueError):
original.to_stata(path,
convert_dates={'wrong_name': 'tc'})
+
+ @pytest.mark.parametrize('version', [114, 117])
+ def test_nonfile_writing(self, version):
+ # GH 21041
+ bio = io.BytesIO()
+ df = tm.makeDataFrame()
+ df.index.name = 'index'
+ with tm.ensure_clean() as path:
+ df.to_stata(bio, version=version)
+ bio.seek(0)
+ with open(path, 'wb') as dta:
+ dta.write(bio.read())
+ reread = pd.read_stata(path, index_col='index')
+ tm.assert_frame_equal(df, reread)
+
+ def test_gzip_writing(self):
+ # writing version 117 requires seek and cannot be used with gzip
+ df = tm.makeDataFrame()
+ df.index.name = 'index'
+ with tm.ensure_clean() as path:
+ with gzip.GzipFile(path, 'wb') as gz:
+ df.to_stata(gz, version=114)
+ with gzip.GzipFile(path, 'rb') as gz:
+ reread = pd.read_stata(gz, index_col='index')
+ tm.assert_frame_equal(df, reread)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index f5e58fa70e1c4..dea305d4b3fee 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -2487,3 +2487,14 @@ def test_concat_aligned_sort_does_not_raise():
columns=[1, 'a'])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("s1name,s2name", [
+ (np.int64(190), (43, 0)), (190, (43, 0))])
+def test_concat_series_name_npscalar_tuple(s1name, s2name):
+ # GH21015
+ s1 = pd.Series({'a': 1, 'b': 2}, name=s1name)
+ s2 = pd.Series({'c': 5, 'd': 6}, name=s2name)
+ result = pd.concat([s1, s2])
+ expected = pd.Series({'a': 1, 'b': 2, 'c': 5, 'd': 6})
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 3fdc2aa71bfc0..205fdf49d3e91 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -106,6 +106,16 @@ def test_compare_timedelta_ndarray(self):
class TestTimedeltas(object):
+ @pytest.mark.parametrize("unit, value, expected", [
+ ('us', 9.999, 9999), ('ms', 9.999999, 9999999),
+ ('s', 9.999999999, 9999999999)])
+ def test_rounding_on_int_unit_construction(self, unit, value, expected):
+ # GH 12690
+ result = Timedelta(value, unit=unit)
+ assert result.value == expected
+ result = Timedelta(str(value) + unit)
+ assert result.value == expected
+
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index b022b327de57c..ab87d98fca8eb 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -621,10 +621,51 @@ def test_basics_nanos(self):
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
- def test_unit(self):
-
- def check(val, unit=None, h=1, s=1, us=0):
- stamp = Timestamp(val, unit=unit)
+ @pytest.mark.parametrize('value, check_kwargs', [
+ [946688461000000000, {}],
+ [946688461000000000 / long(1000), dict(unit='us')],
+ [946688461000000000 / long(1000000), dict(unit='ms')],
+ [946688461000000000 / long(1000000000), dict(unit='s')],
+ [10957, dict(unit='D', h=0)],
+ pytest.param((946688461000000000 + 500000) / long(1000000000),
+ dict(unit='s', us=499, ns=964),
+ marks=pytest.mark.skipif(not PY3,
+ reason='using truediv, so these'
+ ' are like floats')),
+ pytest.param((946688461000000000 + 500000000) / long(1000000000),
+ dict(unit='s', us=500000),
+ marks=pytest.mark.skipif(not PY3,
+ reason='using truediv, so these'
+ ' are like floats')),
+ pytest.param((946688461000000000 + 500000) / long(1000000),
+ dict(unit='ms', us=500),
+ marks=pytest.mark.skipif(not PY3,
+ reason='using truediv, so these'
+ ' are like floats')),
+ pytest.param((946688461000000000 + 500000) / long(1000000000),
+ dict(unit='s'),
+ marks=pytest.mark.skipif(PY3,
+ reason='get chopped in py2')),
+ pytest.param((946688461000000000 + 500000000) / long(1000000000),
+ dict(unit='s'),
+ marks=pytest.mark.skipif(PY3,
+ reason='get chopped in py2')),
+ pytest.param((946688461000000000 + 500000) / long(1000000),
+ dict(unit='ms'),
+ marks=pytest.mark.skipif(PY3,
+ reason='get chopped in py2')),
+ [(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)],
+ [(946688461000000000 + 500000000) / long(1000000),
+ dict(unit='ms', us=500000)],
+ [946688461000000000 / 1000.0 + 5, dict(unit='us', us=5)],
+ [946688461000000000 / 1000.0 + 5000, dict(unit='us', us=5000)],
+ [946688461000000000 / 1000000.0 + 0.5, dict(unit='ms', us=500)],
+ [946688461000000000 / 1000000.0 + 0.005, dict(unit='ms', us=5, ns=5)],
+ [946688461000000000 / 1000000000.0 + 0.5, dict(unit='s', us=500000)],
+ [10957 + 0.5, dict(unit='D', h=12)]])
+ def test_unit(self, value, check_kwargs):
+ def check(value, unit=None, h=1, s=1, us=0, ns=0):
+ stamp = Timestamp(value, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
@@ -637,41 +678,9 @@ def check(val, unit=None, h=1, s=1, us=0):
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
- assert stamp.nanosecond == 0
-
- ts = Timestamp('20000101 01:01:01')
- val = ts.value
- days = (ts - Timestamp('1970-01-01')).days
-
- check(val)
- check(val / long(1000), unit='us')
- check(val / long(1000000), unit='ms')
- check(val / long(1000000000), unit='s')
- check(days, unit='D', h=0)
+ assert stamp.nanosecond == ns
- # using truediv, so these are like floats
- if PY3:
- check((val + 500000) / long(1000000000), unit='s', us=500)
- check((val + 500000000) / long(1000000000), unit='s', us=500000)
- check((val + 500000) / long(1000000), unit='ms', us=500)
-
- # get chopped in py2
- else:
- check((val + 500000) / long(1000000000), unit='s')
- check((val + 500000000) / long(1000000000), unit='s')
- check((val + 500000) / long(1000000), unit='ms')
-
- # ok
- check((val + 500000) / long(1000), unit='us', us=500)
- check((val + 500000000) / long(1000000), unit='ms', us=500000)
-
- # floats
- check(val / 1000.0 + 5, unit='us', us=5)
- check(val / 1000.0 + 5000, unit='us', us=5000)
- check(val / 1000000.0 + 0.5, unit='ms', us=500)
- check(val / 1000000.0 + 0.005, unit='ms', us=5)
- check(val / 1000000000.0 + 0.5, unit='s', us=500000)
- check(days + 0.5, unit='D', h=12)
+ check(value, **check_kwargs)
def test_roundtrip(self):
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index dce4e82cbdcf1..859082a7e722d 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -188,6 +188,11 @@ def test_reset_index_level(self):
with tm.assert_raises_regex(IndexError, 'Too many levels'):
s.reset_index(level=[0, 1, 2])
+ # Check that .reset_index([],drop=True) doesn't fail
+ result = pd.Series(range(4)).reset_index([], drop=True)
+ expected = pd.Series(range(4))
+ assert_series_equal(result, expected)
+
def test_reset_index_range(self):
# GH 12071
s = pd.Series(range(2), name='A', dtype='int64')
@@ -275,3 +280,18 @@ def test_set_axis_prior_to_deprecation_signature(self):
with tm.assert_produces_warning(FutureWarning):
result = s.set_axis(0, list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
+
+ def test_reset_index_drop_errors(self):
+ # GH 20925
+
+ # KeyError raised for series index when passed level name is missing
+ s = pd.Series(range(4))
+ with tm.assert_raises_regex(KeyError, 'must be same as name'):
+ s.reset_index('wrong', drop=True)
+ with tm.assert_raises_regex(KeyError, 'must be same as name'):
+ s.reset_index('wrong')
+
+ # KeyError raised for series when level to be dropped is missing
+ s = pd.Series(range(4), index=pd.MultiIndex.from_product([[1, 2]] * 2))
+ with tm.assert_raises_regex(KeyError, 'not found'):
+ s.reset_index('wrong', drop=True)
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 01b4ea6eaa238..13e0d1b12c372 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -141,19 +141,20 @@ def test_sort_index_inplace(self):
assert result is None
tm.assert_series_equal(random_order, self.ts)
- def test_sort_index_multiindex(self):
+ @pytest.mark.parametrize("level", ['A', 0]) # GH 21052
+ def test_sort_index_multiindex(self, level):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
# implicit sort_remaining=True
- res = s.sort_index(level='A')
+ res = s.sort_index(level=level)
assert_series_equal(backwards, res)
# GH13496
- # rows share same level='A': sort has no effect without remaining lvls
- res = s.sort_index(level='A', sort_remaining=False)
+ # sort has no effect without remaining lvls
+ res = s.sort_index(level=level, sort_remaining=False)
assert_series_equal(s, res)
def test_sort_index_kind(self):
diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py
index 6c0c83cf65ff7..b3330f866ba1f 100644
--- a/pandas/tests/sparse/test_array.py
+++ b/pandas/tests/sparse/test_array.py
@@ -454,6 +454,17 @@ def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
+ @pytest.mark.parametrize('data,shape,dtype', [
+ ([0, 0, 0, 0, 0], (5,), None),
+ ([], (0,), None),
+ ([0], (1,), None),
+ (['A', 'A', np.nan, 'B'], (4,), np.object)
+ ])
+ def test_shape(self, data, shape, dtype):
+ # GH 21126
+ out = SparseArray(data, dtype=dtype)
+ assert out.shape == shape
+
def test_to_dense(self):
vals = np.array([1, np.nan, np.nan, 3, np.nan])
res = SparseArray(vals).to_dense()
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 0b329f64dafa3..bb7ee1b911fee 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -1,12 +1,13 @@
# -*- coding: utf-8 -*-
import pytest
+import os
import collections
from functools import partial
import numpy as np
-from pandas import Series, Timestamp
+from pandas import Series, DataFrame, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
from pandas.core import ops
@@ -222,3 +223,21 @@ def test_standardize_mapping():
dd = collections.defaultdict(list)
assert isinstance(com.standardize_mapping(dd), partial)
+
+
+@pytest.mark.parametrize('obj', [
+ DataFrame(100 * [[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ columns=['X', 'Y', 'Z']),
+ Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
+@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
+def test_compression_size(obj, method, compression):
+ if not compression:
+ pytest.skip("only test compression case.")
+
+ with tm.ensure_clean() as filename:
+ getattr(obj, method)(filename, compression=compression)
+ compressed = os.path.getsize(filename)
+ getattr(obj, method)(filename, compression=None)
+ uncompressed = os.path.getsize(filename)
+ assert uncompressed > compressed
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index ead9ba1e26e2d..79d3aad493182 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -4,9 +4,10 @@
"""
import pytest
+import re
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
- next, get_range_parameters, PY2)
+ next, get_range_parameters, PY2, re_type)
class TestBuiltinIterators(object):
@@ -89,3 +90,7 @@ def test_get_range_parameters(self, start, stop, step):
assert start_result == start_expected
assert stop_result == stop_expected
assert step_result == step_expected
+
+
+def test_re_type():
+ assert isinstance(re.compile(''), re_type)
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index a595d9f18d6b8..c2d09c6d49e86 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -2,6 +2,8 @@
"""
Testing that we work in the downstream packages
"""
+import subprocess
+
import pytest
import numpy as np # noqa
from pandas import DataFrame
@@ -53,6 +55,11 @@ def test_xarray(df):
assert df.to_xarray() is not None
+def test_oo_optimizable():
+ # GH 21071
+ subprocess.check_call(["python", "-OO", "-c", "import pandas"])
+
+
@tm.network
def test_statsmodels():
@@ -87,6 +94,7 @@ def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
+@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
@@ -95,6 +103,7 @@ def test_pandas_datareader():
'F', 'quandl', '2017-01-01', '2017-02-01')
+@pytest.mark.xfail(reaason="downstream install issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index d8e90ae0e1b35..74f2c977e0db2 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -41,7 +41,7 @@ def win_types(request):
return request.param
-@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian', 'slepian'])
+@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian'])
def win_types_special(request):
return request.param
@@ -1079,8 +1079,7 @@ def test_cmov_window_special(self, win_types_special):
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
- 'general_gaussian': {'power': 2., 'width': 2.},
- 'slepian': {'width': 0.5}}
+ 'general_gaussian': {'power': 2., 'width': 2.}}
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
@@ -1090,8 +1089,6 @@ def test_cmov_window_special(self, win_types_special):
13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161,
13.08516, 12.95111, 12.74577, np.nan, np.nan],
- 'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284, 12.88331,
- 12.96079, 12.77008, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129,
12.90702, 12.83757, np.nan, np.nan]
}
diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py
index d6f58d16bcf64..ab7c4fb528452 100644
--- a/pandas/tests/util/test_testing.py
+++ b/pandas/tests/util/test_testing.py
@@ -503,6 +503,25 @@ def test_index_equal_metadata_message(self):
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
+ def test_categorical_index_equality(self):
+ expected = """Index are different
+
+Attribute "dtype" are different
+\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\)
+\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \
+ordered=False\\)"""
+
+ with tm.assert_raises_regex(AssertionError, expected):
+ assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])),
+ pd.Index(pd.Categorical(['a', 'b'],
+ categories=['a', 'b', 'c'])))
+
+ def test_categorical_index_equality_relax_categories_check(self):
+ assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])),
+ pd.Index(pd.Categorical(['a', 'b'],
+ categories=['a', 'b', 'c'])),
+ check_categorical=False)
+
class TestAssertSeriesEqual(object):
@@ -600,6 +619,25 @@ def test_series_equal_message(self):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]),
check_less_precise=True)
+ def test_categorical_series_equality(self):
+ expected = """Attributes are different
+
+Attribute "dtype" are different
+\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\)
+\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \
+ordered=False\\)"""
+
+ with tm.assert_raises_regex(AssertionError, expected):
+ assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])),
+ pd.Series(pd.Categorical(['a', 'b'],
+ categories=['a', 'b', 'c'])))
+
+ def test_categorical_series_equality_relax_categories_check(self):
+ assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])),
+ pd.Series(pd.Categorical(['a', 'b'],
+ categories=['a', 'b', 'c'])),
+ check_categorical=False)
+
class TestAssertFrameEqual(object):
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 749165f894819..c294110d89ec5 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1090,12 +1090,17 @@ def apply(self, other):
class CustomBusinessMonthEnd(_CustomBusinessMonth):
- __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end')
+ # TODO(py27): Replace condition with Subsitution after dropping Py27
+ if _CustomBusinessMonth.__doc__:
+ __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end')
_prefix = 'CBM'
class CustomBusinessMonthBegin(_CustomBusinessMonth):
- __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'beginning')
+ # TODO(py27): Replace condition with Subsitution after dropping Py27
+ if _CustomBusinessMonth.__doc__:
+ __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]',
+ 'beginning')
_prefix = 'CBMS'
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 624fbbbd4f05e..6b55554cdc941 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -4,7 +4,7 @@
import types
import warnings
from textwrap import dedent, wrap
-from functools import wraps, update_wrapper
+from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
def deprecate(name, alternative, version, alt_name=None,
@@ -20,18 +20,18 @@ def deprecate(name, alternative, version, alt_name=None,
Parameters
----------
name : str
- Name of function to deprecate
- alternative : str
- Name of function to use instead
+ Name of function to deprecate.
+ alternative : func
+ Function to use instead.
version : str
- Version of pandas in which the method has been deprecated
+ Version of pandas in which the method has been deprecated.
alt_name : str, optional
- Name to use in preference of alternative.__name__
+ Name to use in preference of alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
- The message to display in the warning.
- Default is '{name} is deprecated. Use {alt_name} instead.'
+ The message to display in the warning.
+ Default is '{name} is deprecated. Use {alt_name} instead.'
"""
alt_name = alt_name or alternative.__name__
@@ -39,25 +39,26 @@ def deprecate(name, alternative, version, alt_name=None,
warning_msg = msg or '{} is deprecated, use {} instead'.format(name,
alt_name)
- @wraps(alternative)
+ # adding deprecated directive to the docstring
+ msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
+ msg = '\n '.join(wrap(msg, 70))
+
+ @Substitution(version=version, msg=msg)
+ @Appender(alternative.__doc__)
def wrapper(*args, **kwargs):
+ """
+ .. deprecated:: %(version)s
+
+ %(msg)s
+
+ """
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
- # adding deprecated directive to the docstring
- msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
- tpl = dedent("""
- .. deprecated:: {version}
-
- {msg}
-
- {rest}
- """)
- rest = getattr(wrapper, '__doc__', '')
- docstring = tpl.format(version=version,
- msg='\n '.join(wrap(msg, 70)),
- rest=dedent(rest))
- wrapper.__doc__ = docstring
+ # Since we are using Substitution to create the required docstring,
+ # remove that from the attributes that should be assigned to the wrapper
+ assignments = tuple(x for x in WRAPPER_ASSIGNMENTS if x != '__doc__')
+ update_wrapper(wrapper, alternative, assigned=assignments)
return wrapper
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index e1484a9c1b390..233eba6490937 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -778,8 +778,12 @@ def assert_index_equal(left, right, exact='equiv', check_names=True,
def _check_types(l, r, obj='Index'):
if exact:
- assert_class_equal(left, right, exact=exact, obj=obj)
- assert_attr_equal('dtype', l, r, obj=obj)
+ assert_class_equal(l, r, exact=exact, obj=obj)
+
+ # Skip exact dtype checking when `check_categorical` is False
+ if check_categorical:
+ assert_attr_equal('dtype', l, r, obj=obj)
+
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
@@ -829,7 +833,8 @@ def _get_ilevel_values(index, level):
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
- if check_exact:
+ # skip exact index checking when `check_categorical` is False
+ if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
@@ -950,23 +955,23 @@ def is_sorted(seq):
def assert_categorical_equal(left, right, check_dtype=True,
- obj='Categorical', check_category_order=True):
+ check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
- left, right : Categorical
- Categoricals to compare
+ left : Categorical
+ right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
- obj : str, default 'Categorical'
- Specify object name being compared, internally used to show appropriate
- assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
+ obj : str, default 'Categorical'
+ Specify object name being compared, internally used to show appropriate
+ assertion message
"""
_check_isinstance(left, right, Categorical)
@@ -1020,7 +1025,7 @@ def raise_assert_detail(obj, message, left, right, diff=None):
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
- obj='numpy array', check_same=None):
+ check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
@@ -1033,11 +1038,11 @@ def assert_numpy_array_equal(left, right, strict_nan=False,
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
+ check_same : None|'copy'|'same', default None
+ Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
- check_same : None|'copy'|'same', default None
- Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
| https://api.github.com/repos/pandas-dev/pandas/pulls/21389 | 2018-06-08T17:22:16Z | 2018-06-09T15:32:44Z | 2018-06-09T15:32:44Z | 2018-06-09T15:32:52Z | |
DOC: update whatsnew 0.23.1 | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 5a1bcce9b5970..d0af3c6c9ca9c 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -47,27 +47,23 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
-Groupby/Resample/Rolling
-~~~~~~~~~~~~~~~~~~~~~~~~
+**Groupby/Resample/Rolling**
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True``
-Data-type specific
-~~~~~~~~~~~~~~~~~~
+**Data-type specific**
- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`)
- Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`)
- Bug in :func:`pandas.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
-Sparse
-~~~~~~
+**Sparse**
- Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`)
-Indexing
-~~~~~~~~
+**Indexing**
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
@@ -75,26 +71,21 @@ Indexing
- Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`)
- Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`)
-Plotting
-~~~~~~~~
+**Plotting**
- New keywords (sharex, sharey) to turn on/off sharing of x/y-axis by subplots generated with pandas.DataFrame().groupby().boxplot() (:issue: `20968`)
-I/O
-~~~
+**I/O**
- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`)
- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`)
- Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`)
-
-Reshaping
-~~~~~~~~~
+**Reshaping**
- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`)
- Bug in :func:`concat` warning message providing the wrong guidance for future behavior (:issue:`21101`)
-Other
-~~~~~
+**Other**
- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`)
| One of the last commits apparently added back the subsections | https://api.github.com/repos/pandas-dev/pandas/pulls/21387 | 2018-06-08T16:41:36Z | 2018-06-08T16:41:50Z | 2018-06-08T16:41:50Z | 2018-06-08T17:42:01Z |
Accepts integer/float string with units and raises when unit is ambiguous | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b71edcf1f6f51..5bbdddaf60fbe 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -665,8 +665,7 @@ Timedelta
- Bug in :class:`Index` with numeric dtype when multiplying or dividing an array with dtype ``timedelta64`` (:issue:`22390`)
- Bug in :class:`TimedeltaIndex` incorrectly allowing indexing with ``Timestamp`` object (:issue:`20464`)
- Fixed bug where subtracting :class:`Timedelta` from an object-dtyped array would raise ``TypeError`` (:issue:`21980`)
--
--
+- Bug in :class:`Timedelta`: where passing a string of a pure number would not take unit into account. Also raises for ambiguous/duplicate unit specification (:issue: `12136`)
Timezones
^^^^^^^^^
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 9c8be1901d1dc..6727a07b6e14c 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# cython: profile=False
import collections
import textwrap
import warnings
@@ -6,13 +7,13 @@ import warnings
import sys
cdef bint PY3 = (sys.version_info[0] >= 3)
-from cython import Py_ssize_t
+from cython cimport Py_ssize_t
-from cpython cimport Py_NE, Py_EQ, PyObject_RichCompare
+from cpython cimport PyUnicode_Check, Py_NE, Py_EQ, PyObject_RichCompare
import numpy as np
cimport numpy as cnp
-from numpy cimport int64_t
+from numpy cimport int64_t, ndarray
cnp.import_array()
from cpython.datetime cimport (datetime, timedelta,
@@ -32,7 +33,6 @@ from np_datetime cimport (cmp_scalar, reverse_ops, td64_to_tdstruct,
from nattype import nat_strings, NaT
from nattype cimport checknull_with_nat, NPY_NAT
-from offsets cimport to_offset
# ----------------------------------------------------------------------
# Constants
@@ -78,44 +78,6 @@ cdef dict timedelta_abbrevs = { 'D': 'd',
_no_input = object()
-
-# ----------------------------------------------------------------------
-# API
-
-def ints_to_pytimedelta(int64_t[:] arr, box=False):
- """
- convert an i8 repr to an ndarray of timedelta or Timedelta (if box ==
- True)
-
- Parameters
- ----------
- arr : ndarray[int64_t]
- box : bool, default False
-
- Returns
- -------
- result : ndarray[object]
- array of Timedelta or timedeltas objects
- """
- cdef:
- Py_ssize_t i, n = len(arr)
- int64_t value
- object[:] result = np.empty(n, dtype=object)
-
- for i in range(n):
-
- value = arr[i]
- if value == NPY_NAT:
- result[i] = NaT
- else:
- if box:
- result[i] = Timedelta(value)
- else:
- result[i] = timedelta(microseconds=int(value) / 1000)
-
- return result.base # .base to access underlying np.ndarray
-
-
# ----------------------------------------------------------------------
cpdef int64_t delta_to_nanoseconds(delta) except? -1:
@@ -161,7 +123,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
if ts.astype('int64') == NPY_NAT:
return np.timedelta64(NPY_NAT)
elif is_timedelta64_object(ts):
- ts = ts.astype("m8[{unit}]".format(unit=unit.lower()))
+ ts = ts.astype("m8[{0}]".format(unit.lower()))
elif is_integer_object(ts):
if ts == NPY_NAT:
return np.timedelta64(NPY_NAT)
@@ -182,11 +144,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
ts = cast_from_unit(ts, unit)
ts = np.timedelta64(ts)
elif is_string_object(ts):
- if len(ts) > 0 and ts[0] == 'P':
- ts = parse_iso_format_string(ts)
- else:
- ts = parse_timedelta_string(ts)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(parse_timedelta_string(ts))
elif hasattr(ts, 'delta'):
ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns')
@@ -198,7 +156,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
return ts.astype('timedelta64[ns]')
-cpdef array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
+cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
"""
Convert an ndarray to an array of timedeltas. If errors == 'coerce',
coerce non-convertible objects to NaT. Otherwise, raise.
@@ -206,7 +164,7 @@ cpdef array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
cdef:
Py_ssize_t i, n
- int64_t[:] iresult
+ ndarray[int64_t] iresult
if errors not in ('ignore', 'raise', 'coerce'):
raise ValueError("errors must be one of 'ignore', "
@@ -232,7 +190,7 @@ cpdef array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
else:
raise
- return iresult.base # .base to access underlying np.ndarray
+ return iresult
cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
@@ -264,7 +222,7 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
m = 1L
p = 0
else:
- raise ValueError("cannot cast unit {unit}".format(unit=unit))
+ raise ValueError("cannot cast unit {0}".format(unit))
# just give me the unit back
if ts is None:
@@ -272,22 +230,22 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
# cast the unit, multiply base/frace separately
# to avoid precision issues from float -> int
- base = <int64_t>ts
+ base = <int64_t> ts
frac = ts - base
if p:
frac = round(frac, p)
- return <int64_t>(base * m) + <int64_t>(frac * m)
+ return <int64_t> (base * m) + <int64_t> (frac * m)
cdef inline _decode_if_necessary(object ts):
# decode ts if necessary
- if not isinstance(ts, unicode) and not PY3:
+ if not PyUnicode_Check(ts) and not PY3:
ts = str(ts).decode('utf-8')
return ts
-cdef inline parse_timedelta_string(object ts):
+cdef inline parse_timedelta_string(object ts, specified_unit=None):
"""
Parse a regular format timedelta string. Return an int64_t (in ns)
or raise a ValueError on an invalid parse.
@@ -295,10 +253,11 @@ cdef inline parse_timedelta_string(object ts):
cdef:
unicode c
- bint neg = 0, have_dot = 0, have_value = 0, have_hhmmss = 0
- object current_unit = None
- int64_t result = 0, m = 0, r
- list number = [], frac = [], unit = []
+ bint neg=0, have_dot=0, have_value=0, have_hhmmss=0
+ object current_unit=None
+ object fallback_unit=None
+ int64_t result=0, m=0, r
+ list number=[], frac=[], unit=[]
# neg : tracks if we have a leading negative for the value
# have_dot : tracks if we are processing a dot (either post hhmmss or
@@ -373,7 +332,7 @@ cdef inline parse_timedelta_string(object ts):
have_hhmmss = 1
else:
raise ValueError("expecting hh:mm:ss format, "
- "received: {ts}".format(ts=ts))
+ "received: {0}".format(ts))
unit, number = [], []
@@ -448,9 +407,13 @@ cdef inline parse_timedelta_string(object ts):
if have_value:
raise ValueError("have leftover units")
if len(number):
- r = timedelta_from_spec(number, frac, 'ns')
+ fallback_unit = 'ns' if specified_unit is None else specified_unit
+ r = timedelta_from_spec(number, frac, fallback_unit)
result += timedelta_as_neg(r, neg)
+ if (specified_unit is not None) and (fallback_unit is None):
+ raise ValueError("unit was specified but is redundant/ambiguous")
+
return result
@@ -482,7 +445,7 @@ cdef inline timedelta_from_spec(object number, object frac, object unit):
unit = ''.join(unit)
unit = timedelta_abbrevs[unit.lower()]
except KeyError:
- raise ValueError("invalid abbreviation: {unit}".format(unit=unit))
+ raise ValueError("invalid abbreviation: {0}".format(unit))
n = ''.join(number) + '.' + ''.join(frac)
return cast_from_unit(float(n), unit)
@@ -541,12 +504,10 @@ def _binary_op_method_timedeltalike(op, name):
elif hasattr(other, 'dtype'):
# nd-array like
- if other.dtype.kind in ['m', 'M']:
- return op(self.to_timedelta64(), other)
- elif other.dtype.kind == 'O':
- return np.array([op(self, x) for x in other])
- else:
+ if other.dtype.kind not in ['m', 'M']:
+ # raise rathering than letting numpy return wrong answer
return NotImplemented
+ return op(self.to_timedelta64(), other)
elif not _validate_ops_compat(other):
return NotImplemented
@@ -593,10 +554,10 @@ cdef inline int64_t parse_iso_format_string(object ts) except? -1:
cdef:
unicode c
int64_t result = 0, r
- int p = 0
+ int p=0
object dec_unit = 'ms', err_msg
- bint have_dot = 0, have_value = 0, neg = 0
- list number = [], unit = []
+ bint have_dot=0, have_value=0, neg=0
+ list number=[], unit=[]
ts = _decode_if_necessary(ts)
@@ -683,8 +644,8 @@ cdef _to_py_int_float(v):
return int(v)
elif is_float_object(v):
return float(v)
- raise TypeError("Invalid type {typ}. Must be int or "
- "float.".format(typ=type(v)))
+ raise TypeError("Invalid type {0}. Must be int or "
+ "float.".format(type(v)))
# Similar to Timestamp/datetime, this is a construction requirement for
@@ -730,10 +691,9 @@ cdef class _Timedelta(timedelta):
return True
# only allow ==, != ops
- raise TypeError('Cannot compare type {cls} with '
- 'type {other}'
- .format(cls=type(self).__name__,
- other=type(other).__name__))
+ raise TypeError('Cannot compare type {!r} with type ' \
+ '{!r}'.format(type(self).__name__,
+ type(other).__name__))
if util.is_array(other):
return PyObject_RichCompare(np.array([self]), other, op)
return PyObject_RichCompare(other, self, reverse_ops[op])
@@ -742,9 +702,9 @@ cdef class _Timedelta(timedelta):
return False
elif op == Py_NE:
return True
- raise TypeError('Cannot compare type {cls} with type {other}'
- .format(cls=type(self).__name__,
- other=type(other).__name__))
+ raise TypeError('Cannot compare type {!r} with type ' \
+ '{!r}'.format(type(self).__name__,
+ type(other).__name__))
return cmp_scalar(self.value, ots.value, op)
@@ -835,81 +795,12 @@ cdef class _Timedelta(timedelta):
@property
def asm8(self):
- """
- Return a numpy timedelta64 array scalar view.
-
- Provides access to the array scalar view (i.e. a combination of the
- value and the units) associated with the numpy.timedelta64().view(),
- including a 64-bit integer representation of the timedelta in
- nanoseconds (Python int compatible).
-
- Returns
- -------
- numpy timedelta64 array scalar view
- Array scalar view of the timedelta in nanoseconds.
-
- Examples
- --------
- >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns')
- >>> td.asm8
- numpy.timedelta64(86520000003042,'ns')
-
- >>> td = pd.Timedelta('2 min 3 s')
- >>> td.asm8
- numpy.timedelta64(123000000000,'ns')
-
- >>> td = pd.Timedelta('3 ms 5 us')
- >>> td.asm8
- numpy.timedelta64(3005000,'ns')
-
- >>> td = pd.Timedelta(42, unit='ns')
- >>> td.asm8
- numpy.timedelta64(42,'ns')
- """
+ """ return a numpy timedelta64 array view of myself """
return np.int64(self.value).view('m8[ns]')
@property
def resolution(self):
- """
- Return a string representing the lowest timedelta resolution.
-
- Each timedelta has a defined resolution that represents the lowest OR
- most granular level of precision. Each level of resolution is
- represented by a short string as defined below:
-
- Resolution: Return value
-
- * Days: 'D'
- * Hours: 'H'
- * Minutes: 'T'
- * Seconds: 'S'
- * Milliseconds: 'L'
- * Microseconds: 'U'
- * Nanoseconds: 'N'
-
- Returns
- -------
- str
- Timedelta resolution.
-
- Examples
- --------
- >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns')
- >>> td.resolution
- 'N'
-
- >>> td = pd.Timedelta('1 days 2 min 3 us')
- >>> td.resolution
- 'U'
-
- >>> td = pd.Timedelta('2 min 3 s')
- >>> td.resolution
- 'S'
-
- >>> td = pd.Timedelta(36, unit='us')
- >>> td.resolution
- 'U'
- """
+ """ return a string representing the lowest resolution that we have """
self._ensure_components()
if self._ns:
@@ -931,7 +822,7 @@ cdef class _Timedelta(timedelta):
def nanoseconds(self):
"""
Return the number of nanoseconds (n), where 0 <= n < 1 microsecond.
-
+
Returns
-------
int
@@ -982,8 +873,8 @@ cdef class _Timedelta(timedelta):
sign = " "
if format == 'all':
- fmt = ("{days} days{sign}{hours:02}:{minutes:02}:{seconds:02}."
- "{milliseconds:03}{microseconds:03}{nanoseconds:03}")
+ fmt = "{days} days{sign}{hours:02}:{minutes:02}:{seconds:02}." \
+ "{milliseconds:03}{microseconds:03}{nanoseconds:03}"
else:
# if we have a partial day
subs = (self._h or self._m or self._s or
@@ -1008,14 +899,11 @@ cdef class _Timedelta(timedelta):
return fmt.format(**comp_dict)
def __repr__(self):
- return "Timedelta('{val}')".format(val=self._repr_base(format='long'))
+ return "Timedelta('{0}')".format(self._repr_base(format='long'))
def __str__(self):
return self._repr_base(format='long')
- def __bool__(self):
- return self.value != 0
-
def isoformat(self):
"""
Format Timedelta as ISO 8601 Duration like
@@ -1062,8 +950,8 @@ cdef class _Timedelta(timedelta):
components.nanoseconds)
# Trim unnecessary 0s, 1.000000000 -> 1
seconds = seconds.rstrip('0').rstrip('.')
- tpl = ('P{td.days}DT{td.hours}H{td.minutes}M{seconds}S'
- .format(td=components, seconds=seconds))
+ tpl = 'P{td.days}DT{td.hours}H{td.minutes}M{seconds}S'.format(
+ td=components, seconds=seconds)
return tpl
@@ -1081,7 +969,7 @@ class Timedelta(_Timedelta):
----------
value : Timedelta, timedelta, np.timedelta64, string, or integer
unit : string, {'ns', 'us', 'ms', 's', 'm', 'h', 'D'}, optional
- Denote the unit of the input, if input is an integer. Default 'ns'.
+ Denote the unit of the input, if input is an integer/float. Default 'ns'.
days, seconds, microseconds,
milliseconds, minutes, hours, weeks : numeric, optional
Values for construction in compat with datetime.timedelta.
@@ -1116,11 +1004,34 @@ class Timedelta(_Timedelta):
if isinstance(value, Timedelta):
value = value.value
elif is_string_object(value):
- if len(value) > 0 and value[0] == 'P':
+ # Check if it is just a number in a string
+ try:
+ value = int(value)
+ except (ValueError, TypeError):
+ try:
+ value = float(value)
+ except (ValueError, TypeError):
+ pass
+
+ if is_integer_object(value) or is_float_object(value):
+ value = convert_to_timedelta64(value, unit)
+ elif len(value) > 0 and value[0] == 'P':
value = parse_iso_format_string(value)
else:
- value = parse_timedelta_string(value)
- value = np.timedelta64(value)
+ try:
+ orig_value = value
+ value = float(value)
+ except ValueError:
+ if unit is not None:
+ raise ValueError("Unit cannot be defined for strings other than pure integer/floats."
+ " Value: {} Unit: {}".format(value, unit))
+ value = parse_timedelta_string(value)
+ value = np.timedelta64(value)
+ else:
+ if unit is None:
+ raise ValueError("Cannot convert float string without unit."
+ " Value: {} Type: {}".format(orig_value, type(orig_value)))
+ value = convert_to_timedelta64(value, unit)
elif PyDelta_Check(value):
value = convert_to_timedelta64(value, 'ns')
elif is_timedelta64_object(value):
@@ -1164,6 +1075,7 @@ class Timedelta(_Timedelta):
cdef:
int64_t result, unit
+ from pandas.tseries.frequencies import to_offset
unit = to_offset(freq).nanos
result = unit * rounder(self.value / float(unit))
return Timedelta(result, unit='ns')
@@ -1370,7 +1282,7 @@ class Timedelta(_Timedelta):
'{op}'.format(dtype=other.dtype,
op='__floordiv__'))
- elif is_float_object(other) and util.is_nan(other):
+ elif is_float_object(other) and util._checknull(other):
# i.e. np.nan
return NotImplemented
diff --git a/pandas/tests/scalar/timedelta/test_construction.py b/pandas/tests/scalar/timedelta/test_construction.py
index d648140aa7347..a167ceed7059a 100644
--- a/pandas/tests/scalar/timedelta/test_construction.py
+++ b/pandas/tests/scalar/timedelta/test_construction.py
@@ -89,6 +89,9 @@ def test_construction():
with pytest.raises(ValueError):
Timedelta('3.1415')
+ with pytest.raises(ValueError):
+ Timedelta('2000')
+
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
@@ -210,3 +213,16 @@ def test_td_constructor_on_nanoseconds(constructed_td, conversion):
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds='abc')
+
+
+@pytest.mark.parametrize("str_unit, unit, expectation", [
+ ("", "s", tm.do_not_raise), # Expected case
+ ("s", "s", pytest.raises(ValueError)), # Units doubly defined
+ ("s", "d", pytest.raises(ValueError)),
+ ("", None, pytest.raises(ValueError)), # No units
+])
+def test_string_with_unit(str_unit, unit, expectation):
+ with expectation:
+ val_str = "10{}".format(str_unit)
+ assert Timedelta(val_str, unit=unit) == Timedelta(10, unit=unit)
+ assert pd.to_timedelta(val_str, unit=unit) == Timedelta(10, unit=unit)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 3db251e89842d..a03827a361a8e 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -49,6 +49,18 @@
from pandas.io.common import urlopen
+class NullContextManager(object):
+ def __init__(self, dummy_resource=None):
+ self.dummy_resource = dummy_resource
+ def __enter__(self):
+ return self.dummy_resource
+ def __exit__(self, *args):
+ pass
+
+
+do_not_raise = NullContextManager()
+
+
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
| I couldn't get tests to run on my machine so I will see if it passes on the CI servers.
- [x] closes #12136
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21384 | 2018-06-08T14:45:43Z | 2018-10-07T16:30:57Z | null | 2018-10-07T17:07:52Z |
DOC: update multi-index term with MultiIndex | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index d4efa8a28f6c5..74f1d80c6fd3d 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -168,7 +168,7 @@ either match on the *index* or *columns* via the **axis** keyword:
df_orig = df
-Furthermore you can align a level of a multi-indexed DataFrame with a Series.
+Furthermore you can align a level of a MultiIndexed DataFrame with a Series.
.. ipython:: python
@@ -1034,7 +1034,7 @@ Passing a single function to ``.transform()`` with a ``Series`` will yield a sin
Transform with multiple functions
+++++++++++++++++++++++++++++++++
-Passing multiple functions will yield a column multi-indexed DataFrame.
+Passing multiple functions will yield a column MultiIndexed DataFrame.
The first level will be the original frame column names; the second level
will be the names of the transforming functions.
@@ -1060,7 +1060,7 @@ Passing a dict of functions will allow selective transforming per column.
tsdf.transform({'A': np.abs, 'B': lambda x: x+1})
-Passing a dict of lists will generate a multi-indexed DataFrame with these
+Passing a dict of lists will generate a MultiIndexed DataFrame with these
selective transforms.
.. ipython:: python
@@ -1889,12 +1889,12 @@ faster than sorting the entire Series and calling ``head(n)`` on the result.
df.nsmallest(5, ['a', 'c'])
-.. _basics.multi-index_sorting:
+.. _basics.multiindex_sorting:
-Sorting by a multi-index column
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Sorting by a MultiIndex column
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You must be explicit about sorting when the column is a multi-index, and fully specify
+You must be explicit about sorting when the column is a MultiIndex, and fully specify
all levels to ``by``.
.. ipython:: python
diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst
index 6b2ecfe66d5e2..4dec2a23facca 100644
--- a/doc/source/contributing_docstring.rst
+++ b/doc/source/contributing_docstring.rst
@@ -243,7 +243,7 @@ their use cases, if it is not too generic.
"""
Pivot a row index to columns.
- When using a multi-index, a level can be pivoted so each value in
+ When using a MultiIndex, a level can be pivoted so each value in
the index becomes a column. This is especially useful when a subindex
is repeated for the main index, and data is easier to visualize as a
pivot table.
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index b5b56fc6815c9..4d8e7979060f4 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -353,7 +353,7 @@ From a list of dicts
From a dict of tuples
~~~~~~~~~~~~~~~~~~~~~
-You can automatically create a multi-indexed frame by passing a tuples
+You can automatically create a MultiIndexed frame by passing a tuples
dictionary.
.. ipython:: python
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index d0af44cde1857..56fea1ccfd9dc 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -189,7 +189,7 @@ widely used by institutions such as statistics offices, central banks,
and international organisations. pandaSDMX can expose datasets and related
structural metadata including data flows, code-lists,
and data structure definitions as pandas Series
-or multi-indexed DataFrames.
+or MultiIndexed DataFrames.
`fredapi <https://github.com/mortada/fredapi>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 32129147ee281..658b9ff15783d 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2356,7 +2356,7 @@ Read a URL and match a table that contains specific text:
Specify a header row (by default ``<th>`` or ``<td>`` elements located within a
``<thead>`` are used to form the column index, if multiple rows are contained within
-``<thead>`` then a multi-index is created); if specified, the header row is taken
+``<thead>`` then a MultiIndex is created); if specified, the header row is taken
from the data minus the parsed header elements (``<th>`` elements).
.. code-block:: python
@@ -3615,10 +3615,10 @@ defaults to `nan`.
# we have provided a minimum string column size
store.root.df_mixed.table
-Storing Multi-Index DataFrames
-++++++++++++++++++++++++++++++
+Storing MultiIndex DataFrames
++++++++++++++++++++++++++++++
-Storing multi-index ``DataFrames`` as tables is very similar to
+Storing MultiIndex ``DataFrames`` as tables is very similar to
storing/selecting from homogeneous index ``DataFrames``.
.. ipython:: python
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index 0de6b871712a3..45944ba56d4e7 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -1085,12 +1085,12 @@ As you can see, this drops any rows where there was no match.
.. _merging.join_on_mi:
-Joining a single Index to a Multi-index
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Joining a single Index to a MultiIndex
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You can join a singly-indexed ``DataFrame`` with a level of a multi-indexed ``DataFrame``.
+You can join a singly-indexed ``DataFrame`` with a level of a MultiIndexed ``DataFrame``.
The level will match on the name of the index of the singly-indexed frame against
-a level name of the multi-indexed frame.
+a level name of the MultiIndexed frame.
.. ipython:: python
@@ -1130,8 +1130,8 @@ This is equivalent but less verbose and more memory efficient / faster than this
labels=['left', 'right'], vertical=False);
plt.close('all');
-Joining with two multi-indexes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Joining with two MultiIndexes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is not implemented via ``join`` at-the-moment, however it can be done using
the following code.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 04c499ff6797b..fa03d614ed42c 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2174,7 +2174,7 @@ Highlights include:
- SQL interfaces updated to use ``sqlalchemy``, see :ref:`here<whatsnew_0140.sql>`.
- Display interface changes, see :ref:`here<whatsnew_0140.display>`
- MultiIndexing using Slicers, see :ref:`here<whatsnew_0140.slicers>`.
-- Ability to join a singly-indexed DataFrame with a multi-indexed DataFrame, see :ref:`here <merging.join_on_mi>`
+- Ability to join a singly-indexed DataFrame with a MultiIndexed DataFrame, see :ref:`here <merging.join_on_mi>`
- More consistency in groupby results and more flexible groupby specifications, see :ref:`here<whatsnew_0140.groupby>`
- Holiday calendars are now supported in ``CustomBusinessDay``, see :ref:`here <timeseries.holiday>`
- Several improvements in plotting functions, including: hexbin, area and pie plots, see :ref:`here<whatsnew_0140.plotting>`.
@@ -2384,8 +2384,8 @@ Bug Fixes
- Bug in merging ``timedelta`` dtypes (:issue:`5695`)
- Bug in plotting.scatter_matrix function. Wrong alignment among diagonal
and off-diagonal plots, see (:issue:`5497`).
-- Regression in Series with a multi-index via ix (:issue:`6018`)
-- Bug in Series.xs with a multi-index (:issue:`6018`)
+- Regression in Series with a MultiIndex via ix (:issue:`6018`)
+- Bug in Series.xs with a MultiIndex (:issue:`6018`)
- Bug in Series construction of mixed type with datelike and an integer (which should result in
object type and not automatic conversion) (:issue:`6028`)
- Possible segfault when chained indexing with an object array under NumPy 1.7.1 (:issue:`6026`, :issue:`6056`)
@@ -2409,10 +2409,10 @@ Bug Fixes
- Fixed a bug in ``query``/``eval`` during lexicographic string comparisons (:issue:`6155`).
- Fixed a bug in ``query`` where the index of a single-element ``Series`` was
being thrown away (:issue:`6148`).
-- Bug in ``HDFStore`` on appending a dataframe with multi-indexed columns to
+- Bug in ``HDFStore`` on appending a dataframe with MultiIndexed columns to
an existing table (:issue:`6167`)
- Consistency with dtypes in setting an empty DataFrame (:issue:`6171`)
-- Bug in selecting on a multi-index ``HDFStore`` even in the presence of under
+- Bug in selecting on a MultiIndex ``HDFStore`` even in the presence of under
specified column spec (:issue:`6169`)
- Bug in ``nanops.var`` with ``ddof=1`` and 1 elements would sometimes return ``inf``
rather than ``nan`` on some platforms (:issue:`6136`)
@@ -2659,8 +2659,8 @@ API Changes
- the ``format`` keyword now replaces the ``table`` keyword; allowed values
are ``fixed(f)|table(t)`` the ``Storer`` format has been renamed to
``Fixed``
- - a column multi-index will be recreated properly (:issue:`4710`); raise on
- trying to use a multi-index with data_columns on the same axis
+ - a column MultiIndex will be recreated properly (:issue:`4710`); raise on
+ trying to use a MultiIndex with data_columns on the same axis
- ``select_as_coordinates`` will now return an ``Int64Index`` of the
resultant selection set
- support ``timedelta64[ns]`` as a serialization type (:issue:`3577`)
@@ -2932,7 +2932,7 @@ Bug Fixes
- A zero length series written in Fixed format not deserializing properly.
(:issue:`4708`)
- Fixed decoding perf issue on pyt3 (:issue:`5441`)
- - Validate levels in a multi-index before storing (:issue:`5527`)
+ - Validate levels in a MultiIndex before storing (:issue:`5527`)
- Correctly handle ``data_columns`` with a Panel (:issue:`5717`)
- Fixed bug in tslib.tz_convert(vals, tz1, tz2): it could raise IndexError
exception while trying to access trans[pos + 1] (:issue:`4496`)
@@ -2995,7 +2995,7 @@ Bug Fixes
alignment (:issue:`3777`)
- frozenset objects now raise in the ``Series`` constructor (:issue:`4482`,
:issue:`4480`)
-- Fixed issue with sorting a duplicate multi-index that has multiple dtypes
+- Fixed issue with sorting a duplicate MultiIndex that has multiple dtypes
(:issue:`4516`)
- Fixed bug in ``DataFrame.set_values`` which was causing name attributes to
be lost when expanding the index. (:issue:`3742`, :issue:`4039`)
@@ -3042,11 +3042,11 @@ Bug Fixes
(:issue:`4328`)
- Bug with Series indexing not raising an error when the right-hand-side has
an incorrect length (:issue:`2702`)
-- Bug in multi-indexing with a partial string selection as one part of a
+- Bug in MultiIndexing with a partial string selection as one part of a
MultIndex (:issue:`4758`)
- Bug with reindexing on the index with a non-unique index will now raise
``ValueError`` (:issue:`4746`)
-- Bug in setting with ``loc/ix`` a single indexer with a multi-index axis and
+- Bug in setting with ``loc/ix`` a single indexer with a MultiIndex axis and
a NumPy array, related to (:issue:`3777`)
- Bug in concatenation with duplicate columns across dtypes not merging with
axis=0 (:issue:`4771`, :issue:`4975`)
@@ -3117,7 +3117,7 @@ Bug Fixes
- Make sure series-series boolean comparisons are label based (:issue:`4947`)
- Bug in multi-level indexing with a Timestamp partial indexer
(:issue:`4294`)
-- Tests/fix for multi-index construction of an all-nan frame (:issue:`4078`)
+- Tests/fix for MultiIndex construction of an all-nan frame (:issue:`4078`)
- Fixed a bug where :func:`~pandas.read_html` wasn't correctly inferring
values of tables with commas (:issue:`5029`)
- Fixed a bug where :func:`~pandas.read_html` wasn't providing a stable
@@ -3174,7 +3174,7 @@ Bug Fixes
- Fixed segfault in C parser caused by passing more names than columns in
the file. (:issue:`5156`)
- Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`)
-- C and Python Parser can now handle the more common multi-index column
+- C and Python Parser can now handle the more common MultiIndex column
format which doesn't have a row for index names (:issue:`4702`)
- Bug when trying to use an out-of-bounds date as an object dtype
(:issue:`5312`)
@@ -3199,7 +3199,7 @@ Bug Fixes
- performance improvements in ``isnull`` on larger size pandas objects
- Fixed various setitem with 1d ndarray that does not have a matching
length to the indexer (:issue:`5508`)
-- Bug in getitem with a multi-index and ``iloc`` (:issue:`5528`)
+- Bug in getitem with a MultiIndex and ``iloc`` (:issue:`5528`)
- Bug in delitem on a Series (:issue:`5542`)
- Bug fix in apply when using custom function and objects are not mutated (:issue:`5545`)
- Bug in selecting from a non-unique index with ``loc`` (:issue:`5553`)
@@ -3208,7 +3208,7 @@ Bug Fixes
- Bug in repeated indexing of object with resultant non-unique index (:issue:`5678`)
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
-- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
+- Bug in MultiIndex selection in PY3 when using certain keys (:issue:`5725`)
- Row-wise concat of differing dtypes failing in certain cases (:issue:`5754`)
pandas 0.12.0
@@ -3229,14 +3229,14 @@ New Features
- Added module for reading and writing Stata files: pandas.io.stata (:issue:`1512`)
includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader
- Added support for writing in ``to_csv`` and reading in ``read_csv``,
- multi-index columns. The ``header`` option in ``read_csv`` now accepts a
+ MultiIndex columns. The ``header`` option in ``read_csv`` now accepts a
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatibility for the pre 0.12 behavior of
- writing and reading multi-index columns via a list of tuples. The default in
+ writing and reading MultiIndex columns via a list of tuples. The default in
0.12 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ MultiIndex column.
Note: The default value will change in 0.12 to make the default *to* write and
- read multi-index columns in the new format. (:issue:`3571`, :issue:`1651`, :issue:`3141`)
+ read MultiIndex columns in the new format. (:issue:`3571`, :issue:`1651`, :issue:`3141`)
- Add iterator to ``Series.str`` (:issue:`3638`)
- ``pd.set_option()`` now allows N option, value pairs (:issue:`3667`).
- Added keyword parameters for different types of scatter_matrix subplots
@@ -3447,7 +3447,7 @@ Bug Fixes
- Fixed bug with ``Panel.transpose`` argument aliases (:issue:`3556`)
- Fixed platform bug in ``PeriodIndex.take`` (:issue:`3579`)
- Fixed bud in incorrect conversion of datetime64[ns] in ``combine_first`` (:issue:`3593`)
-- Fixed bug in reset_index with ``NaN`` in a multi-index (:issue:`3586`)
+- Fixed bug in reset_index with ``NaN`` in a MultiIndex (:issue:`3586`)
- ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
@@ -3480,7 +3480,7 @@ Bug Fixes
their first argument (:issue:`3702`)
- Fix file tokenization error with \r delimiter and quoted fields (:issue:`3453`)
- Groupby transform with item-by-item not upcasting correctly (:issue:`3740`)
-- Incorrectly read a HDFStore multi-index Frame with a column specification (:issue:`3748`)
+- Incorrectly read a HDFStore MultiIndex Frame with a column specification (:issue:`3748`)
- ``read_html`` now correctly skips tests (:issue:`3741`)
- PandasObjects raise TypeError when trying to hash (:issue:`3882`)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (:issue:`3481`)
@@ -3497,7 +3497,7 @@ Bug Fixes
- csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was
specified (:issue:`3967`), Python parser failing with ``chunksize=1``
- Fix index name not propagating when using ``shift``
-- Fixed dropna=False being ignored with multi-index stack (:issue:`3997`)
+- Fixed dropna=False being ignored with MultiIndex stack (:issue:`3997`)
- Fixed flattening of columns when renaming MultiIndex columns DataFrame (:issue:`4004`)
- Fix ``Series.clip`` for datetime series. NA/NaN threshold values will now throw ValueError (:issue:`3996`)
- Fixed insertion issue into DataFrame, after rename (:issue:`4032`)
@@ -3521,7 +3521,7 @@ Bug Fixes
iterated over when regex=False (:issue:`4115`)
- Fixed bug in ``convert_objects(convert_numeric=True)`` where a mixed numeric and
object Series/Frame was not converting properly (:issue:`4119`)
-- Fixed bugs in multi-index selection with column multi-index and duplicates
+- Fixed bugs in MultiIndex selection with column MultiIndex and duplicates
(:issue:`4145`, :issue:`4146`)
- Fixed bug in the parsing of microseconds when using the ``format``
argument in ``to_datetime`` (:issue:`4152`)
@@ -3830,7 +3830,7 @@ Improvements to existing features
- ``HDFStore``
- - enables storing of multi-index dataframes (closes :issue:`1277`)
+ - enables storing of MultiIndex dataframes (closes :issue:`1277`)
- support data column indexing and selection, via ``data_columns`` keyword
in append
- support write chunking to reduce memory footprint, via ``chunksize``
diff --git a/doc/source/spelling_wordlist.txt b/doc/source/spelling_wordlist.txt
index 4c355a1b9c435..be93cdad083e9 100644
--- a/doc/source/spelling_wordlist.txt
+++ b/doc/source/spelling_wordlist.txt
@@ -8,6 +8,10 @@ ga
fe
reindexed
automagic
+closedness
+ae
+arbitrarly
+losslessly
Histogramming
histogramming
concat
diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt
index 3a269e53a2404..ec4ac17c80fd7 100644
--- a/doc/source/whatsnew/v0.10.0.txt
+++ b/doc/source/whatsnew/v0.10.0.txt
@@ -370,7 +370,7 @@ Updated PyTables Support
df1.get_dtype_counts()
- performance improvements on table writing
-- support for arbitrary indexed dimensions
+- support for arbitrarly indexed dimensions
- ``SparseSeries`` now has a ``density`` property (:issue:`2384`)
- enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument
to strip arbitrary characters (:issue:`2411`)
diff --git a/doc/source/whatsnew/v0.10.1.txt b/doc/source/whatsnew/v0.10.1.txt
index bb405c283ba24..f1a32440c6950 100644
--- a/doc/source/whatsnew/v0.10.1.txt
+++ b/doc/source/whatsnew/v0.10.1.txt
@@ -93,7 +93,7 @@ columns, this is equivalent to passing a
store.select('df',columns = ['A','B'])
-``HDFStore`` now serializes multi-index dataframes when appending tables.
+``HDFStore`` now serializes MultiIndex dataframes when appending tables.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.12.0.txt b/doc/source/whatsnew/v0.12.0.txt
index 69483b18a5490..f66f6c0f72d5d 100644
--- a/doc/source/whatsnew/v0.12.0.txt
+++ b/doc/source/whatsnew/v0.12.0.txt
@@ -7,7 +7,7 @@ This is a major release from 0.11.0 and includes several new features and
enhancements along with a large number of bug fixes.
Highlights include a consistent I/O API naming scheme, routines to read html,
-write multi-indexes to csv files, read & write STATA data files, read & write JSON format
+write MultiIndexes to csv files, read & write STATA data files, read & write JSON format
files, Python 3 support for ``HDFStore``, filtering of groupby expressions via ``filter``, and a
revamped ``replace`` routine that accepts regular expressions.
diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt
index 4408470c52feb..d4b7b09c054d6 100644
--- a/doc/source/whatsnew/v0.14.0.txt
+++ b/doc/source/whatsnew/v0.14.0.txt
@@ -13,7 +13,7 @@ users upgrade to this version.
- SQL interfaces updated to use ``sqlalchemy``, See :ref:`Here<whatsnew_0140.sql>`.
- Display interface changes, See :ref:`Here<whatsnew_0140.display>`
- MultiIndexing Using Slicers, See :ref:`Here<whatsnew_0140.slicers>`.
- - Ability to join a singly-indexed DataFrame with a multi-indexed DataFrame, see :ref:`Here <merging.join_on_mi>`
+ - Ability to join a singly-indexed DataFrame with a MultiIndexed DataFrame, see :ref:`Here <merging.join_on_mi>`
- More consistency in groupby results and more flexible groupby specifications, See :ref:`Here<whatsnew_0140.groupby>`
- Holiday calendars are now supported in ``CustomBusinessDay``, see :ref:`Here <timeseries.holiday>`
- Several improvements in plotting functions, including: hexbin, area and pie plots, see :ref:`Here<whatsnew_0140.plotting>`.
@@ -466,8 +466,8 @@ Some other enhancements to the sql functions include:
MultiIndexing Using Slicers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In 0.14.0 we added a new way to slice multi-indexed objects.
-You can slice a multi-index by providing multiple indexers.
+In 0.14.0 we added a new way to slice MultiIndexed objects.
+You can slice a MultiIndex by providing multiple indexers.
You can provide any of the selectors as if you are indexing by label, see :ref:`Selection by Label <indexing.label>`,
including slices, lists of labels, labels, and boolean indexers.
@@ -519,7 +519,7 @@ See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :is
columns=columns).sort_index().sort_index(axis=1)
df
-Basic multi-index slicing using slices, lists, and labels.
+Basic MultiIndex slicing using slices, lists, and labels.
.. ipython:: python
@@ -748,9 +748,9 @@ Enhancements
- Add option to turn off escaping in ``DataFrame.to_latex`` (:issue:`6472`)
- ``pd.read_clipboard`` will, if the keyword ``sep`` is unspecified, try to detect data copied from a spreadsheet
and parse accordingly. (:issue:`6223`)
-- Joining a singly-indexed DataFrame with a multi-indexed DataFrame (:issue:`3662`)
+- Joining a singly-indexed DataFrame with a MultiIndexed DataFrame (:issue:`3662`)
- See :ref:`the docs<merging.join_on_mi>`. Joining multi-index DataFrames on both the left and right is not yet supported ATM.
+ See :ref:`the docs<merging.join_on_mi>`. Joining MultiIndex DataFrames on both the left and right is not yet supported ATM.
.. ipython:: python
@@ -781,7 +781,7 @@ Enhancements
noon, January 1, 4713 BC. Because nanoseconds are used to define the time
in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`)
- ``DataFrame.to_stata`` will now check data for compatibility with Stata data types
- and will upcast when needed. When it is not possible to lossless upcast, a warning
+ and will upcast when needed. When it is not possible to losslessly upcast, a warning
is issued (:issue:`6327`)
- ``DataFrame.to_stata`` and ``StataWriter`` will accept keyword arguments time_stamp
and data_label which allow the time stamp and dataset label to be set when creating a
@@ -852,7 +852,7 @@ Performance
- Performance improvement when converting ``DatetimeIndex`` to floating ordinals
using ``DatetimeConverter`` (:issue:`6636`)
- Performance improvement for ``DataFrame.shift`` (:issue:`5609`)
-- Performance improvement in indexing into a multi-indexed Series (:issue:`5567`)
+- Performance improvement in indexing into a MultiIndexed Series (:issue:`5567`)
- Performance improvements in single-dtyped indexing (:issue:`6484`)
- Improve performance of DataFrame construction with certain offsets, by removing faulty caching
(e.g. MonthEnd,BusinessMonthEnd), (:issue:`6479`)
@@ -896,7 +896,7 @@ Bug Fixes
- Issue with groupby ``agg`` with a single function and a a mixed-type frame (:issue:`6337`)
- Bug in ``DataFrame.replace()`` when passing a non- ``bool``
``to_replace`` argument (:issue:`6332`)
-- Raise when trying to align on different levels of a multi-index assignment (:issue:`3738`)
+- Raise when trying to align on different levels of a MultiIndex assignment (:issue:`3738`)
- Bug in setting complex dtypes via boolean indexing (:issue:`6345`)
- Bug in TimeGrouper/resample when presented with a non-monotonic DatetimeIndex that would return invalid results. (:issue:`4161`)
- Bug in index name propagation in TimeGrouper/resample (:issue:`4161`)
@@ -996,7 +996,7 @@ Bug Fixes
- accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`)
- Bug in C parser with leading white space (:issue:`3374`)
- Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines
-- Bug in python parser with explicit multi-index in row following column header (:issue:`6893`)
+- Bug in python parser with explicit MultiIndex in row following column header (:issue:`6893`)
- Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`)
- Bug in ``DataFrame.apply`` with functions that used ``*args`` or ``**kwargs`` and returned
an empty result (:issue:`6952`)
@@ -1043,7 +1043,7 @@ Bug Fixes
- Bug in ``query``/``eval`` where global constants were not looked up correctly
(:issue:`7178`)
- Bug in recognizing out-of-bounds positional list indexers with ``iloc`` and a multi-axis tuple indexer (:issue:`7189`)
-- Bug in setitem with a single value, multi-index and integer indices (:issue:`7190`, :issue:`7218`)
+- Bug in setitem with a single value, MultiIndex and integer indices (:issue:`7190`, :issue:`7218`)
- Bug in expressions evaluation with reversed ops, showing in series-dataframe ops (:issue:`7198`, :issue:`7192`)
-- Bug in multi-axis indexing with > 2 ndim and a multi-index (:issue:`7199`)
+- Bug in multi-axis indexing with > 2 ndim and a MultiIndex (:issue:`7199`)
- Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`)
diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt
index f7f69218e0ef5..5183dd24e9b34 100644
--- a/doc/source/whatsnew/v0.14.1.txt
+++ b/doc/source/whatsnew/v0.14.1.txt
@@ -156,7 +156,7 @@ Experimental
~~~~~~~~~~~~
- ``pandas.io.data.Options`` has a new method, ``get_all_data`` method, and now consistently returns a
- multi-indexed ``DataFrame`` (:issue:`5602`)
+ MultiIndexed ``DataFrame`` (:issue:`5602`)
- ``io.gbq.read_gbq`` and ``io.gbq.to_gbq`` were refactored to remove the
dependency on the Google ``bq.py`` command line client. This submodule
now uses ``httplib2`` and the Google ``apiclient`` and ``oauth2client`` API client
@@ -169,7 +169,7 @@ Experimental
Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.where`` with a symmetric shaped frame and a passed other of a DataFrame (:issue:`7506`)
-- Bug in Panel indexing with a multi-index axis (:issue:`7516`)
+- Bug in Panel indexing with a MultiIndex axis (:issue:`7516`)
- Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (:issue:`7523`)
- Bug in setitem with list-of-lists and single vs mixed types (:issue:`7551`:)
- Bug in time ops with non-aligned Series (:issue:`7500`)
@@ -183,10 +183,10 @@ Bug Fixes
- Bug in plotting subplots with ``DataFrame.plot``, ``hist`` clears passed ``ax`` even if the number of subplots is one (:issue:`7391`).
- Bug in plotting subplots with ``DataFrame.boxplot`` with ``by`` kw raises ``ValueError`` if the number of subplots exceeds 1 (:issue:`7391`).
- Bug in subplots displays ``ticklabels`` and ``labels`` in different rule (:issue:`5897`)
-- Bug in ``Panel.apply`` with a multi-index as an axis (:issue:`7469`)
+- Bug in ``Panel.apply`` with a MultiIndex as an axis (:issue:`7469`)
- Bug in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`)
- Bug in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`)
-- Bug in multi-index slicing with datetimelike ranges (strings and Timestamps), (:issue:`7429`)
+- Bug in MultiIndex slicing with datetimelike ranges (strings and Timestamps), (:issue:`7429`)
- Bug in ``Index.min`` and ``max`` doesn't handle ``nan`` and ``NaT`` properly (:issue:`7261`)
- Bug in ``PeriodIndex.min/max`` results in ``int`` (:issue:`7609`)
- Bug in ``resample`` where ``fill_method`` was ignored if you passed ``how`` (:issue:`2073`)
@@ -221,8 +221,8 @@ Bug Fixes
- Bug where ``NDFrame.replace()`` didn't correctly replace objects with
``Period`` values (:issue:`7379`).
- Bug in ``.ix`` getitem should always return a Series (:issue:`7150`)
-- Bug in multi-index slicing with incomplete indexers (:issue:`7399`)
-- Bug in multi-index slicing with a step in a sliced level (:issue:`7400`)
+- Bug in MultiIndex slicing with incomplete indexers (:issue:`7399`)
+- Bug in MultiIndex slicing with a step in a sliced level (:issue:`7400`)
- Bug where negative indexers in ``DatetimeIndex`` were not correctly sliced
(:issue:`7408`)
- Bug where ``NaT`` wasn't repr'd correctly in a ``MultiIndex`` (:issue:`7406`,
diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt
index 94093b2cfb16c..79003296ac165 100644
--- a/doc/source/whatsnew/v0.15.0.txt
+++ b/doc/source/whatsnew/v0.15.0.txt
@@ -711,7 +711,7 @@ Other notable API changes:
2 7 NaN
3 11 NaN
- Furthermore, ``.loc`` will raise If no values are found in a multi-index with a list-like indexer:
+ Furthermore, ``.loc`` will raise If no values are found in a MultiIndex with a list-like indexer:
.. ipython:: python
:okexcept:
@@ -1114,9 +1114,9 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex`` in-place addition and subtraction cause different result from normal one (:issue:`6527`)
- Bug in adding and subtracting ``PeriodIndex`` with ``PeriodIndex`` raise ``TypeError`` (:issue:`7741`)
- Bug in ``combine_first`` with ``PeriodIndex`` data raises ``TypeError`` (:issue:`3367`)
-- Bug in multi-index slicing with missing indexers (:issue:`7866`)
-- Bug in multi-index slicing with various edge cases (:issue:`8132`)
-- Regression in multi-index indexing with a non-scalar type object (:issue:`7914`)
+- Bug in MultiIndex slicing with missing indexers (:issue:`7866`)
+- Bug in MultiIndex slicing with various edge cases (:issue:`8132`)
+- Regression in MultiIndex indexing with a non-scalar type object (:issue:`7914`)
- Bug in ``Timestamp`` comparisons with ``==`` and ``int64`` dtype (:issue:`8058`)
- Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is referred internally (:issue:`7748`)
- Bug in ``Panel`` when using ``major_xs`` and ``copy=False`` is passed (deprecation warning fails because of missing ``warnings``) (:issue:`8152`).
@@ -1130,7 +1130,7 @@ Bug Fixes
- Bug in ``get`` where an ``IndexError`` would not cause the default value to be returned (:issue:`7725`)
- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may reset nanosecond (:issue:`7697`)
- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may raise ``AttributeError`` if ``Timestamp`` has ``dateutil`` tzinfo (:issue:`7697`)
-- Bug in sorting a multi-index frame with a ``Float64Index`` (:issue:`8017`)
+- Bug in sorting a MultiIndex frame with a ``Float64Index`` (:issue:`8017`)
- Bug in inconsistent panel setitem with a rhs of a ``DataFrame`` for alignment (:issue:`7763`)
- Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`)
- Bug in 32-bit platforms with ``Series.shift`` (:issue:`8129`)
@@ -1212,7 +1212,7 @@ Bug Fixes
- Bug in ``NDFrame.loc`` indexing when row/column names were lost when target was a list/ndarray (:issue:`6552`)
- Regression in ``NDFrame.loc`` indexing when rows/columns were converted to Float64Index if target was an empty list/ndarray (:issue:`7774`)
- Bug in ``Series`` that allows it to be indexed by a ``DataFrame`` which has unexpected results. Such indexing is no longer permitted (:issue:`8444`)
-- Bug in item assignment of a ``DataFrame`` with multi-index columns where right-hand-side columns were not aligned (:issue:`7655`)
+- Bug in item assignment of a ``DataFrame`` with MultiIndex columns where right-hand-side columns were not aligned (:issue:`7655`)
- Suppress FutureWarning generated by NumPy when comparing object arrays containing NaN for equality (:issue:`7065`)
- Bug in ``DataFrame.eval()`` where the dtype of the ``not`` operator (``~``)
was not correctly inferred as ``bool``.
diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt
index 918eab3a9763e..8cbf239ea20d0 100644
--- a/doc/source/whatsnew/v0.15.1.txt
+++ b/doc/source/whatsnew/v0.15.1.txt
@@ -288,7 +288,7 @@ Bug Fixes
- Bug in Panel indexing with a list-like (:issue:`8710`)
- Compat issue is ``DataFrame.dtypes`` when ``options.mode.use_inf_as_null`` is True (:issue:`8722`)
- Bug in ``read_csv``, ``dialect`` parameter would not take a string (:issue:`8703`)
-- Bug in slicing a multi-index level with an empty-list (:issue:`8737`)
+- Bug in slicing a MultiIndex level with an empty-list (:issue:`8737`)
- Bug in numeric index operations of add/sub with Float/Index Index with numpy arrays (:issue:`8608`)
- Bug in setitem with empty indexer and unwanted coercion of dtypes (:issue:`8669`)
- Bug in ix/loc block splitting on setitem (manifests with integer-like dtypes, e.g. datetime64) (:issue:`8607`)
diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt
index 16a57676c89c0..ee72fab7d23f2 100644
--- a/doc/source/whatsnew/v0.15.2.txt
+++ b/doc/source/whatsnew/v0.15.2.txt
@@ -199,7 +199,7 @@ Bug Fixes
- Bug in ``groupby`` signatures that didn't include \*args or \*\*kwargs (:issue:`8733`).
- ``io.data.Options`` now raises ``RemoteDataError`` when no expiry dates are available from Yahoo and when it receives no data from Yahoo (:issue:`8761`), (:issue:`8783`).
- Unclear error message in csv parsing when passing dtype and names and the parsed data is a different data type (:issue:`8833`)
-- Bug in slicing a multi-index with an empty list and at least one boolean indexer (:issue:`8781`)
+- Bug in slicing a MultiIndex with an empty list and at least one boolean indexer (:issue:`8781`)
- ``io.data.Options`` now raises ``RemoteDataError`` when no expiry dates are available from Yahoo (:issue:`8761`).
- ``Timedelta`` kwargs may now be numpy ints and floats (:issue:`8757`).
- Fixed several outstanding bugs for ``Timedelta`` arithmetic and comparisons (:issue:`8813`, :issue:`5963`, :issue:`5436`).
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt
index 214a08ef0bbff..ce525bbb4c1d6 100644
--- a/doc/source/whatsnew/v0.16.0.txt
+++ b/doc/source/whatsnew/v0.16.0.txt
@@ -589,7 +589,7 @@ Bug Fixes
- Fixed bug on big endian platforms which produced incorrect results in ``StataReader`` (:issue:`8688`).
- Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`, :issue:`5873`)
- Bug in ``pivot`` and ``unstack`` where ``nan`` values would break index alignment (:issue:`4862`, :issue:`7401`, :issue:`7403`, :issue:`7405`, :issue:`7466`, :issue:`9497`)
-- Bug in left ``join`` on multi-index with ``sort=True`` or null values (:issue:`9210`).
+- Bug in left ``join`` on MultiIndex with ``sort=True`` or null values (:issue:`9210`).
- Bug in ``MultiIndex`` where inserting new keys would fail (:issue:`9250`).
- Bug in ``groupby`` when key space exceeds ``int64`` bounds (:issue:`9096`).
- Bug in ``unstack`` with ``TimedeltaIndex`` or ``DatetimeIndex`` and nulls (:issue:`9491`).
diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt
index e2da12fc94b58..d3a8064a0e786 100644
--- a/doc/source/whatsnew/v0.16.1.txt
+++ b/doc/source/whatsnew/v0.16.1.txt
@@ -133,7 +133,7 @@ groupby operations on the index will preserve the index nature as well
reindexing operations, will return a resulting index based on the type of the passed
indexer, meaning that passing a list will return a plain-old-``Index``; indexing with
a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories
-of the PASSED ``Categorical`` dtype. This allows one to arbitrary index these even with
+of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with
values NOT in the categories, similarly to how you can reindex ANY pandas index.
.. code-block:: ipython
@@ -455,7 +455,7 @@ Bug Fixes
- Bug where using DataFrames asfreq would remove the name of the index. (:issue:`9885`)
- Bug causing extra index point when resample BM/BQ (:issue:`9756`)
- Changed caching in ``AbstractHolidayCalendar`` to be at the instance level rather than at the class level as the latter can result in unexpected behaviour. (:issue:`9552`)
-- Fixed latex output for multi-indexed dataframes (:issue:`9778`)
+- Fixed latex output for MultiIndexed dataframes (:issue:`9778`)
- Bug causing an exception when setting an empty range using ``DataFrame.loc`` (:issue:`9596`)
- Bug in hiding ticklabels with subplots and shared axes when adding a new plot to an existing grid of axes (:issue:`9158`)
- Bug in ``transform`` and ``filter`` when grouping on a categorical variable (:issue:`9921`)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 1b98ebd0e19c5..404f2bf06e861 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -1069,7 +1069,7 @@ Bug Fixes
- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`)
- Bug in ``pd.rolling_*`` where ``Series.name`` would be lost in the output (:issue:`10565`)
- Bug in ``stack`` when index or columns are not unique. (:issue:`10417`)
-- Bug in setting a ``Panel`` when an axis has a multi-index (:issue:`10360`)
+- Bug in setting a ``Panel`` when an axis has a MultiIndex (:issue:`10360`)
- Bug in ``USFederalHolidayCalendar`` where ``USMemorialDay`` and ``USMartinLutherKingJr`` were incorrect (:issue:`10278` and :issue:`9760` )
- Bug in ``.sample()`` where returned object, if set, gives unnecessary ``SettingWithCopyWarning`` (:issue:`10738`)
- Bug in ``.sample()`` where weights passed as ``Series`` were not aligned along axis before being treated positionally, potentially causing problems if weight indices were not aligned with sampled object. (:issue:`10738`)
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 990f27950d982..c5ae0d147824c 100644
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -160,7 +160,7 @@ Bug Fixes
- Bug in ``HDFStore.append`` with strings whose encoded length exceeded the max unencoded length (:issue:`11234`)
- Bug in merging ``datetime64[ns, tz]`` dtypes (:issue:`11405`)
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
-- Bug in using ``DataFrame.ix`` with a multi-index indexer (:issue:`11372`)
+- Bug in using ``DataFrame.ix`` with a MultiIndex indexer (:issue:`11372`)
- Bug in ``date_range`` with ambiguous endpoints (:issue:`11626`)
- Prevent adding new attributes to the accessors ``.str``, ``.dt`` and ``.cat``. Retrieving such
a value was not possible, so error out on setting it. (:issue:`10673`)
@@ -189,7 +189,7 @@ Bug Fixes
- Bug in ``pandas.json`` when file to load is big (:issue:`11344`)
- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
- Fixed a bug that prevented the construction of an empty series of dtype ``datetime64[ns, tz]`` (:issue:`11245`).
-- Bug in ``read_excel`` with multi-index containing integers (:issue:`11317`)
+- Bug in ``read_excel`` with MultiIndex containing integers (:issue:`11317`)
- Bug in ``to_excel`` with openpyxl 2.2+ and merging (:issue:`11408`)
- Bug in ``DataFrame.to_dict()`` produces a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
- Bug in ``DataFrame.corr()`` raises exception when computes Kendall correlation for DataFrames with boolean and not boolean columns (:issue:`11560`)
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 8dc49dbc319a6..a3213136d998a 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -1257,7 +1257,7 @@ Bug Fixes
- Bug in ``read_sql`` with ``pymysql`` connections failing to return chunked data (:issue:`11522`)
- Bug in ``.to_csv`` ignoring formatting parameters ``decimal``, ``na_rep``, ``float_format`` for float indexes (:issue:`11553`)
- Bug in ``Int64Index`` and ``Float64Index`` preventing the use of the modulo operator (:issue:`9244`)
-- Bug in ``MultiIndex.drop`` for not lexsorted multi-indexes (:issue:`12078`)
+- Bug in ``MultiIndex.drop`` for not lexsorted MultiIndexes (:issue:`12078`)
- Bug in ``DataFrame`` when masking an empty ``DataFrame`` (:issue:`11859`)
@@ -1277,7 +1277,7 @@ Bug Fixes
- Bug in ``Series`` constructor with read-only data (:issue:`11502`)
- Removed ``pandas.util.testing.choice()``. Should use ``np.random.choice()``, instead. (:issue:`12386`)
- Bug in ``.loc`` setitem indexer preventing the use of a TZ-aware DatetimeIndex (:issue:`12050`)
-- Bug in ``.style`` indexes and multi-indexes not appearing (:issue:`11655`)
+- Bug in ``.style`` indexes and MultiIndexes not appearing (:issue:`11655`)
- Bug in ``to_msgpack`` and ``from_msgpack`` which did not correctly serialize or deserialize ``NaT`` (:issue:`12307`).
- Bug in ``.skew`` and ``.kurt`` due to roundoff error for highly similar values (:issue:`11974`)
- Bug in ``Timestamp`` constructor where microsecond resolution was lost if HHMMSS were not separated with ':' (:issue:`10041`)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index bd90e371597dc..2146b7b99a5a7 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -462,7 +462,7 @@ Selecting via a scalar value that is contained *in* the intervals.
Other Enhancements
^^^^^^^^^^^^^^^^^^
-- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closed. See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`)
+- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closedness. See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`)
- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
- ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`)
- ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 3a257c1ff9648..77ae5b92d0e70 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -935,7 +935,7 @@ Deprecations
- :func:`read_excel()` has deprecated ``sheetname`` in favor of ``sheet_name`` for consistency with ``.to_excel()`` (:issue:`10559`).
- :func:`read_excel()` has deprecated ``parse_cols`` in favor of ``usecols`` for consistency with :func:`read_csv` (:issue:`4988`)
- :func:`read_csv()` has deprecated the ``tupleize_cols`` argument. Column tuples will always be converted to a ``MultiIndex`` (:issue:`17060`)
-- :meth:`DataFrame.to_csv` has deprecated the ``tupleize_cols`` argument. Multi-index columns will be always written as rows in the CSV file (:issue:`17060`)
+- :meth:`DataFrame.to_csv` has deprecated the ``tupleize_cols`` argument. MultiIndex columns will be always written as rows in the CSV file (:issue:`17060`)
- The ``convert`` parameter has been deprecated in the ``.take()`` method, as it was not being respected (:issue:`16948`)
- ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`).
- :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`).
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 2c147736d79a8..49e59c9ddf5a7 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -126,7 +126,7 @@ I/O
- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the ``numpy.bool_`` datatype (:issue:`18390`)
- Bug in :func:`read_json` not decoding when reading line delimited JSON from S3 (:issue:`17200`)
- Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`)
-- Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`)
+- Bug in :func:`to_latex` where repeated MultiIndex values were not printed even though a higher level index differed from the previous row (:issue:`14484`)
- Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`)
- Bug in :meth:`DataFrame.to_latex` with ``longtable=True`` where a latex multicolumn always spanned over three columns (:issue:`17959`)
| Hello,
before I start working on part 2 of the sphinx spelling extension I wanted to submit this PR in order to fix the double use of multi-index and MultiIndex. I have changed all `multi-index` mentions through the documentation to `MultiIndex`, this meant that some titles needed changing and new linking needed to be done. I have tested the documentation locally and everything seems to be working okay.
I have also reverted the changes to some adverbs that got changed previously (which shouldn't have according to @h-vetinari)
I hope that creating this PR before working on part 2 is okay. If you would rather me add this and the next one together let me know and I'll update the title and whatnot 😄 👍
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21382 | 2018-06-08T11:26:36Z | 2018-06-08T17:39:12Z | 2018-06-08T17:39:12Z | 2018-06-29T14:43:18Z |
TST : Adding new test case for pivot_table() with Categorical data | diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 3ec60d50f2792..e7a9f5551b2df 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -142,6 +142,30 @@ def test_pivot_table_dropna_categoricals(self, dropna):
tm.assert_frame_equal(result, expected)
+ def test_pivot_with_non_observable_dropna_civ(self, dropna):
+ # gh-21370
+ arr = [np.nan, 'low', 'high', 'low', np.nan]
+ df = pd.DataFrame({
+ "In": pd.Categorical(arr,
+ categories=['low', 'high'],
+ ordered=True),
+ "Col": ["A", "B", "C", "A", "B"],
+ "Val": range(1, 6)
+ })
+ result = df.pivot_table(index="In", columns="Col", values="Val",
+ dropna=dropna)
+ expected = pd.DataFrame({
+ 'A': [4.0, np.nan],
+ 'B': [2.0, np.nan],
+ 'C': [np.nan, 3.0]},
+ index=pd.Index(
+ pd.Categorical.from_codes(
+ [0, 1],
+ categories=['low', 'high'],
+ ordered=True),
+ name='In'))
+ tm.assert_frame_equal(result, expected)
+
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
| TST: add additional test cases for pivot_table with categorical data #21370
- [x] closes #21370
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Added a new test case for checking ```index/column/value``` workability as per the patch offered by PR #21252
**Note** : the _civ means Column,Index and Value in the test name (wasn't being very creative here) | https://api.github.com/repos/pandas-dev/pandas/pulls/21381 | 2018-06-08T11:25:31Z | 2018-09-25T16:55:29Z | null | 2018-09-25T16:55:29Z |
MAINT: More friendly error msg on Index overflow | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index dff6b5421d5ab..bf1051332ee19 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -181,6 +181,9 @@ class Index(IndexOpsMixin, PandasObject):
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
+ If dtype is None, we find the dtype that best fits the data.
+ If an actual dtype is provided, we coerce to that dtype if it's safe.
+ Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
@@ -306,7 +309,14 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
- data = np.array(data, copy=copy, dtype=dtype)
+ try:
+ data = np.array(data, copy=copy, dtype=dtype)
+ except OverflowError:
+ # gh-15823: a more user-friendly error message
+ raise OverflowError(
+ "the elements provided in the data cannot "
+ "all be casted to the dtype {dtype}"
+ .format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f9f16dc0ce8b7..c264f5f79e47e 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -474,6 +474,13 @@ def test_constructor_nonhashable_name(self, indices):
tm.assert_raises_regex(TypeError, message,
indices.set_names, names=renamed)
+ def test_constructor_overflow_int64(self):
+ # see gh-15832
+ msg = ("the elements provided in the data cannot "
+ "all be casted to the dtype int64")
+ with tm.assert_raises_regex(OverflowError, msg):
+ Index([np.iinfo(np.uint64).max - 1], dtype="int64")
+
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
| Display a more friendly error message when there is an `OverflowError` during `Index` construction.
Partially addresses #15832. | https://api.github.com/repos/pandas-dev/pandas/pulls/21377 | 2018-06-08T05:34:52Z | 2018-06-12T00:16:37Z | 2018-06-12T00:16:37Z | 2018-06-29T14:46:02Z |
PERF: Add __contains__ to CategoricalIndex | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 5464e7cba22c3..48f42621d183d 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -193,3 +193,16 @@ def time_categorical_series_is_monotonic_increasing(self):
def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
+
+
+class Contains(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ N = 10**5
+ self.ci = tm.makeCategoricalIndex(N)
+ self.cat = self.ci.categories[0]
+
+ def time_contains(self):
+ self.cat in self.ci
diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index 3e4326dea2ecc..1ac6e21adc46d 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -24,7 +24,9 @@ Fixed Regressions
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
--
+- Improved performance of membership checks in :class:`CategoricalIndex`
+ (i.e. ``x in ci``-style checks are much faster). :meth:`CategoricalIndex.contains`
+ is likewise much faster (:issue:`21369`)
-
Documentation Changes
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 587090fa72def..7f2860a963423 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -325,19 +325,31 @@ def _reverse_indexer(self):
def __contains__(self, key):
hash(key)
- if self.categories._defer_to_indexing:
- return key in self.categories
+ if isna(key): # if key is a NaN, check if any NaN is in self.
+ return self.isna().any()
+
+ # is key in self.categories? Then get its location.
+ # If not (i.e. KeyError), it logically can't be in self either
+ try:
+ loc = self.categories.get_loc(key)
+ except KeyError:
+ return False
- return key in self.values
+ # loc is the location of key in self.categories, but also the value
+ # for key in self.codes and in self._engine. key may be in categories,
+ # but still not in self, check this. Example:
+ # 'b' in CategoricalIndex(['a'], categories=['a', 'b']) # False
+ if is_scalar(loc):
+ return loc in self._engine
+ else:
+ # if self.categories is IntervalIndex, loc is an array
+ # check if any scalar of the array is in self._engine
+ return any(loc_ in self._engine for loc_ in loc)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
-
- if self.categories._defer_to_indexing:
- return self.categories.contains(key)
-
- return key in self.values
+ return key in self
def __array__(self, dtype=None):
""" the array interface, return my values """
| - [x] progress towards #20395
- [x] xref #21022
- [ ] tests added / passed
- [x] benchmark added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Currently, membership checks in ``CategoricalIndex`` is very slow as explained in #21022. This PR fixes the issue for ``CategoricalIndex``, while #21022 contains the fix for ``Categorical``. The difference between the two cases is the use of ``_engine`` for ``CategoricalIndex``, which makes this even faster than the ``Catagorical`` solution in #21022.
Tests exist already and can be found in ``tests/indexes/test_category.py::TestCategoricalIndex::test_contains``.
ASV:
```
before after ratio
[0c65c57a] [986779ab]
- 2.49±0.2ms 3.26±0.2μs 0.00 categoricals.Contains.time_contains
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/21369 | 2018-06-07T21:38:54Z | 2018-06-14T10:38:24Z | 2018-06-14T10:38:24Z | 2018-07-02T23:25:25Z |
DOC: clean-up 0.23.1 whatsnew | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 048a429136f0c..09b711c80910c 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -10,19 +10,22 @@ and bug fixes. We recommend that all users upgrade to this version.
:local:
:backlinks: none
-.. _whatsnew_0231.enhancements:
-New features
-~~~~~~~~~~~~
+.. _whatsnew_0231.fixed_regressions:
+Fixed Regressions
+~~~~~~~~~~~~~~~~~
-.. _whatsnew_0231.deprecations:
-
-Deprecations
-~~~~~~~~~~~~
+- Fixed regression in the :attr:`DatetimeIndex.date` and :attr:`DatetimeIndex.time`
+ attributes in case of timezone-aware data: :attr:`DatetimeIndex.time` returned
+ a tz-aware time instead of tz-naive (:issue:`21267`) and :attr:`DatetimeIndex.date`
+ returned incorrect date when the input date has a non-UTC timezone (:issue:`21230`).
+- Fixed regression in :meth:`pandas.io.json.json_normalize` when called with ``None`` values
+ in nested levels in JSON (:issue:`21158`).
+- Bug in :meth:`~DataFrame.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
+- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
+- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
--
--
.. _whatsnew_0231.performance:
@@ -31,14 +34,7 @@ Performance Improvements
- Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`)
- Improved performance of :meth:`CategoricalIndex.is_unique` (:issue:`21107`)
--
--
-
-Documentation Changes
-~~~~~~~~~~~~~~~~~~~~~
--
--
.. _whatsnew_0231.bug_fixes:
@@ -46,74 +42,39 @@ Bug Fixes
~~~~~~~~~
Groupby/Resample/Rolling
-^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True``
-Strings
-^^^^^^^
+Data-type specific
- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`)
-
-Timedelta
-^^^^^^^^^
- Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`)
-
-Categorical
-^^^^^^^^^^^
-
-- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
-- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
+- Bug in :func:`pandas.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
Sparse
-^^^^^^
- Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`)
-Conversion
-^^^^^^^^^^
-
--
--
-
Indexing
-^^^^^^^^
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
-- Bug in :attr:`DatetimeIndex.date` where an incorrect date is returned when the input date has a non-UTC timezone (:issue:`21230`)
- Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`)
- Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`)
-- Bug in :attr:`DatetimeIndex.time` where given a tz-aware Timestamp, a tz-aware Time is returned instead of tz-naive (:issue:`21267`)
--
I/O
-^^^
- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`)
- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`)
-- Bug when :meth:`pandas.io.json.json_normalize` was called with ``None`` values in nested levels in JSON (:issue:`21158`)
-- Bug in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
- Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`)
--
-
-Plotting
-^^^^^^^^
-
--
--
Reshaping
-^^^^^^^^^
- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`)
--
Other
-^^^^^
- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`)
-- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
| I added a "Fixed regressions" section and moved some over there. @TomAugspurger does that look ok to you? (then I'll maybe merge to the other PRs can add it there)
Also mad the bug fixes subsections into plain text to make it a bit less "heavy" (given the only few entries per section). | https://api.github.com/repos/pandas-dev/pandas/pulls/21368 | 2018-06-07T21:12:48Z | 2018-06-07T21:20:17Z | 2018-06-07T21:20:17Z | 2018-06-12T16:30:40Z |
DOC: move whatsnew file for #21116 (index droplevel) | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index f2bc81eea186b..b3c1dbc86525d 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -15,8 +15,6 @@ and bug fixes. We recommend that all users upgrade to this version.
New features
~~~~~~~~~~~~
-- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`)
-
.. _whatsnew_0231.deprecations:
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 6cbc19cca99e1..78974a955b570 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -15,7 +15,8 @@ Other Enhancements
- :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`)
- :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether NaN/NaT values should be considered (:issue:`17534`)
- :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`)
--
+- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`)
+
.. _whatsnew_0240.api_breaking:
| xref https://github.com/pandas-dev/pandas/pull/21116#issuecomment-395436923 | https://api.github.com/repos/pandas-dev/pandas/pulls/21367 | 2018-06-07T20:41:51Z | 2018-06-07T20:42:12Z | 2018-06-07T20:42:12Z | 2018-06-07T20:42:15Z |
REGR: NA-values in ctors with string dtype | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 5a1bcce9b5970..b766ee9f3dec1 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -10,7 +10,6 @@ and bug fixes. We recommend that all users upgrade to this version.
:local:
:backlinks: none
-
.. _whatsnew_0231.fixed_regressions:
Fixed Regressions
@@ -29,6 +28,7 @@ Fixed Regressions
- Bug in :meth:`~DataFrame.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
+- Fixed regression in constructors coercing NA values like ``None`` to strings when passing ``dtype=str`` (:issue:`21083`)
- Regression in :func:`pivot_table` where an ordered ``Categorical`` with missing
values for the pivot's ``index`` would give a mis-aligned result (:issue:`21133`)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index a463f573c82e0..d5f399c7cd63d 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -159,3 +159,14 @@ def tz_aware_fixture(request):
Fixture for trying explicit timezones: {0}
"""
return request.param
+
+
+@pytest.fixture(params=[str, 'str', 'U'])
+def string_dtype(request):
+ """Parametrized fixture for string dtypes.
+
+ * str
+ * 'str'
+ * 'U'
+ """
+ return request.param
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e4ed6d544d42e..ebc7a13234a98 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1227,3 +1227,45 @@ def construct_1d_object_array_from_listlike(values):
result = np.empty(len(values), dtype='object')
result[:] = values
return result
+
+
+def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
+ """
+ Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
+
+ Parameters
+ ----------
+ values : Sequence
+ dtype : numpy.dtype, optional
+ copy : bool, default False
+ Note that copies may still be made with ``copy=False`` if casting
+ is required.
+
+ Returns
+ -------
+ arr : ndarray[dtype]
+
+ Examples
+ --------
+ >>> np.array([1.0, 2.0, None], dtype='str')
+ array(['1.0', '2.0', 'None'], dtype='<U4')
+
+ >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
+
+
+ """
+ subarr = np.array(values, dtype=dtype, copy=copy)
+
+ if dtype is not None and dtype.kind in ("U", "S"):
+ # GH-21083
+ # We can't just return np.array(subarr, dtype='str') since
+ # NumPy will convert the non-string objects into strings
+ # Including NA values. Se we have to go
+ # string -> object -> update NA, which requires an
+ # additional pass over the data.
+ na_values = isna(values)
+ subarr2 = subarr.astype(object)
+ subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
+ subarr = subarr2
+
+ return subarr
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2ba1f15044952..0450f28087f66 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -40,6 +40,7 @@
maybe_convert_platform,
maybe_cast_to_datetime, maybe_castable,
construct_1d_arraylike_from_scalar,
+ construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike)
from pandas.core.dtypes.missing import (
isna,
@@ -4074,7 +4075,8 @@ def _try_cast(arr, take_fast_path):
isinstance(subarr, np.ndarray))):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_type(subarr):
- subarr = np.array(subarr, dtype=dtype, copy=copy)
+ subarr = construct_1d_ndarray_preserving_na(subarr, dtype,
+ copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py
index 20cd8b43478d2..4a19682e2c558 100644
--- a/pandas/tests/dtypes/test_cast.py
+++ b/pandas/tests/dtypes/test_cast.py
@@ -23,6 +23,7 @@
maybe_convert_scalar,
find_common_type,
construct_1d_object_array_from_listlike,
+ construct_1d_ndarray_preserving_na,
construct_1d_arraylike_from_scalar)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
@@ -440,3 +441,15 @@ def test_cast_1d_arraylike_from_scalar_categorical(self):
tm.assert_categorical_equal(result, expected,
check_category_order=True,
check_dtype=True)
+
+
+@pytest.mark.parametrize('values, dtype, expected', [
+ ([1, 2, 3], None, np.array([1, 2, 3])),
+ (np.array([1, 2, 3]), None, np.array([1, 2, 3])),
+ (['1', '2', None], None, np.array(['1', '2', None])),
+ (['1', '2', None], np.dtype('str'), np.array(['1', '2', None])),
+ ([1, 2, None], np.dtype('str'), np.array(['1', '2', None])),
+])
+def test_construct_1d_ndarray_preserving_na(values, dtype, expected):
+ result = construct_1d_ndarray_preserving_na(values, dtype=dtype)
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 300e1acdea911..e7fb765128738 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -151,6 +151,17 @@ def test_constructor_complex_dtypes(self):
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
+ def test_constructor_dtype_str_na_values(self, string_dtype):
+ # https://github.com/pandas-dev/pandas/issues/21083
+ df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
+ result = df.isna()
+ expected = DataFrame({"A": [False, True]})
+ tm.assert_frame_equal(result, expected)
+ assert df.iloc[1, 0] is None
+
+ df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
+ assert np.isnan(df.iloc[1, 0])
+
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 4c9f8c2ea0980..1eeeec0be3b8b 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -794,22 +794,26 @@ def test_arg_for_errors_in_astype(self):
@pytest.mark.parametrize('input_vals', [
([1, 2]),
- ([1.0, 2.0, np.nan]),
(['1', '2']),
(list(pd.date_range('1/1/2011', periods=2, freq='H'))),
(list(pd.date_range('1/1/2011', periods=2, freq='H',
tz='US/Eastern'))),
([pd.Interval(left=0, right=5)]),
])
- def test_constructor_list_str(self, input_vals):
+ def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
- for dtype in ['str', str, 'U']:
- result = DataFrame({'A': input_vals}, dtype=dtype)
- expected = DataFrame({'A': input_vals}).astype({'A': dtype})
- assert_frame_equal(result, expected)
+ result = DataFrame({'A': input_vals}, dtype=string_dtype)
+ expected = DataFrame({'A': input_vals}).astype({'A': string_dtype})
+ assert_frame_equal(result, expected)
+
+ def test_constructor_list_str_na(self, string_dtype):
+
+ result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
+ expected = DataFrame({"A": ['1.0', '2.0', None]}, dtype=object)
+ assert_frame_equal(result, expected)
class TestDataFrameDatetimeWithTZ(TestData):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 14ae1ef42865a..aba472f2ce8f9 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1829,7 +1829,7 @@ def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
- s = Series(data, dtype=str)
+ s = Series(data, dtype=object).astype(str)
result = s.mode(dropna)
expected3 = Series(expected3, dtype=str)
tm.assert_series_equal(result, expected3)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 7e59325c32ddc..906d2aacd5586 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -137,6 +137,17 @@ def test_constructor_no_data_index_order(self):
result = pd.Series(index=['b', 'a', 'c'])
assert result.index.tolist() == ['b', 'a', 'c']
+ def test_constructor_dtype_str_na_values(self, string_dtype):
+ # https://github.com/pandas-dev/pandas/issues/21083
+ ser = Series(['x', None], dtype=string_dtype)
+ result = ser.isna()
+ expected = Series([False, True])
+ tm.assert_series_equal(result, expected)
+ assert ser.iloc[1] is None
+
+ ser = Series(['x', np.nan], dtype=string_dtype)
+ assert np.isnan(ser.iloc[1])
+
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
@@ -164,22 +175,25 @@ def test_constructor_list_like(self):
@pytest.mark.parametrize('input_vals', [
([1, 2]),
- ([1.0, 2.0, np.nan]),
(['1', '2']),
(list(pd.date_range('1/1/2011', periods=2, freq='H'))),
(list(pd.date_range('1/1/2011', periods=2, freq='H',
tz='US/Eastern'))),
([pd.Interval(left=0, right=5)]),
])
- def test_constructor_list_str(self, input_vals):
+ def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
+ result = Series(input_vals, dtype=string_dtype)
+ expected = Series(input_vals).astype(string_dtype)
+ assert_series_equal(result, expected)
- for dtype in ['str', str, 'U']:
- result = Series(input_vals, dtype=dtype)
- expected = Series(input_vals).astype(dtype)
- assert_series_equal(result, expected)
+ def test_constructor_list_str_na(self, string_dtype):
+ result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
+ expected = Series(['1.0', '2.0', np.nan], dtype=object)
+ assert_series_equal(result, expected)
+ assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
| ```python
In [1]: import pandas as pd
In [2]: pd.Series([1, 2, None], dtype='str')[2] # None
```
Closes #21083 | https://api.github.com/repos/pandas-dev/pandas/pulls/21366 | 2018-06-07T18:59:58Z | 2018-06-08T16:27:14Z | 2018-06-08T16:27:14Z | 2018-06-12T16:30:40Z |
Fix Issue #21336: Defeault behavior for resolution property in Timestamps | diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index c636e73fbd6c2..c03065bc7e63d 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -75,4 +75,4 @@ Bug Fixes
**Other**
--
+- Timestamp resolution returns a :class:`Timedelta` (in nanoseconds) rather than a normal `timedelta` object (in microseconds) (:issue:`21336`)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 68c1839221508..e0c476870c37f 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -193,4 +193,3 @@ Other
- :meth: `~pandas.io.formats.style.Styler.background_gradient` now takes a ``text_color_threshold`` parameter to automatically lighten the text color based on the luminance of the background color. This improves readability with dark background colors without the need to limit the background colormap range. (:issue:`21258`)
-
-
--
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 87dc371195b5b..91c90263b0f43 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -795,7 +795,11 @@ cdef class _Timedelta(timedelta):
@property
def resolution(self):
- """ return a string representing the lowest resolution that we have """
+ """
+ Return a string representing the lowest resolution that we have.
+ Note that this is nonstandard behavior.
+ To retrieve a timedelta object use the resolution_timedelta property
+ """
self._ensure_components()
if self._ns:
@@ -813,6 +817,32 @@ cdef class _Timedelta(timedelta):
else:
return "D"
+ @property
+ def resolution_timedelta(self):
+ """
+ Return a timedelta object (rather than a string)
+ representing the lowest resolution we have.
+ to retrieve a string use the resolution property.
+ """
+
+ self._ensure_components()
+ if self._ns:
+ # At time of writing datetime.timedelta doesn't
+ # support nanoseconds as a keyword argument.
+ return timedelta(microseconds=0.1)
+ elif self._us:
+ return timedelta(microseconds=1)
+ elif self._ms:
+ return timedelta(milliseconds=1)
+ elif self._s:
+ return timedelta(seconds=1)
+ elif self._m:
+ return timedelta(minutes=1)
+ elif self._h:
+ return timedelta(hours=1)
+ else:
+ return timedelta(days=1)
+
@property
def nanoseconds(self):
"""
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index ba5ebdab82ddc..fcf9e8583c9fc 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -400,12 +400,17 @@ cdef class _Timestamp(datetime):
def asm8(self):
return np.datetime64(self.value, 'ns')
+ @property
+ def resolution(self):
+ """ Return resolution in a native pandas format. """
+ # GH 21336
+ return Timedelta(nanoseconds=1)
+
def timestamp(self):
"""Return POSIX timestamp as float."""
# py27 compat, see GH#17329
return round(self.value / 1e9, 6)
-
# ----------------------------------------------------------------------
# Python front end to C extension type _Timestamp
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 205fdf49d3e91..509de841b7730 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -588,3 +588,31 @@ def test_components(self):
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
+
+ def test_resolution(self):
+ # GH 21344
+ assert Timedelta(nanoseconds=30).resolution == 'N'
+ # Note that datetime.timedelta doesn't offer
+ # finer resolution than microseconds
+ assert Timedelta(nanoseconds=30).resolution_timedelta.resolution == \
+ timedelta(0, 0, 1)
+
+ assert Timedelta(microseconds=30).resolution == 'U'
+ assert Timedelta(microseconds=30).resolution_timedelta.resolution == \
+ timedelta(0, 0, 1)
+
+ assert Timedelta(milliseconds=30).resolution == 'L'
+ assert Timedelta(milliseconds=30).resolution_timedelta.resolution == \
+ timedelta(0, 0, 1)
+
+ assert Timedelta(seconds=30).resolution == 'S'
+ assert Timedelta(seconds=30).resolution_timedelta.resolution == \
+ timedelta(0, 0, 1)
+
+ assert Timedelta(minutes=30).resolution == 'T'
+ assert Timedelta(minutes=30).resolution_timedelta.resolution == \
+ timedelta(0, 0, 1)
+
+ assert Timedelta(hours=2).resolution == 'H'
+ assert Timedelta(hours=2).resolution_timedelta.resolution == \
+ timedelta(0, 0, 1)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index ab87d98fca8eb..4681531ff55d5 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -172,6 +172,11 @@ def test_woy_boundary(self):
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
+ def test_resolution(self):
+ # GH 21336
+ dt = Timestamp('2100-01-01 00:00:00')
+ assert dt.resolution == Timedelta(nanoseconds=1)
+
class TestTimestampConstructors(object):
| - [x] closes #21336
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21365 | 2018-06-07T18:53:25Z | 2018-07-12T00:55:47Z | null | 2018-07-12T00:55:47Z |
Fix #21356: JSON nested_to_record Silently Drops Top-Level None Values | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 807b83fa2cf69..8ce2824d779f4 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -31,6 +31,7 @@ Fixed Regressions
- Fixed regression in constructors coercing NA values like ``None`` to strings when passing ``dtype=str`` (:issue:`21083`)
- Regression in :func:`pivot_table` where an ordered ``Categorical`` with missing
values for the pivot's ``index`` would give a mis-aligned result (:issue:`21133`)
+- Fixed Regression in :func:`nested_to_record` which now flattens list of dictionaries and doesnot drop keys with value as `None` (:issue:`21356`)
.. _whatsnew_0231.performance:
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 17393d458e746..b845a43b9ca9e 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -80,8 +80,6 @@ def nested_to_record(ds, prefix="", sep=".", level=0):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
- elif v is None: # pop the key if the value is None
- new_d.pop(k)
continue
else:
v = new_d.pop(k)
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index dc34ba81f679d..395c2c90767d3 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -238,15 +238,16 @@ def test_non_ascii_key(self):
tm.assert_frame_equal(result, expected)
def test_missing_field(self, author_missing_data):
- # GH20030: Checks for robustness of json_normalize - should
- # unnest records where only the first record has a None value
+ # GH20030:
result = json_normalize(author_missing_data)
ex_data = [
- {'author_name.first': np.nan,
+ {'info': np.nan,
+ 'author_name.first': np.nan,
'author_name.last_name': np.nan,
'info.created_at': np.nan,
'info.last_updated': np.nan},
- {'author_name.first': 'Jane',
+ {'info': None,
+ 'author_name.first': 'Jane',
'author_name.last_name': 'Doe',
'info.created_at': '11/08/1993',
'info.last_updated': '26/05/2012'}
@@ -351,9 +352,8 @@ def test_json_normalize_errors(self):
errors='raise'
)
- def test_nonetype_dropping(self):
- # GH20030: Checks that None values are dropped in nested_to_record
- # to prevent additional columns of nans when passed to DataFrame
+ def test_donot_drop_nonevalues(self):
+ # GH21356
data = [
{'info': None,
'author_name':
@@ -367,7 +367,8 @@ def test_nonetype_dropping(self):
]
result = nested_to_record(data)
expected = [
- {'author_name.first': 'Smith',
+ {'info': None,
+ 'author_name.first': 'Smith',
'author_name.last_name': 'Appleseed'},
{'author_name.first': 'Jane',
'author_name.last_name': 'Doe',
@@ -395,6 +396,7 @@ def test_nonetype_top_level_bottom_level(self):
}
result = nested_to_record(data)
expected = {
+ 'id': None,
'location.country.state.id': None,
'location.country.state.town.info.id': None,
'location.country.state.town.info.region': None,
@@ -423,6 +425,7 @@ def test_nonetype_multiple_levels(self):
}
result = nested_to_record(data)
expected = {
+ 'id': None,
'location.id': None,
'location.country.id': None,
'location.country.state.id': None,
| - [x] closes #21356
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21363 | 2018-06-07T18:25:30Z | 2018-06-08T16:50:21Z | 2018-06-08T16:50:20Z | 2018-06-12T16:30:39Z |
BUG: Fixed concat warning message | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 09b711c80910c..ead4fac14182d 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -74,6 +74,7 @@ I/O
Reshaping
- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`)
+- Bug in :func:`concat` warning message providing the wrong guidance for future behavior (:issue:`21101`)
Other
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index f9501cd2f9ddf..6f4fdfe5bf5cd 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -24,9 +24,9 @@
Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.
-To accept the future behavior, pass 'sort=True'.
+To accept the future behavior, pass 'sort=False'.
-To retain the current behavior and silence the warning, pass sort=False
+To retain the current behavior and silence the warning, pass 'sort=True'.
""")
| - [x] closes #21101
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21362 | 2018-06-07T18:08:48Z | 2018-06-07T21:21:09Z | 2018-06-07T21:21:08Z | 2018-06-12T16:30:39Z |
Revert change to comparison op with datetime.date objects | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 5a1bcce9b5970..3cebcc56e9083 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -10,12 +10,52 @@ and bug fixes. We recommend that all users upgrade to this version.
:local:
:backlinks: none
-
.. _whatsnew_0231.fixed_regressions:
Fixed Regressions
~~~~~~~~~~~~~~~~~
+**Comparing Series with datetime.date**
+
+We've reverted a 0.23.0 change to comparing a :class:`Series` holding datetimes and a ``datetime.date`` object (:issue:`21152`).
+In pandas 0.22 and earlier, comparing a Series holding datetimes and ``datetime.date`` objects would coerce the ``datetime.date`` to a datetime before comapring.
+This was inconsistent with Python, NumPy, and :class:`DatetimeIndex`, which never consider a datetime and ``datetime.date`` equal.
+
+In 0.23.0, we unified operations between DatetimeIndex and Series, and in the process changed comparisons between a Series of datetimes and ``datetime.date`` without warning.
+
+We've temporarily restored the 0.22.0 behavior, so datetimes and dates may again compare equal, but restore the 0.23.0 behavior in a future release.
+
+To summarize, here's the behavior in 0.22.0, 0.23.0, 0.23.1:
+
+.. code-block:: python
+
+ # 0.22.0... Silently coerce the datetime.date
+ >>> Series(pd.date_range('2017', periods=2)) == datetime.date(2017, 1, 1)
+ 0 True
+ 1 False
+ dtype: bool
+
+ # 0.23.0... Do not coerce the datetime.date
+ >>> Series(pd.date_range('2017', periods=2)) == datetime.date(2017, 1, 1)
+ 0 False
+ 1 False
+ dtype: bool
+
+ # 0.23.1... Coerce the datetime.date with a warning
+ >>> Series(pd.date_range('2017', periods=2)) == datetime.date(2017, 1, 1)
+ /bin/python:1: FutureWarning: Comparing Series of datetimes with 'datetime.date'. Currently, the
+ 'datetime.date' is coerced to a datetime. In the future pandas will
+ not coerce, and the values not compare equal to the 'datetime.date'.
+ To retain the current behavior, convert the 'datetime.date' to a
+ datetime with 'pd.Timestamp'.
+ #!/bin/python3
+ 0 True
+ 1 False
+ dtype: bool
+
+In addition, ordering comparisons will raise a ``TypeError`` in the future.
+
+**Other Fixes**
- Reverted the ability of :func:`~DataFrame.to_sql` to perform multivalue
inserts as this caused regression in certain cases (:issue:`21103`).
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index e14f82906cd06..540ebeee438f6 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -5,7 +5,10 @@
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
+import datetime
import operator
+import textwrap
+import warnings
import numpy as np
import pandas as pd
@@ -1197,8 +1200,35 @@ def wrapper(self, other, axis=None):
if is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
# Dispatch to DatetimeIndex to ensure identical
# Series/Index behavior
+ if (isinstance(other, datetime.date) and
+ not isinstance(other, datetime.datetime)):
+ # https://github.com/pandas-dev/pandas/issues/21152
+ # Compatibility for difference between Series comparison w/
+ # datetime and date
+ msg = (
+ "Comparing Series of datetimes with 'datetime.date'. "
+ "Currently, the 'datetime.date' is coerced to a "
+ "datetime. In the future pandas will not coerce, "
+ "and {future}. "
+ "To retain the current behavior, "
+ "convert the 'datetime.date' to a datetime with "
+ "'pd.Timestamp'."
+ )
+
+ if op in {operator.lt, operator.le, operator.gt, operator.ge}:
+ future = "a TypeError will be raised"
+ else:
+ future = (
+ "'the values will not compare equal to the "
+ "'datetime.date'"
+ )
+ msg = '\n'.join(textwrap.wrap(msg.format(future=future)))
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ other = pd.Timestamp(other)
+
res_values = dispatch_to_index_op(op, self, other,
pd.DatetimeIndex)
+
return self._constructor(res_values, index=self.index,
name=res_name)
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index ec0d7296e540e..95836f046195a 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -88,6 +88,46 @@ def test_ser_cmp_result_names(self, names, op):
class TestTimestampSeriesComparison(object):
+ def test_dt64_ser_cmp_date_warning(self):
+ # https://github.com/pandas-dev/pandas/issues/21359
+ # Remove this test and enble invalid test below
+ ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
+ date = ser.iloc[0].to_pydatetime().date()
+
+ with tm.assert_produces_warning(FutureWarning) as m:
+ result = ser == date
+ expected = pd.Series([True] + [False] * 9, name='dates')
+ tm.assert_series_equal(result, expected)
+ assert "Comparing Series of datetimes " in str(m[0].message)
+ assert "will not compare equal" in str(m[0].message)
+
+ with tm.assert_produces_warning(FutureWarning) as m:
+ result = ser != date
+ tm.assert_series_equal(result, ~expected)
+ assert "will not compare equal" in str(m[0].message)
+
+ with tm.assert_produces_warning(FutureWarning) as m:
+ result = ser <= date
+ tm.assert_series_equal(result, expected)
+ assert "a TypeError will be raised" in str(m[0].message)
+
+ with tm.assert_produces_warning(FutureWarning) as m:
+ result = ser < date
+ tm.assert_series_equal(result, pd.Series([False] * 10, name='dates'))
+ assert "a TypeError will be raised" in str(m[0].message)
+
+ with tm.assert_produces_warning(FutureWarning) as m:
+ result = ser >= date
+ tm.assert_series_equal(result, pd.Series([True] * 10, name='dates'))
+ assert "a TypeError will be raised" in str(m[0].message)
+
+ with tm.assert_produces_warning(FutureWarning) as m:
+ result = ser > date
+ tm.assert_series_equal(result, pd.Series([False] + [True] * 9,
+ name='dates'))
+ assert "a TypeError will be raised" in str(m[0].message)
+
+ @pytest.mark.skip(reason="GH-21359")
def test_dt64ser_cmp_date_invalid(self):
# GH#19800 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
| - [x] closes #21152
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
cc @jbrockmendel, @innominate227
FYI, @jorisvandenbossche the whatsnew will conflict with your other PR | https://api.github.com/repos/pandas-dev/pandas/pulls/21361 | 2018-06-07T18:01:46Z | 2018-06-08T16:54:37Z | 2018-06-08T16:54:37Z | 2018-06-12T16:30:39Z |
Revert "enable multivalues insert (#19664)" | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 7bd56d52b3492..32129147ee281 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4719,12 +4719,6 @@ writes ``data`` to the database in batches of 1000 rows at a time:
data.to_sql('data_chunked', engine, chunksize=1000)
-.. note::
-
- The function :func:`~pandas.DataFrame.to_sql` will perform a multi-value
- insert if the engine dialect ``supports_multivalues_insert``. This will
- greatly speed up the insert in some cases.
-
SQL data types
++++++++++++++
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index ead4fac14182d..2b64ef32c1eb6 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -16,6 +16,10 @@ and bug fixes. We recommend that all users upgrade to this version.
Fixed Regressions
~~~~~~~~~~~~~~~~~
+
+- Reverted the ability of :func:`~DataFrame.to_sql` to perform multivalue
+ inserts as this caused regression in certain cases (:issue:`21103`).
+ In the future this will be made configurable.
- Fixed regression in the :attr:`DatetimeIndex.date` and :attr:`DatetimeIndex.time`
attributes in case of timezone-aware data: :attr:`DatetimeIndex.time` returned
a tz-aware time instead of tz-naive (:issue:`21267`) and :attr:`DatetimeIndex.date`
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index ccb8d2d99d734..a582d32741ae9 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -572,29 +572,8 @@ def create(self):
else:
self._execute_create()
- def insert_statement(self, data, conn):
- """
- Generate tuple of SQLAlchemy insert statement and any arguments
- to be executed by connection (via `_execute_insert`).
-
- Parameters
- ----------
- conn : SQLAlchemy connectable(engine/connection)
- Connection to recieve the data
- data : list of dict
- The data to be inserted
-
- Returns
- -------
- SQLAlchemy statement
- insert statement
- *, optional
- Additional parameters to be passed when executing insert statement
- """
- dialect = getattr(conn, 'dialect', None)
- if dialect and getattr(dialect, 'supports_multivalues_insert', False):
- return self.table.insert(data),
- return self.table.insert(), data
+ def insert_statement(self):
+ return self.table.insert()
def insert_data(self):
if self.index is not None:
@@ -633,9 +612,8 @@ def insert_data(self):
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
- """Insert data into this table with database connection"""
data = [{k: v for k, v in zip(keys, row)} for row in data_iter]
- conn.execute(*self.insert_statement(data, conn))
+ conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 4530cc9d2fba9..f3ab74d37a2bc 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1665,29 +1665,6 @@ class Temporary(Base):
tm.assert_frame_equal(df, expected)
- def test_insert_multivalues(self):
- # issues addressed
- # https://github.com/pandas-dev/pandas/issues/14315
- # https://github.com/pandas-dev/pandas/issues/8953
-
- db = sql.SQLDatabase(self.conn)
- df = DataFrame({'A': [1, 0, 0], 'B': [1.1, 0.2, 4.3]})
- table = sql.SQLTable("test_table", db, frame=df)
- data = [
- {'A': 1, 'B': 0.46},
- {'A': 0, 'B': -2.06}
- ]
- statement = table.insert_statement(data, conn=self.conn)[0]
-
- if self.supports_multivalues_insert:
- assert statement.parameters == data, (
- 'insert statement should be multivalues'
- )
- else:
- assert statement.parameters is None, (
- 'insert statement should not be multivalues'
- )
-
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
@@ -1702,7 +1679,6 @@ class _TestSQLiteAlchemy(object):
"""
flavor = 'sqlite'
- supports_multivalues_insert = True
@classmethod
def connect(cls):
@@ -1751,7 +1727,6 @@ class _TestMySQLAlchemy(object):
"""
flavor = 'mysql'
- supports_multivalues_insert = True
@classmethod
def connect(cls):
@@ -1821,7 +1796,6 @@ class _TestPostgreSQLAlchemy(object):
"""
flavor = 'postgresql'
- supports_multivalues_insert = True
@classmethod
def connect(cls):
| This reverts commit 7c7bd569ce8e0f117c618d068e3d2798134dbc73.
Reverts https://github.com/pandas-dev/pandas/pull/19664
Closes https://github.com/pandas-dev/pandas/issues/21103 | https://api.github.com/repos/pandas-dev/pandas/pulls/21355 | 2018-06-07T14:13:38Z | 2018-06-07T21:25:38Z | 2018-06-07T21:25:38Z | 2018-06-12T16:30:38Z |
Fix typo in error message in the PlanePlot class | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 87b7d13251f28..d1a2121597dd6 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -811,7 +811,7 @@ class PlanePlot(MPLPlot):
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
- raise ValueError(self._kind + ' requires and x and y column')
+ raise ValueError(self._kind + ' requires an x and y column')
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
| - [x] closes #21347
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Very simple fix, just a typo in an error message. | https://api.github.com/repos/pandas-dev/pandas/pulls/21350 | 2018-06-07T07:41:06Z | 2018-06-07T11:23:33Z | 2018-06-07T11:23:33Z | 2018-06-12T16:30:38Z |
Support for use of Enums in MultiIndex | diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml
index 5a9e206ec2c69..03f47186f781a 100644
--- a/ci/deps/travis-27.yaml
+++ b/ci/deps/travis-27.yaml
@@ -41,6 +41,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
+ - enum34
- moto==1.3.4
- hypothesis>=3.58.0
- pip:
diff --git a/pandas/tests/indexes/multi/test_enum.py b/pandas/tests/indexes/multi/test_enum.py
new file mode 100644
index 0000000000000..a13dd465ca813
--- /dev/null
+++ b/pandas/tests/indexes/multi/test_enum.py
@@ -0,0 +1,25 @@
+import pandas as pd
+
+import pandas.util.testing as tm
+import pandas.util._test_decorators as td
+
+
+@td.skip_if_no('enum')
+def test_enum_in_multiindex():
+ # GH 21298
+ # Allow use of Enums as one of the factors in a MultiIndex.
+ from enum import Enum
+ MyEnum = Enum("MyEnum", "A B")
+ df = pd.DataFrame(columns=pd.MultiIndex.from_product(iterables=[
+ MyEnum,
+ [1, 2]
+ ]))
+
+ exp_index_0 = pd.Index([MyEnum.A, MyEnum.B], dtype='object')
+ tm.assert_index_equal(df.columns.levels[0], exp_index_0)
+
+ expected = df.copy()
+ df = df.append({(MyEnum.A, 1): "abc", (MyEnum.B, 2): "xyz"},
+ ignore_index=True)
+ expected.loc[0, [(MyEnum.A, 1), (MyEnum.B, 2)]] = 'abc', 'xyz'
+ tm.assert_frame_equal(df, expected)
| - [x] closes #21298
- [x] tests added / passed: added `test_use_enum_in_multiindex` in `tests/indexes/test_multi.py`, see diff
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The boolean flag in https://github.com/pandas-dev/pandas/blob/9f95f7dbffef7752175ca9ed918314cb6f0b9b18/pandas/core/arrays/categorical.py#L2526-L2528 incorrectly caused a `TypeError` to be raised in
https://github.com/pandas-dev/pandas/blob/9f95f7dbffef7752175ca9ed918314cb6f0b9b18/pandas/core/arrays/categorical.py#L352-L354
when an Enum was used in a MultiIndex. No tests fail when the flag is flipped back. This is fixed in this PR. | https://api.github.com/repos/pandas-dev/pandas/pulls/21348 | 2018-06-07T00:49:51Z | 2018-11-26T03:51:55Z | null | 2022-05-12T01:23:23Z |
PERF: Add __contains__ to CategoricalIndex | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 5464e7cba22c3..48f42621d183d 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -193,3 +193,16 @@ def time_categorical_series_is_monotonic_increasing(self):
def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
+
+
+class Contains(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ N = 10**5
+ self.ci = tm.makeCategoricalIndex(N)
+ self.cat = self.ci.categories[0]
+
+ def time_contains(self):
+ self.cat in self.ci
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 9c29c34adb7dd..e23db591181fa 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -33,6 +33,8 @@ Performance Improvements
- Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`)
- Improved performance of :meth:`CategoricalIndex.is_unique` (:issue:`21107`)
+- Improved performance of membership checks in :class:`CategoricalIndex`
+ (i.e. ``x in ci``-style checks are much faster). :meth:`CategoricalIndex.contains` is likewise much faster (:issue:`21107`)
-
-
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 150eca32e229d..9643d8c3ddc26 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -328,16 +328,18 @@ def __contains__(self, key):
if self.categories._defer_to_indexing:
return key in self.categories
- return key in self.values
+ try:
+ code_value = self.categories.get_loc(key)
+ except KeyError:
+ if isna(key):
+ code_value = -1
+ else:
+ return False
+ return code_value in self._engine
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
- hash(key)
-
- if self.categories._defer_to_indexing:
- return self.categories.contains(key)
-
- return key in self.values
+ return key in self
def __array__(self, dtype=None):
""" the array interface, return my values """
| - [x] progress towards #20395
- [x] xref #21022
- [ ] tests added / passed
- [x] benchmark added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Currently, membership checks in ``CategoricalIndex`` is very slow as explained in #21022. This PR fixes the issue for ``CategoricalIndex``, while #21022 contains the fix for ``Categorical``. The difference between the two cases is the use of ``_engine`` for ``CategoricalIndex``, which makes this even faster than the ``Catagorical`` solution in #21022.
Tests exist already and can be found in ``tests/indexes/test_category.py::TestCategoricalIndex::test_contains``.
ASV:
```
before after ratio
[0c65c57a] [986779ab]
- 2.49±0.2ms 3.26±0.2μs 0.00 categoricals.Contains.time_contains
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/21342 | 2018-06-06T18:48:52Z | 2018-06-07T21:28:07Z | null | 2018-06-07T21:28:07Z |
make DateOffset immutable | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index a9c49b7476fa6..fd34424dedc52 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -67,6 +67,7 @@ Datetimelike API Changes
^^^^^^^^^^^^^^^^^^^^^^^^
- For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with non-``None`` ``freq`` attribute, addition or subtraction of integer-dtyped array or ``Index`` will return an object of the same class (:issue:`19959`)
+- :class:`DateOffset` objects are now immutable. Attempting to alter one of these will now raise ``AttributeError`` (:issue:`21341`)
.. _whatsnew_0240.api.other:
@@ -176,7 +177,6 @@ Timezones
Offsets
^^^^^^^
--
-
-
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 3ca9bb307da9c..a9ef9166e4d33 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -304,6 +304,15 @@ class _BaseOffset(object):
_day_opt = None
_attributes = frozenset(['n', 'normalize'])
+ def __init__(self, n=1, normalize=False):
+ n = self._validate_n(n)
+ object.__setattr__(self, "n", n)
+ object.__setattr__(self, "normalize", normalize)
+ object.__setattr__(self, "_cache", {})
+
+ def __setattr__(self, name, value):
+ raise AttributeError("DateOffset objects are immutable.")
+
@property
def kwds(self):
# for backwards-compatibility
@@ -395,13 +404,14 @@ class _BaseOffset(object):
kwds = {key: odict[key] for key in odict if odict[key]}
state.update(kwds)
- self.__dict__ = state
+ self.__dict__.update(state)
+
if 'weekmask' in state and 'holidays' in state:
calendar, holidays = _get_calendar(weekmask=self.weekmask,
holidays=self.holidays,
calendar=None)
- self.calendar = calendar
- self.holidays = holidays
+ object.__setattr__(self, "calendar", calendar)
+ object.__setattr__(self, "holidays", holidays)
def __getstate__(self):
"""Return a pickleable state"""
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 5dd2a199405bf..66cb9baeb9357 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -234,6 +234,14 @@ class TestCommon(Base):
'Nano': Timestamp(np_datetime64_compat(
'2011-01-01T09:00:00.000000001Z'))}
+ def test_immutable(self, offset_types):
+ # GH#21341 check that __setattr__ raises
+ offset = self._get_offset(offset_types)
+ with pytest.raises(AttributeError):
+ offset.normalize = True
+ with pytest.raises(AttributeError):
+ offset.n = 91
+
def test_return_type(self, offset_types):
offset = self._get_offset(offset_types)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index ffa2c0a5e3211..da8fdb4d79e34 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -23,7 +23,6 @@
ApplyTypeError,
as_datetime, _is_normalized,
_get_calendar, _to_dt64,
- _determine_offset,
apply_index_wraps,
roll_yearday,
shift_month,
@@ -192,11 +191,14 @@ def __add__(date):
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ BaseOffset.__init__(self, n, normalize)
- self._offset, self._use_relativedelta = _determine_offset(kwds)
- self.__dict__.update(kwds)
+ off, use_rd = liboffsets._determine_offset(kwds)
+ object.__setattr__(self, "_offset", off)
+ object.__setattr__(self, "_use_relativedelta", use_rd)
+ for key in kwds:
+ val = kwds[key]
+ object.__setattr__(self, key, val)
@apply_wraps
def apply(self, other):
@@ -446,9 +448,9 @@ def __init__(self, weekmask, holidays, calendar):
# following two attributes. See DateOffset._params()
# holidays, weekmask
- self.weekmask = weekmask
- self.holidays = holidays
- self.calendar = calendar
+ object.__setattr__(self, "weekmask", weekmask)
+ object.__setattr__(self, "holidays", holidays)
+ object.__setattr__(self, "calendar", calendar)
class BusinessMixin(object):
@@ -480,9 +482,8 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset):
_attributes = frozenset(['n', 'normalize', 'offset'])
def __init__(self, n=1, normalize=False, offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "_offset", offset)
def _offset_str(self):
def get_str(td):
@@ -578,9 +579,11 @@ class BusinessHourMixin(BusinessMixin):
def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
# must be validated here to equality check
- self.start = liboffsets._validate_business_time(start)
- self.end = liboffsets._validate_business_time(end)
- self._offset = offset
+ start = liboffsets._validate_business_time(start)
+ object.__setattr__(self, "start", start)
+ end = liboffsets._validate_business_time(end)
+ object.__setattr__(self, "end", end)
+ object.__setattr__(self, "_offset", offset)
@cache_readonly
def next_bday(self):
@@ -807,8 +810,7 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
def __init__(self, n=1, normalize=False, start='09:00',
end='17:00', offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ BaseOffset.__init__(self, n, normalize)
super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
@@ -837,9 +839,8 @@ class CustomBusinessDay(_CustomMixin, BusinessDay):
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@@ -898,9 +899,8 @@ class CustomBusinessHour(_CustomMixin, BusinessHourMixin,
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None,
start='09:00', end='17:00', offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
BusinessHourMixin.__init__(self, start=start, end=end, offset=offset)
@@ -914,9 +914,7 @@ class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
_attributes = frozenset(['n', 'normalize'])
- def __init__(self, n=1, normalize=False):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ __init__ = BaseOffset.__init__
@property
def name(self):
@@ -995,9 +993,8 @@ class _CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset):
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@@ -1074,18 +1071,18 @@ class SemiMonthOffset(DateOffset):
_attributes = frozenset(['n', 'normalize', 'day_of_month'])
def __init__(self, n=1, normalize=False, day_of_month=None):
+ BaseOffset.__init__(self, n, normalize)
+
if day_of_month is None:
- self.day_of_month = self._default_day_of_month
+ object.__setattr__(self, "day_of_month",
+ self._default_day_of_month)
else:
- self.day_of_month = int(day_of_month)
+ object.__setattr__(self, "day_of_month", int(day_of_month))
if not self._min_day_of_month <= self.day_of_month <= 27:
msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}'
raise ValueError(msg.format(min=self._min_day_of_month,
day=self.day_of_month))
- self.n = self._validate_n(n)
- self.normalize = normalize
-
@classmethod
def _from_name(cls, suffix=None):
return cls(day_of_month=suffix)
@@ -1291,9 +1288,8 @@ class Week(DateOffset):
_attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=None):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self.weekday = weekday
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "weekday", weekday)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
@@ -1421,10 +1417,9 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset):
_attributes = frozenset(['n', 'normalize', 'week', 'weekday'])
def __init__(self, n=1, normalize=False, week=0, weekday=0):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self.weekday = weekday
- self.week = week
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "weekday", weekday)
+ object.__setattr__(self, "week", week)
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
@@ -1493,9 +1488,8 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset):
_attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=0):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self.weekday = weekday
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "weekday", weekday)
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -1553,11 +1547,11 @@ class QuarterOffset(DateOffset):
# startingMonth vs month attr names are resolved
def __init__(self, n=1, normalize=False, startingMonth=None):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ BaseOffset.__init__(self, n, normalize)
+
if startingMonth is None:
startingMonth = self._default_startingMonth
- self.startingMonth = startingMonth
+ object.__setattr__(self, "startingMonth", startingMonth)
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@@ -1679,11 +1673,10 @@ def onOffset(self, dt):
return dt.month == self.month and dt.day == self._get_offset_day(dt)
def __init__(self, n=1, normalize=False, month=None):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ BaseOffset.__init__(self, n, normalize)
month = month if month is not None else self._default_month
- self.month = month
+ object.__setattr__(self, "month", month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
@@ -1776,12 +1769,11 @@ class FY5253(DateOffset):
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
variation="nearest"):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self.startingMonth = startingMonth
- self.weekday = weekday
+ BaseOffset.__init__(self, n, normalize)
+ object.__setattr__(self, "startingMonth", startingMonth)
+ object.__setattr__(self, "weekday", weekday)
- self.variation = variation
+ object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -1976,13 +1968,12 @@ class FY5253Quarter(DateOffset):
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
qtr_with_extra_week=1, variation="nearest"):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ BaseOffset.__init__(self, n, normalize)
- self.weekday = weekday
- self.startingMonth = startingMonth
- self.qtr_with_extra_week = qtr_with_extra_week
- self.variation = variation
+ object.__setattr__(self, "startingMonth", startingMonth)
+ object.__setattr__(self, "weekday", weekday)
+ object.__setattr__(self, "qtr_with_extra_week", qtr_with_extra_week)
+ object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -2129,9 +2120,7 @@ class Easter(DateOffset):
_adjust_dst = True
_attributes = frozenset(['n', 'normalize'])
- def __init__(self, n=1, normalize=False):
- self.n = self._validate_n(n)
- self.normalize = normalize
+ __init__ = BaseOffset.__init__
@apply_wraps
def apply(self, other):
@@ -2177,11 +2166,10 @@ class Tick(SingleConstructorOffset):
_attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
- self.n = self._validate_n(n)
+ BaseOffset.__init__(self, n, normalize)
if normalize:
raise ValueError("Tick offset with `normalize=True` are not "
"allowed.") # GH#21427
- self.normalize = normalize
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
| Returning to the long-standing goal of making `DateOffset`s immutable (recall: `DateOffset.__eq__` calls `DateOffset._params` which is _very_ slow. `_params` can't be cached ATM because `DateOffset` is mutable`)
Earlier attempts in this direction tried to make the base class a `cdef class`, but that has run into `pickle` problems that I haven't been able to sort out so far. This PR goes the patch-`__setattr__` route instead.
Note: this PR does _not_ implement the caching that is the underlying goal.
I'm likely to make some other PRs in this area, will try to keep them orthogonal. | https://api.github.com/repos/pandas-dev/pandas/pulls/21341 | 2018-06-06T16:10:10Z | 2018-06-21T10:24:21Z | 2018-06-21T10:24:21Z | 2018-06-22T03:27:26Z |
HDFStore.walk() to iterate on groups | diff --git a/doc/source/api.rst b/doc/source/api.rst
index f2c00d5d12031..8dc5d0e9fc023 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -100,6 +100,7 @@ HDFStore: PyTables (HDF5)
HDFStore.select
HDFStore.info
HDFStore.keys
+ HDFStore.walk
Feather
~~~~~~~
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ae6c4f12f04f7..cf845c176b4c7 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3554,6 +3554,25 @@ everything in the sub-store and **below**, so be *careful*.
store.remove('food')
store
+
+You can walk through the group hierarchy using the ``walk`` method which
+will yield a tuple for each group key along with the relative keys of its contents.
+
+.. versionadded:: 0.24.0
+
+
+.. ipython:: python
+
+ for (path, subgroups, subkeys) in store.walk():
+ for subgroup in subgroups:
+ print('GROUP: {}/{}'.format(path, subgroup))
+ for subkey in subkeys:
+ key = '/'.join([path, subkey])
+ print('KEY: {}'.format(key))
+ print(store.get(key))
+
+
+
.. warning::
Hierarchical keys cannot be retrieved as dotted (attribute) access as described above for items stored under the root node.
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 72e7373d0dd33..b79d886757f99 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -19,6 +19,8 @@ Other Enhancements
- :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`)
- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with :class:`MultiIndex` (:issue:`21115`)
- Added support for reading from Google Cloud Storage via the ``gcsfs`` library (:issue:`19454`)
+- New method :meth:`HDFStore.walk` will recursively walk the group hierarchy of an HDF5 file (:issue:`10932`)
+-
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 580c7923017e5..f93ad425b2c6a 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1106,6 +1106,53 @@ def groups(self):
g._v_name != u('table'))))
]
+ def walk(self, where="/"):
+ """ Walk the pytables group hierarchy for pandas objects
+
+ This generator will yield the group path, subgroups and pandas object
+ names for each group.
+ Any non-pandas PyTables objects that are not a group will be ignored.
+
+ The `where` group itself is listed first (preorder), then each of its
+ child groups (following an alphanumerical order) is also traversed,
+ following the same procedure.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ where : str, optional
+ Group where to start walking.
+ If not supplied, the root group is used.
+
+ Yields
+ ------
+ path : str
+ Full path to a group (without trailing '/')
+ groups : list of str
+ names of the groups contained in `path`
+ leaves : list of str
+ names of the pandas objects contained in `path`
+
+ """
+ _tables()
+ self._check_if_open()
+ for g in self._handle.walk_groups(where):
+ if getattr(g._v_attrs, 'pandas_type', None) is not None:
+ continue
+
+ groups = []
+ leaves = []
+ for child in g._v_children.values():
+ pandas_type = getattr(child._v_attrs, 'pandas_type', None)
+ if pandas_type is None:
+ if isinstance(child, _table_mod.group.Group):
+ groups.append(child._v_name)
+ else:
+ leaves.append(child._v_name)
+
+ yield (g._v_pathname.rstrip('/'), groups, leaves)
+
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index b95df3840b6c5..29063b64221c1 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -635,6 +635,57 @@ def test_get(self):
pytest.raises(KeyError, store.get, 'b')
+ @pytest.mark.parametrize('where, expected', [
+ ('/', {
+ '': ({'first_group', 'second_group'}, set()),
+ '/first_group': (set(), {'df1', 'df2'}),
+ '/second_group': ({'third_group'}, {'df3', 's1'}),
+ '/second_group/third_group': (set(), {'df4'}),
+ }),
+ ('/second_group', {
+ '/second_group': ({'third_group'}, {'df3', 's1'}),
+ '/second_group/third_group': (set(), {'df4'}),
+ })
+ ])
+ def test_walk(self, where, expected):
+ # GH10143
+ objs = {
+ 'df1': pd.DataFrame([1, 2, 3]),
+ 'df2': pd.DataFrame([4, 5, 6]),
+ 'df3': pd.DataFrame([6, 7, 8]),
+ 'df4': pd.DataFrame([9, 10, 11]),
+ 's1': pd.Series([10, 9, 8]),
+ # Next 3 items aren't pandas objects and should be ignored
+ 'a1': np.array([[1, 2, 3], [4, 5, 6]]),
+ 'tb1': np.array([(1, 2, 3), (4, 5, 6)], dtype='i,i,i'),
+ 'tb2': np.array([(7, 8, 9), (10, 11, 12)], dtype='i,i,i')
+ }
+
+ with ensure_clean_store('walk_groups.hdf', mode='w') as store:
+ store.put('/first_group/df1', objs['df1'])
+ store.put('/first_group/df2', objs['df2'])
+ store.put('/second_group/df3', objs['df3'])
+ store.put('/second_group/s1', objs['s1'])
+ store.put('/second_group/third_group/df4', objs['df4'])
+ # Create non-pandas objects
+ store._handle.create_array('/first_group', 'a1', objs['a1'])
+ store._handle.create_table('/first_group', 'tb1', obj=objs['tb1'])
+ store._handle.create_table('/second_group', 'tb2', obj=objs['tb2'])
+
+ assert len(list(store.walk(where=where))) == len(expected)
+ for path, groups, leaves in store.walk(where=where):
+ assert path in expected
+ expected_groups, expected_frames = expected[path]
+ assert expected_groups == set(groups)
+ assert expected_frames == set(leaves)
+ for leaf in leaves:
+ frame_path = '/'.join([path, leaf])
+ obj = store.get(frame_path)
+ if 'df' in leaf:
+ tm.assert_frame_equal(obj, objs[leaf])
+ else:
+ tm.assert_series_equal(obj, objs[leaf])
+
def test_getattr(self):
with ensure_clean_store(self.path) as store:
| This PR adds a walk() method to HDFStore in order to iterate on groups.
This is a revival of the initial PR #10932, rebased on upstream/master and updated tests.
- [x] closes #10143
- [x] tests added / passed: test_pytables.py:testwalk()
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21339 | 2018-06-06T09:27:22Z | 2018-06-26T22:53:32Z | null | 2018-06-27T08:11:24Z |
Series.describe returns first and last for tz-aware datetimes | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 691345ad26e58..c609cb04db028 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -316,6 +316,7 @@ Timezones
- Bug in :class:`Series` constructor which would coerce tz-aware and tz-naive :class:`Timestamp`s to tz-aware (:issue:`13051`)
- Bug in :class:`Index` with ``datetime64[ns, tz]`` dtype that did not localize integer data correctly (:issue:`20964`)
- Bug in :class:`DatetimeIndex` where constructing with an integer and tz would not localize correctly (:issue:`12619`)
+- Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`)
Offsets
^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 818dd1b408518..65ca467a05840 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -20,7 +20,7 @@
is_bool_dtype,
is_categorical_dtype,
is_numeric_dtype,
- is_datetime64_dtype,
+ is_datetime64_any_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_list_like,
@@ -8531,12 +8531,13 @@ def describe_categorical_1d(data):
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
- if is_datetime64_dtype(data):
+ if is_datetime64_any_dtype(data):
+ tz = data.dt.tz
asint = data.dropna().values.view('i8')
names += ['top', 'freq', 'first', 'last']
- result += [tslib.Timestamp(top), freq,
- tslib.Timestamp(asint.min()),
- tslib.Timestamp(asint.max())]
+ result += [tslib.Timestamp(top, tz=tz), freq,
+ tslib.Timestamp(asint.min(), tz=tz),
+ tslib.Timestamp(asint.max(), tz=tz)]
else:
names += ['top', 'freq']
result += [top, freq]
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index d357208813dd8..c0e9b89c1877f 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -417,6 +417,28 @@ def test_describe_timedelta_values(self):
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(res) == exp_repr
+ def test_describe_tz_values(self, tz_naive_fixture):
+ # GH 21332
+ tz = tz_naive_fixture
+ s1 = Series(range(5))
+ start = Timestamp(2018, 1, 1)
+ end = Timestamp(2018, 1, 5)
+ s2 = Series(date_range(start, end, tz=tz))
+ df = pd.DataFrame({'s1': s1, 's2': s2})
+
+ expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
+ 2, 1.581139, 0, 1, 2, 3, 4],
+ 's2': [5, 5, s2.value_counts().index[0], 1,
+ start.tz_localize(tz),
+ end.tz_localize(tz), np.nan, np.nan,
+ np.nan, np.nan, np.nan, np.nan, np.nan]},
+ index=['count', 'unique', 'top', 'freq', 'first',
+ 'last', 'mean', 'std', 'min', '25%', '50%',
+ '75%', 'max']
+ )
+ res = df.describe(include='all')
+ tm.assert_frame_equal(res, expected)
+
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index fcfaff9b11002..b574b6dce930c 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -336,6 +336,23 @@ def test_describe(self):
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
+ def test_describe_with_tz(self, tz_naive_fixture):
+ # GH 21332
+ tz = tz_naive_fixture
+ name = tz_naive_fixture
+ start = Timestamp(2018, 1, 1)
+ end = Timestamp(2018, 1, 5)
+ s = Series(date_range(start, end, tz=tz), name=name)
+ result = s.describe()
+ expected = Series(
+ [5, 5, s.value_counts().index[0], 1, start.tz_localize(tz),
+ end.tz_localize(tz)
+ ],
+ name=name,
+ index=['count', 'unique', 'top', 'freq', 'first', 'last']
+ )
+ tm.assert_series_equal(result, expected)
+
def test_argsort(self):
self._check_accum_op('argsort', check_dtype=False)
argsorted = self.ts.argsort()
| GH issue 21328
- [x] closes #21328
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21332 | 2018-06-05T21:43:25Z | 2018-07-06T22:57:52Z | 2018-07-06T22:57:52Z | 2018-07-07T01:42:11Z |
DOC: fix mistake in Series.str.cat | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 5d50c45fe7eca..44811781837bc 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -2172,9 +2172,9 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
Returns
-------
- concat : str if `other is None`, Series/Index of objects if `others is
- not None`. In the latter case, the result will remain categorical
- if the calling Series/Index is categorical.
+ concat : str or Series/Index of objects
+ If `others` is None, `str` is returned, otherwise a `Series/Index`
+ (same type as caller) of objects is returned.
See Also
--------
| Fix error in API-docstring that was introduced at the end of #20347 due to timepressure for the v.0.23-cutoff: removed functionality that categorical callers get categorical output, but forgot to adapt doc-string.
Unfortunately, this survived both #20347 and the follow-up, but since v.0.23.1 is coming soon, I didn't wanna let this opportunity pass. | https://api.github.com/repos/pandas-dev/pandas/pulls/21330 | 2018-06-05T20:08:21Z | 2018-06-06T15:08:23Z | 2018-06-06T15:08:23Z | 2018-06-12T16:30:38Z |
Fixed Issue Preventing Agg on RollingGroupBy Objects | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 3b61fde77cb9f..a0eeb4c6288c8 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -783,6 +783,7 @@ Groupby/Resample/Rolling
- Bug in :meth:`Series.resample` when passing ``numpy.timedelta64`` to ``loffset`` kwarg (:issue:`7687`).
- Bug in :meth:`Resampler.asfreq` when frequency of ``TimedeltaIndex`` is a subperiod of a new frequency (:issue:`13022`).
- Bug in :meth:`SeriesGroupBy.mean` when values were integral but could not fit inside of int64, overflowing instead. (:issue:`22487`)
+- :func:`RollingGroupby.agg` and :func:`ExpandingGroupby.agg` now support multiple aggregation functions as parameters (:issue:`15072`)
Sparse
^^^^^^
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 26fea89b45ae1..7f14a68503973 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -245,8 +245,8 @@ def _obj_with_exclusions(self):
def __getitem__(self, key):
if self._selection is not None:
- raise Exception('Column(s) {selection} already selected'
- .format(selection=self._selection))
+ raise IndexError('Column(s) {selection} already selected'
+ .format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 96c74f7fd4d75..ac84971de08d8 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -44,8 +44,15 @@ def _gotitem(self, key, ndim, subset=None):
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = {attr: getattr(self, attr) for attr in self._attributes}
+
+ # Try to select from a DataFrame, falling back to a Series
+ try:
+ groupby = self._groupby[key]
+ except IndexError:
+ groupby = self._groupby
+
self = self.__class__(subset,
- groupby=self._groupby[key],
+ groupby=groupby,
parent=self,
**kwargs)
self._reset_cache()
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 483f814bc8383..3cdd0965ccfd0 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -623,8 +623,14 @@ def test_as_index_series_return_frame(df):
assert isinstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
- # corner case
- pytest.raises(Exception, grouped['C'].__getitem__, 'D')
+
+def test_as_index_series_column_slice_raises(df):
+ # GH15072
+ grouped = df.groupby('A', as_index=False)
+ msg = r"Column\(s\) C already selected"
+
+ with tm.assert_raises_regex(IndexError, msg):
+ grouped['C'].__getitem__('D')
def test_groupby_as_index_cython(df):
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 052bfd2b858fb..cc663fc59cbf1 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -1,3 +1,4 @@
+from collections import OrderedDict
from itertools import product
import pytest
import warnings
@@ -314,6 +315,53 @@ def test_preserve_metadata(self):
assert s2.name == 'foo'
assert s3.name == 'foo'
+ @pytest.mark.parametrize("func,window_size,expected_vals", [
+ ('rolling', 2, [[np.nan, np.nan, np.nan, np.nan],
+ [15., 20., 25., 20.],
+ [25., 30., 35., 30.],
+ [np.nan, np.nan, np.nan, np.nan],
+ [20., 30., 35., 30.],
+ [35., 40., 60., 40.],
+ [60., 80., 85., 80]]),
+ ('expanding', None, [[10., 10., 20., 20.],
+ [15., 20., 25., 20.],
+ [20., 30., 30., 20.],
+ [10., 10., 30., 30.],
+ [20., 30., 35., 30.],
+ [26.666667, 40., 50., 30.],
+ [40., 80., 60., 30.]])])
+ def test_multiple_agg_funcs(self, func, window_size, expected_vals):
+ # GH 15072
+ df = pd.DataFrame([
+ ['A', 10, 20],
+ ['A', 20, 30],
+ ['A', 30, 40],
+ ['B', 10, 30],
+ ['B', 30, 40],
+ ['B', 40, 80],
+ ['B', 80, 90]], columns=['stock', 'low', 'high'])
+
+ f = getattr(df.groupby('stock'), func)
+ if window_size:
+ window = f(window_size)
+ else:
+ window = f()
+
+ index = pd.MultiIndex.from_tuples([
+ ('A', 0), ('A', 1), ('A', 2),
+ ('B', 3), ('B', 4), ('B', 5), ('B', 6)], names=['stock', None])
+ columns = pd.MultiIndex.from_tuples([
+ ('low', 'mean'), ('low', 'max'), ('high', 'mean'),
+ ('high', 'min')])
+ expected = pd.DataFrame(expected_vals, index=index, columns=columns)
+
+ result = window.agg(OrderedDict((
+ ('low', ['mean', 'max']),
+ ('high', ['mean', 'min']),
+ )))
+
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestWindow(Base):
| - [X] closes #15072
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
AFAICT the fact that RollingGroupBy could not use `agg` with a list of functions is simply due to the fact that the GroupByMixing it inherits from could not handle the reduction in dimensions that occurs via the normal aggregation functions. | https://api.github.com/repos/pandas-dev/pandas/pulls/21323 | 2018-06-05T04:47:09Z | 2018-09-26T10:35:55Z | 2018-09-26T10:35:54Z | 2020-01-16T00:34:29Z |
BLD: include dll in package_data on Windows | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 1a8b1603daaaa..18004be2b6b5f 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -132,3 +132,4 @@ Bug Fixes
**Other**
- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`)
+- Bug preventing pandas being used on Windows without C++ redistributable installed (:issue:`21106`)
diff --git a/setup.py b/setup.py
index 6febe674fb2a1..90ec8e91a0700 100755
--- a/setup.py
+++ b/setup.py
@@ -453,10 +453,10 @@ def pxd(name):
return pjoin('pandas', name + '.pxd')
-# args to ignore warnings
if is_platform_windows():
extra_compile_args = []
else:
+ # args to ignore warnings
extra_compile_args = ['-Wno-unused-function']
lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h',
@@ -733,7 +733,7 @@ def pxd(name):
maintainer=AUTHOR,
version=versioneer.get_version(),
packages=find_packages(include=['pandas', 'pandas.*']),
- package_data={'': ['data/*', 'templates/*'],
+ package_data={'': ['data/*', 'templates/*', '_libs/*.dll'],
'pandas.tests.io': ['data/legacy_hdf/*.h5',
'data/legacy_pickle/*/*.pickle',
'data/legacy_msgpack/*/*.msgpack',
| - [x] closes #21106
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Not sure how to test this, but I believe should remove the runtime dependency by static linking against the MSVC++ runtime. @cgohlke any thoughts? | https://api.github.com/repos/pandas-dev/pandas/pulls/21321 | 2018-06-05T01:03:57Z | 2018-06-08T23:32:21Z | 2018-06-08T23:32:21Z | 2018-06-12T16:30:38Z |
BUG: Fix empty Data frames to JSON round-trippable back to data frames | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 07120e26b4ecd..6118732768cb0 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -122,6 +122,7 @@ Bug Fixes
- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`)
- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`)
- Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`)
+- Bug in IO JSON :func:`read_json` reading empty JSON schema with ``orient='table'`` back to :class:`DataFrame` caused an error (:issue:`21287`)
**Reshaping**
diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py
index 6f663f8ff8433..2dc176648fb31 100644
--- a/pandas/io/json/table_schema.py
+++ b/pandas/io/json/table_schema.py
@@ -296,7 +296,7 @@ def parse_table_schema(json, precise_float):
"""
table = loads(json, precise_float=precise_float)
col_order = [field['name'] for field in table['schema']['fields']]
- df = DataFrame(table['data'])[col_order]
+ df = DataFrame(table['data'], columns=col_order)[col_order]
dtypes = {field['name']: convert_json_field_to_pandas_type(field)
for field in table['schema']['fields']}
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 49b39c17238ae..b6483d0e978ba 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -560,3 +560,16 @@ def test_multiindex(self, index_names):
out = df.to_json(orient="table")
result = pd.read_json(out, orient="table")
tm.assert_frame_equal(df, result)
+
+ @pytest.mark.parametrize("strict_check", [
+ pytest.param(True, marks=pytest.mark.xfail), False])
+ def test_empty_frame_roundtrip(self, strict_check):
+ # GH 21287
+ df = pd.DataFrame([], columns=['a', 'b', 'c'])
+ expected = df.copy()
+ out = df.to_json(orient='table')
+ result = pd.read_json(out, orient='table')
+ # TODO: When DF coercion issue (#21345) is resolved tighten type checks
+ tm.assert_frame_equal(expected, result,
+ check_dtype=strict_check,
+ check_index_type=strict_check)
| [x] closes #21287
[x] tests added / passed
[x]passes git diff upstream/master -u -- "*.py" | flake8 --diff
[x]whatsnew entry
Fixes the bug occurring when empty DF, previously saved to JSON-file, is read from JSON back to DF. | https://api.github.com/repos/pandas-dev/pandas/pulls/21318 | 2018-06-04T19:23:28Z | 2018-06-08T23:40:04Z | 2018-06-08T23:40:03Z | 2018-06-12T16:30:37Z |
DOC: whatsnew note for MultiIndex Sorting Fix | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 69b07d12c1e98..4f7f720c9004a 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -87,6 +87,7 @@ Indexing
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
- Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`)
+- Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`)
-
I/O
| xref https://github.com/pandas-dev/pandas/pull/21043#issuecomment-394342149 | https://api.github.com/repos/pandas-dev/pandas/pulls/21316 | 2018-06-04T16:32:58Z | 2018-06-05T09:04:03Z | 2018-06-05T09:04:02Z | 2018-12-25T06:12:51Z |
Make Period - Period return DateOffset instead of int | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index abf574ae109fd..41be4cb0053ff 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -69,6 +69,49 @@ Current Behavior:
.. _whatsnew_0240.api.datetimelike:
+
+.. _whatsnew_0240.api.period_subtraction:
+
+Period Subtraction
+^^^^^^^^^^^^^^^^^^
+
+Subtraction of a ``Period`` from another ``Period`` will give a ``DateOffset``.
+instead of an integer (:issue:`21314`)
+
+.. ipython:: python
+
+ june = pd.Period('June 2018')
+ april = pd.Period('April 2018')
+ june - april
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [2]: june = pd.Period('June 2018')
+
+ In [3]: april = pd.Period('April 2018')
+
+ In [4]: june - april
+ Out [4]: 2
+
+Similarly, subtraction of a ``Period`` from a ``PeriodIndex`` will now return
+an ``Index`` of ``DateOffset`` objects instead of an ``Int64Index``
+
+.. ipython:: python
+
+ pi = pd.period_range('June 2018', freq='M', periods=3)
+ pi - pi[0]
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [2]: pi = pd.period_range('June 2018', freq='M', periods=3)
+
+ In [3]: pi - pi[0]
+ Out[3]: Int64Index([0, 1, 2], dtype='int64')
+
Datetimelike API Changes
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 49208056f88fe..6985d3b8df363 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1123,9 +1123,12 @@ cdef class _Period(object):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
- return self.ordinal - other.ordinal
+ return (self.ordinal - other.ordinal) * self.freq
elif getattr(other, '_typ', None) == 'periodindex':
- return -other.__sub__(self)
+ # GH#21314 PeriodIndex - Period returns an object-index
+ # of DateOffset objects, for which we cannot use __neg__
+ # directly, so we have to apply it pointwise
+ return other.__sub__(self).map(lambda x: -x)
else: # pragma: no cover
return NotImplemented
elif is_period_object(other):
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c7cb245263df8..a47dfe03445f5 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -899,7 +899,9 @@ def __add__(self, other):
raise TypeError("cannot add {dtype}-dtype to {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
-
+ elif is_categorical_dtype(other):
+ # Categorical op will raise; defer explicitly
+ return NotImplemented
else: # pragma: no cover
return NotImplemented
@@ -964,6 +966,9 @@ def __sub__(self, other):
raise TypeError("cannot subtract {dtype}-dtype from {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
+ elif is_categorical_dtype(other):
+ # Categorical op will raise; defer explicitly
+ return NotImplemented
else: # pragma: no cover
return NotImplemented
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index c163e3d53e634..d4d35d48743bd 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -551,13 +551,14 @@ def is_all_dates(self):
@property
def is_full(self):
"""
- Returns True if there are any missing periods from start to end
+ Returns True if this PeriodIndex is range-like in that all Periods
+ between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
- values = self.values
+ values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
@@ -761,17 +762,19 @@ def _sub_datelike(self, other):
return NotImplemented
def _sub_period(self, other):
+ # If the operation is well-defined, we return an object-Index
+ # of DateOffsets. Null entries are filled with pd.NaT
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
+ new_data = np.array([self.freq * x for x in new_data])
if self.hasnans:
- new_data = new_data.astype(np.float64)
- new_data[self._isnan] = np.nan
- # result must be Int64Index or Float64Index
+ new_data[self._isnan] = tslib.NaT
+
return Index(new_data)
def shift(self, n):
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 65afe85628f8e..fb381a5640519 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -258,9 +258,10 @@ def test_ops_frame_period(self):
assert df['B'].dtype == object
p = pd.Period('2015-03', freq='M')
+ off = p.freq
# dtype will be object because of original dtype
- exp = pd.DataFrame({'A': np.array([2, 1], dtype=object),
- 'B': np.array([14, 13], dtype=object)})
+ exp = pd.DataFrame({'A': np.array([2 * off, 1 * off], dtype=object),
+ 'B': np.array([14 * off, 13 * off], dtype=object)})
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
@@ -271,7 +272,7 @@ def test_ops_frame_period(self):
assert df2['A'].dtype == object
assert df2['B'].dtype == object
- exp = pd.DataFrame({'A': np.array([4, 4], dtype=object),
- 'B': np.array([16, 16], dtype=object)})
+ exp = pd.DataFrame({'A': np.array([4 * off, 4 * off], dtype=object),
+ 'B': np.array([16 * off, 16 * off], dtype=object)})
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py
index aea019d910fe0..3a6ca14400dff 100644
--- a/pandas/tests/indexes/period/test_arithmetic.py
+++ b/pandas/tests/indexes/period/test_arithmetic.py
@@ -730,11 +730,12 @@ def test_pi_ops(self):
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
- exp = pd.Index([0, 1, 2, 3], name='idx')
+ off = idx.freq
+ exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
- exp = pd.Index([0, -1, -2, -3], name='idx')
+ exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name='idx')
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('ng', ["str", 1.5])
@@ -864,14 +865,15 @@ def test_pi_sub_period(self):
freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
- exp = pd.Index([-12, -11, -10, -9], name='idx')
+ off = idx.freq
+ exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
- exp = pd.Index([12, 11, 10, 9], name='idx')
+ exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
@@ -898,11 +900,12 @@ def test_pi_sub_period_nat(self):
freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
- exp = pd.Index([-12, np.nan, -10, -9], name='idx')
+ off = idx.freq
+ exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
- exp = pd.Index([12, np.nan, 10, 9], name='idx')
+ exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name='idx')
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index f43ab0704f0f4..ffc375ba12e34 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -572,7 +572,7 @@ def test_strftime(self):
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
- assert result == 4
+ assert result == 4 * right.freq
with pytest.raises(period.IncompatibleFrequency):
left - Period('2007-01', freq='M')
@@ -1064,8 +1064,9 @@ def test_sub(self):
dt1 = Period('2011-01-01', freq='D')
dt2 = Period('2011-01-15', freq='D')
- assert dt1 - dt2 == -14
- assert dt2 - dt1 == 14
+ off = dt1.freq
+ assert dt1 - dt2 == -14 * off
+ assert dt2 - dt1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 95836f046195a..f4bdb7ba86aaf 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -517,8 +517,9 @@ def test_ops_series_period(self):
assert ser.dtype == object
per = pd.Period('2015-01-10', freq='D')
+ off = per.freq
# dtype will be object because of original dtype
- expected = pd.Series([9, 8], name='xxx', dtype=object)
+ expected = pd.Series([9 * off, 8 * off], name='xxx', dtype=object)
tm.assert_series_equal(per - ser, expected)
tm.assert_series_equal(ser - per, -1 * expected)
@@ -526,7 +527,7 @@ def test_ops_series_period(self):
pd.Period('2015-01-04', freq='D')], name='xxx')
assert s2.dtype == object
- expected = pd.Series([4, 2], name='xxx', dtype=object)
+ expected = pd.Series([4 * off, 2 * off], name='xxx', dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -1 * expected)
| Discussed briefly in #20049.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21314 | 2018-06-04T14:42:47Z | 2018-06-29T00:41:00Z | 2018-06-29T00:41:00Z | 2020-04-05T17:41:50Z |
BUG: fix DataFrame.__getitem__ and .loc with non-list listlikes | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 3a8c81b5e9281..ccd14b7abe611 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -310,6 +310,8 @@ Indexing
- When ``.ix`` is asked for a missing integer label in a :class:`MultiIndex` with a first level of integer type, it now raises a ``KeyError``, consistently with the case of a flat :class:`Int64Index, rather than falling back to positional indexing (:issue:`21593`)
- Bug in :meth:`DatetimeIndex.reindex` when reindexing a tz-naive and tz-aware :class:`DatetimeIndex` (:issue:`8306`)
- Bug in :class:`DataFrame` when setting values with ``.loc`` and a timezone aware :class:`DatetimeIndex` (:issue:`11365`)
+- ``DataFrame.__getitem__`` now accepts dictionaries and dictionary keys as list-likes of labels, consistently with ``Series.__getitem__`` (:issue:`21294`)
+- Fixed ``DataFrame[np.nan]`` when columns are non-unique (:issue:`21428`)
- Bug when indexing :class:`DatetimeIndex` with nanosecond resolution dates and timezones (:issue:`11679`)
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a420266561c5a..7659d01c045d5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2670,68 +2670,80 @@ def _ixs(self, i, axis=0):
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
- # shortcut if we are an actual column
- is_mi_columns = isinstance(self.columns, MultiIndex)
+ # shortcut if the key is in columns
try:
- if key in self.columns and not is_mi_columns:
- return self._getitem_column(key)
- except:
+ if self.columns.is_unique and key in self.columns:
+ if self.columns.nlevels > 1:
+ return self._getitem_multilevel(key)
+ return self._get_item_cache(key)
+ except (TypeError, ValueError):
+ # The TypeError correctly catches non hashable "key" (e.g. list)
+ # The ValueError can be removed once GH #21729 is fixed
pass
- # see if we can slice the rows
+ # Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
- return self._getitem_slice(indexer)
+ return self._slice(indexer, axis=0)
- if isinstance(key, (Series, np.ndarray, Index, list)):
- # either boolean or fancy integer index
- return self._getitem_array(key)
- elif isinstance(key, DataFrame):
+ # Do we have a (boolean) DataFrame?
+ if isinstance(key, DataFrame):
return self._getitem_frame(key)
- elif is_mi_columns:
- return self._getitem_multilevel(key)
+
+ # Do we have a (boolean) 1d indexer?
+ if com.is_bool_indexer(key):
+ return self._getitem_bool_array(key)
+
+ # We are left with two options: a single key, and a collection of keys,
+ # We interpret tuples as collections only for non-MultiIndex
+ is_single_key = isinstance(key, tuple) or not is_list_like(key)
+
+ if is_single_key:
+ if self.columns.nlevels > 1:
+ return self._getitem_multilevel(key)
+ indexer = self.columns.get_loc(key)
+ if is_integer(indexer):
+ indexer = [indexer]
else:
- return self._getitem_column(key)
+ if is_iterator(key):
+ key = list(key)
+ indexer = self.loc._convert_to_indexer(key, axis=1,
+ raise_missing=True)
- def _getitem_column(self, key):
- """ return the actual column """
+ # take() does not accept boolean indexers
+ if getattr(indexer, "dtype", None) == bool:
+ indexer = np.where(indexer)[0]
- # get column
- if self.columns.is_unique:
- return self._get_item_cache(key)
+ data = self._take(indexer, axis=1)
- # duplicate columns & possible reduce dimensionality
- result = self._constructor(self._data.get(key))
- if result.columns.is_unique:
- result = result[key]
+ if is_single_key:
+ # What does looking for a single key in a non-unique index return?
+ # The behavior is inconsistent. It returns a Series, except when
+ # - the key itself is repeated (test on data.shape, #9519), or
+ # - we have a MultiIndex on columns (test on self.columns, #21309)
+ if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
+ data = data[key]
- return result
-
- def _getitem_slice(self, key):
- return self._slice(key, axis=0)
+ return data
- def _getitem_array(self, key):
+ def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
- if com.is_bool_indexer(key):
- # warning here just in case -- previously __setitem__ was
- # reindexing but __getitem__ was not; it seems more reasonable to
- # go with the __setitem__ behavior since that is more consistent
- # with all other indexing behavior
- if isinstance(key, Series) and not key.index.equals(self.index):
- warnings.warn("Boolean Series key will be reindexed to match "
- "DataFrame index.", UserWarning, stacklevel=3)
- elif len(key) != len(self.index):
- raise ValueError('Item wrong length %d instead of %d.' %
- (len(key), len(self.index)))
- # check_bool_indexer will throw exception if Series key cannot
- # be reindexed to match DataFrame rows
- key = check_bool_indexer(self.index, key)
- indexer = key.nonzero()[0]
- return self._take(indexer, axis=0)
- else:
- indexer = self.loc._convert_to_indexer(key, axis=1,
- raise_missing=True)
- return self._take(indexer, axis=1)
+ # warning here just in case -- previously __setitem__ was
+ # reindexing but __getitem__ was not; it seems more reasonable to
+ # go with the __setitem__ behavior since that is more consistent
+ # with all other indexing behavior
+ if isinstance(key, Series) and not key.index.equals(self.index):
+ warnings.warn("Boolean Series key will be reindexed to match "
+ "DataFrame index.", UserWarning, stacklevel=3)
+ elif len(key) != len(self.index):
+ raise ValueError('Item wrong length %d instead of %d.' %
+ (len(key), len(self.index)))
+
+ # check_bool_indexer will throw exception if Series key cannot
+ # be reindexed to match DataFrame rows
+ key = check_bool_indexer(self.index, key)
+ indexer = key.nonzero()[0]
+ return self._take(indexer, axis=0)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 28299fbe61daf..1feddf004058a 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -148,9 +148,10 @@ def _init_dict(self, data, index, columns, dtype=None):
if index is None:
index = extract_index(list(data.values()))
- sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
- fill_value=self._default_fill_value,
- copy=True, dtype=dtype)
+ def sp_maker(x):
+ return SparseArray(x, kind=self._default_kind,
+ fill_value=self._default_fill_value,
+ copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
@@ -397,9 +398,10 @@ def _sanitize_column(self, key, value, **kwargs):
sanitized_column : SparseArray
"""
- sp_maker = lambda x, index=None: SparseArray(
- x, index=index, fill_value=self._default_fill_value,
- kind=self._default_kind)
+ def sp_maker(x, index=None):
+ return SparseArray(x, index=index,
+ fill_value=self._default_fill_value,
+ kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
@@ -428,18 +430,6 @@ def _sanitize_column(self, key, value, **kwargs):
# always return a SparseArray!
return clean
- def __getitem__(self, key):
- """
- Retrieve column or slice from DataFrame
- """
- if isinstance(key, slice):
- date_rng = self.index[key]
- return self.reindex(date_rng)
- elif isinstance(key, (np.ndarray, list, Series)):
- return self._getitem_array(key)
- else:
- return self._get_item_cache(key)
-
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index e7fb765128738..bef38288ff3a5 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -501,9 +501,11 @@ def test_constructor_dict_of_tuples(self):
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
- check = lambda result, expected: tm.assert_frame_equal(
- result, expected, check_dtype=True, check_index_type=True,
- check_column_type=True, check_names=True)
+ def check(result, expected):
+ return tm.assert_frame_equal(result, expected, check_dtype=True,
+ check_index_type=True,
+ check_column_type=True,
+ check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
@@ -1655,19 +1657,21 @@ def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
- # allow single nans to succeed
indexer = np.arange(len(df.columns))[isna(df.columns)]
- if len(indexer) == 1:
- tm.assert_series_equal(df.iloc[:, indexer[0]],
- df.loc[:, np.nan])
-
- # multiple nans should fail
- else:
-
+ # No NaN found -> error
+ if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
+ # single nan should result in Series
+ elif len(indexer) == 1:
+ tm.assert_series_equal(df.iloc[:, indexer[0]],
+ df.loc[:, np.nan])
+ # multiple nans should result in DataFrame
+ else:
+ tm.assert_frame_equal(df.iloc[:, indexer],
+ df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
@@ -1683,6 +1687,11 @@ def f():
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
+ # GH 21428 (non-unique columns)
+ df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
+ columns=[np.nan, 1, 2, 2])
+ check(df)
+
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 90f3bede482c6..9ca2b7e3c8a6a 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -92,45 +92,46 @@ def test_get(self):
result = df.get(None)
assert result is None
- def test_getitem_iterator(self):
+ def test_loc_iterable(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
- idx = iter(['A', 'B', 'C'])
- result = self.frame.loc[:, idx]
- expected = self.frame.loc[:, ['A', 'B', 'C']]
- assert_frame_equal(result, expected)
+ @pytest.mark.parametrize(
+ "idx_type",
+ [list, iter, Index, set,
+ lambda l: dict(zip(l, range(len(l)))),
+ lambda l: dict(zip(l, range(len(l)))).keys()],
+ ids=["list", "iter", "Index", "set", "dict", "dict_keys"])
+ @pytest.mark.parametrize("levels", [1, 2])
+ def test_getitem_listlike(self, idx_type, levels):
+ # GH 21294
+
+ if levels == 1:
+ frame, missing = self.frame, 'food'
+ else:
+ # MultiIndex columns
+ frame = DataFrame(randn(8, 3),
+ columns=Index([('foo', 'bar'), ('baz', 'qux'),
+ ('peek', 'aboo')],
+ name=('sth', 'sth2')))
+ missing = ('good', 'food')
- def test_getitem_list(self):
- self.frame.columns.name = 'foo'
+ keys = [frame.columns[1], frame.columns[0]]
+ idx = idx_type(keys)
+ idx_check = list(idx_type(keys))
- result = self.frame[['B', 'A']]
- result2 = self.frame[Index(['B', 'A'])]
+ result = frame[idx]
- expected = self.frame.loc[:, ['B', 'A']]
- expected.columns.name = 'foo'
+ expected = frame.loc[:, idx_check]
+ expected.columns.names = frame.columns.names
assert_frame_equal(result, expected)
- assert_frame_equal(result2, expected)
- assert result.columns.name == 'foo'
-
- with tm.assert_raises_regex(KeyError, 'not in index'):
- self.frame[['B', 'A', 'food']]
+ idx = idx_type(keys + [missing])
with tm.assert_raises_regex(KeyError, 'not in index'):
- self.frame[Index(['B', 'A', 'foo'])]
-
- # tuples
- df = DataFrame(randn(8, 3),
- columns=Index([('foo', 'bar'), ('baz', 'qux'),
- ('peek', 'aboo')], name=('sth', 'sth2')))
-
- result = df[[('foo', 'bar'), ('baz', 'qux')]]
- expected = df.iloc[:, :2]
- assert_frame_equal(result, expected)
- assert result.columns.names == ('sth', 'sth2')
+ frame[idx]
def test_getitem_callable(self):
# GH 12533
@@ -223,7 +224,8 @@ def test_setitem_callable(self):
def test_setitem_other_callable(self):
# GH 13299
- inc = lambda x: x + 1
+ def inc(x):
+ return x + 1
df = pd.DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
@@ -2082,7 +2084,8 @@ def test_reindex_level(self):
icol = ['jim', 'joe', 'jolie']
def verify_first_level(df, level, idx, check_index_type=True):
- f = lambda val: np.nonzero(df[level] == val)[0]
+ def f(val):
+ return np.nonzero(df[level] == val)[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[i].set_index(icol)
| - [x] closes #21294
- [x] closes #21428
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xref #21309 , but worth fixing separately | https://api.github.com/repos/pandas-dev/pandas/pulls/21313 | 2018-06-04T13:49:12Z | 2018-07-07T14:24:57Z | 2018-07-07T14:24:56Z | 2018-07-08T08:24:12Z |
REGR: allow merging on object boolean columns | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 1a8b1603daaaa..07120e26b4ecd 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -65,15 +65,14 @@ In addition, ordering comparisons will raise a ``TypeError`` in the future.
a tz-aware time instead of tz-naive (:issue:`21267`) and :attr:`DatetimeIndex.date`
returned incorrect date when the input date has a non-UTC timezone (:issue:`21230`).
- Fixed regression in :meth:`pandas.io.json.json_normalize` when called with ``None`` values
- in nested levels in JSON (:issue:`21158`).
+ in nested levels in JSON, and to not drop keys with value as `None` (:issue:`21158`, :issue:`21356`).
- Bug in :meth:`~DataFrame.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
- Fixed regression in constructors coercing NA values like ``None`` to strings when passing ``dtype=str`` (:issue:`21083`)
- Regression in :func:`pivot_table` where an ordered ``Categorical`` with missing
values for the pivot's ``index`` would give a mis-aligned result (:issue:`21133`)
-- Fixed Regression in :func:`nested_to_record` which now flattens list of dictionaries and doesnot drop keys with value as `None` (:issue:`21356`)
-
+- Fixed regression in merging on boolean index/columns (:issue:`21119`).
.. _whatsnew_0231.performance:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 73aba4d4e044b..e38c069b3c3fb 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -28,6 +28,7 @@
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
+ is_bool_dtype,
is_list_like,
is_datetimelike,
_ensure_int64,
@@ -974,9 +975,14 @@ def _maybe_coerce_merge_keys(self):
# Check if we are trying to merge on obviously
# incompatible dtypes GH 9780, GH 15800
- elif is_numeric_dtype(lk) and not is_numeric_dtype(rk):
+
+ # boolean values are considered as numeric, but are still allowed
+ # to be merged on object boolean values
+ elif ((is_numeric_dtype(lk) and not is_bool_dtype(lk))
+ and not is_numeric_dtype(rk)):
raise ValueError(msg)
- elif not is_numeric_dtype(lk) and is_numeric_dtype(rk):
+ elif (not is_numeric_dtype(lk)
+ and (is_numeric_dtype(rk) and not is_bool_dtype(rk))):
raise ValueError(msg)
elif is_datetimelike(lk) and not is_datetimelike(rk):
raise ValueError(msg)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 8e639edd34b18..037bd9cc7cd18 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1526,6 +1526,27 @@ def test_merge_on_ints_floats_warning(self):
result = B.merge(A, left_on='Y', right_on='X')
assert_frame_equal(result, expected[['Y', 'X']])
+ def test_merge_incompat_infer_boolean_object(self):
+ # GH21119: bool + object bool merge OK
+ df1 = DataFrame({'key': Series([True, False], dtype=object)})
+ df2 = DataFrame({'key': [True, False]})
+
+ expected = DataFrame({'key': [True, False]}, dtype=object)
+ result = pd.merge(df1, df2, on='key')
+ assert_frame_equal(result, expected)
+ result = pd.merge(df2, df1, on='key')
+ assert_frame_equal(result, expected)
+
+ # with missing value
+ df1 = DataFrame({'key': Series([True, False, np.nan], dtype=object)})
+ df2 = DataFrame({'key': [True, False]})
+
+ expected = DataFrame({'key': [True, False]}, dtype=object)
+ result = pd.merge(df1, df2, on='key')
+ assert_frame_equal(result, expected)
+ result = pd.merge(df2, df1, on='key')
+ assert_frame_equal(result, expected)
+
@pytest.mark.parametrize('df1_vals, df2_vals', [
([0, 1, 2], ["0", "1", "2"]),
([0.0, 1.0, 2.0], ["0", "1", "2"]),
@@ -1538,6 +1559,8 @@ def test_merge_on_ints_floats_warning(self):
pd.date_range('20130101', periods=3, tz='US/Eastern')),
([0, 1, 2], Series(['a', 'b', 'a']).astype('category')),
([0.0, 1.0, 2.0], Series(['a', 'b', 'a']).astype('category')),
+ # TODO ([0, 1], pd.Series([False, True], dtype=bool)),
+ ([0, 1], pd.Series([False, True], dtype=object))
])
def test_merge_incompat_dtypes(self, df1_vals, df2_vals):
# GH 9780, GH 15800
| Fixes https://github.com/pandas-dev/pandas/issues/21119
This is a quick/hacky fix for the specific case in that issue, i.e. merging boolean / object boolean columns.
However, I have the feeling we should do a more general solution for cases where object dtyped columns might be actually mergeable. The problem is that inferring the type can be costly? (and eg when having actual strings object columns, this inferring was also unnecessary and thus will unneeded slow down `merge`?) | https://api.github.com/repos/pandas-dev/pandas/pulls/21310 | 2018-06-04T08:41:20Z | 2018-06-08T17:44:18Z | 2018-06-08T17:44:18Z | 2018-06-12T16:30:37Z |
TST: fix shebang typo | diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index 9f1ac8b1e677b..eb40e5521f7f1 100755
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -1,4 +1,4 @@
-#!/usr/env/bin python
+#!/usr/bin/env python
"""
self-contained to write legacy storage (pickle/msgpack) files
| Fix shebang typo in generate_legacy_storage_files.py
- [x] closes #21306 | https://api.github.com/repos/pandas-dev/pandas/pulls/21307 | 2018-06-04T05:06:39Z | 2018-06-04T11:21:23Z | 2018-06-04T11:21:22Z | 2018-06-05T06:24:05Z |
Added parameter to the autocorr plot and code for partial autocorr plot | diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 150c9274d4e5c..da6b64e37f6ed 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -602,13 +602,15 @@ def lag_plot(series, lag=1, ax=None, **kwds):
return ax
-def autocorrelation_plot(series, ax=None, **kwds):
+def autocorrelation_plot(series, ax=None, n_lags=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
+ n_lags : int, optional
+ Number of lags to be plotted. Length of the series by default.
kwds : keywords
Options to pass to matplotlib plotting method
@@ -617,17 +619,23 @@ def autocorrelation_plot(series, ax=None, **kwds):
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
+ if n_lags:
+ if type(n_lags) is not int or type(n_lags) is not np.int:
+ raise TypeError('Passed non integer number of lags.')
+ if n_lags > len(x):
+ raise ValueError('Number of lags cannot be larger '
+ 'that the length of the series.')
n = len(series)
data = np.asarray(series)
if ax is None:
- ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
+ ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) *
(data[h:] - mean)).sum() / float(n) / c0
- x = np.arange(n) + 1
+ x = (np.arange(n) + 1).astype(int)
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
@@ -638,8 +646,57 @@ def r(h):
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
- ax.plot(x, y, **kwds)
+ if n_lags:
+ ax.plot(x[:n_lags], y[:n_lags], **kwds)
+ else:
+ ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
+
+
+def partial_autocorrelation_plot(series, ax=None, n_lags=40,
+ method='ywunbiased', **kwds):
+ """Partial autocorrelation plot for time series.
+
+ Parameters:
+ -----------
+ series: Time series
+ ax: Matplotlib axis object, optional
+ n_lags : int, optional
+ The largest lag for which pacf is returned, the default is 40
+ method : {'ywunbiased', 'ywmle', 'ols'}
+ specifies which method for the calculations to use:
+
+ - yw or ywunbiased : yule walker with bias correction in denominator
+ for acovf. Default.
+ - ywm or ywmle : yule walker without bias correction
+ - ols - regression of time series on lags of it and on constant
+ - ld or ldunbiased : Levinson-Durbin recursion with bias correction
+ - ldb or ldbiased : Levinson-Durbin recursion without bias correction
+ kwds : keywords
+ Options to pass to matplotlib plotting method
+
+ Returns:
+ -----------
+ ax: Matplotlib axis object
+ """
+ from statsmodels.tsa.stattools import pacf
+ if type(n_lags) is not int or type(n_lags) is not np.int:
+ raise TypeError('Passed non integer number of lags.')
+ n = len(series)
+ if ax is None:
+ ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
+ ret = pacf(series, n_lags)
+ ax.plot(ret, **kwds)
+ z95 = 1.959963984540054
+ z99 = 2.5758293035489004
+ ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
+ ax.axhline(y=z95 / np.sqrt(n), color='grey')
+ ax.axhline(y=0.0, color='black')
+ ax.axhline(y=-z95 / np.sqrt(n), color='grey')
+ ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
+ ax.set_xlabel("Lag")
+ ax.set_ylabel("Partial autocorrelation")
+ return ax
| Added additional parameters to the autocorrelation_plot to be able to plot a specified number of lags and additional code for plotting the partial autocorrelation plot together with p and q value.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] Additional argument to the autocorrelation_plot allowing to plot the desired number of lags, thus increase clarity and readibility of the plot. Additional function allowing to plot partial autocorrelation code depending on the statsmodels library.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21302 | 2018-06-03T12:06:59Z | 2018-11-01T01:39:57Z | null | 2018-11-01T01:39:57Z |
Added argument to autocorrelation_plot | diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 150c9274d4e5c..20ae041d1d9f5 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -602,7 +602,7 @@ def lag_plot(series, lag=1, ax=None, **kwds):
return ax
-def autocorrelation_plot(series, ax=None, **kwds):
+def autocorrelation_plot(series, ax=None, n_samples=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
@@ -638,7 +638,10 @@ def r(h):
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
- ax.plot(x, y, **kwds)
+ if n_samples:
+ ax.plot(x[:n_samples], y[:n_samples], **kwds)
+ else:
+ ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
| Added additional parameter to autocorrelation_plot to plot the specified number of lags in case of long time series.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21301 | 2018-06-03T10:30:30Z | 2018-06-03T12:09:09Z | null | 2023-05-11T01:17:55Z |
BUG: encoding error in to_csv compression | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index f2bc81eea186b..db4f4acc7ee16 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -92,6 +92,7 @@ I/O
- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`)
- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`)
+- Bug in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
-
Plotting
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 0be2a180fbfa2..7f660e2644fa4 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -9,6 +9,7 @@
import numpy as np
from pandas.core.dtypes.missing import notna
+from pandas.core.dtypes.inference import is_file_like
from pandas.core.index import Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, range, zip)
@@ -127,14 +128,19 @@ def save(self):
else:
encoding = self.encoding
- if hasattr(self.path_or_buf, 'write'):
- f = self.path_or_buf
- close = False
+ # PR 21300 uses string buffer to receive csv writing and dump into
+ # file-like output with compression as option. GH 21241, 21118
+ f = StringIO()
+ if not is_file_like(self.path_or_buf):
+ # path_or_buf is path
+ path_or_buf = self.path_or_buf
+ elif hasattr(self.path_or_buf, 'name'):
+ # path_or_buf is file handle
+ path_or_buf = self.path_or_buf.name
else:
- f, handles = _get_handle(self.path_or_buf, self.mode,
- encoding=encoding,
- compression=None)
- close = True if self.compression is None else False
+ # path_or_buf is file-like IO objects.
+ f = self.path_or_buf
+ path_or_buf = None
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
@@ -151,18 +157,16 @@ def save(self):
self._save()
finally:
- # GH 17778 handles compression for byte strings.
- if not close and self.compression:
- f.close()
- with open(f.name, 'r') as f:
- data = f.read()
- f, handles = _get_handle(f.name, self.mode,
+ # GH 17778 handles zip compression for byte strings separately.
+ buf = f.getvalue()
+ if path_or_buf:
+ f, handles = _get_handle(path_or_buf, self.mode,
encoding=encoding,
compression=self.compression)
- f.write(data)
- close = True
- if close:
+ f.write(buf)
f.close()
+ for _fh in handles:
+ _fh.close()
def _save_header(self):
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index e4829ebf48561..60dc336a85388 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -919,29 +919,45 @@ def test_to_csv_path_is_none(self):
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
- def test_to_csv_compression(self, compression):
-
- df = DataFrame([[0.123456, 0.234567, 0.567567],
- [12.32112, 123123.2, 321321.2]],
- index=['A', 'B'], columns=['X', 'Y', 'Z'])
+ @pytest.mark.parametrize('df,encoding', [
+ (DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z']), None),
+ # GH 21241, 21118
+ (DataFrame([['abc', 'def', 'ghi']], columns=['X', 'Y', 'Z']), 'ascii'),
+ (DataFrame(5 * [[123, u"你好", u"世界"]],
+ columns=['X', 'Y', 'Z']), 'gb2312'),
+ (DataFrame(5 * [[123, u"Γειά σου", u"Κόσμε"]],
+ columns=['X', 'Y', 'Z']), 'cp737')
+ ])
+ def test_to_csv_compression(self, df, encoding, compression):
with ensure_clean() as filename:
- df.to_csv(filename, compression=compression)
+ df.to_csv(filename, compression=compression, encoding=encoding)
# test the round trip - to_csv -> read_csv
- rs = read_csv(filename, compression=compression,
- index_col=0)
- assert_frame_equal(df, rs)
+ result = read_csv(filename, compression=compression,
+ index_col=0, encoding=encoding)
+
+ with open(filename, 'w') as fh:
+ df.to_csv(fh, compression=compression, encoding=encoding)
+
+ result_fh = read_csv(filename, compression=compression,
+ index_col=0, encoding=encoding)
+ assert_frame_equal(df, result)
+ assert_frame_equal(df, result_fh)
# explicitly make sure file is compressed
with tm.decompress_file(filename, compression) as fh:
- text = fh.read().decode('utf8')
+ text = fh.read().decode(encoding or 'utf8')
for col in df.columns:
assert col in text
with tm.decompress_file(filename, compression) as fh:
- assert_frame_equal(df, read_csv(fh, index_col=0))
+ assert_frame_equal(df, read_csv(fh,
+ index_col=0,
+ encoding=encoding))
def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index e369dfda6deac..f98962685ad9a 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -137,29 +137,45 @@ def test_to_csv_path_is_none(self):
csv_str = s.to_csv(path=None)
assert isinstance(csv_str, str)
- def test_to_csv_compression(self, compression):
-
- s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'],
- name='X')
+ @pytest.mark.parametrize('s,encoding', [
+ (Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'],
+ name='X'), None),
+ # GH 21241, 21118
+ (Series(['abc', 'def', 'ghi'], name='X'), 'ascii'),
+ (Series(["123", u"你好", u"世界"], name=u"中文"), 'gb2312'),
+ (Series(["123", u"Γειά σου", u"Κόσμε"], name=u"Ελληνικά"), 'cp737')
+ ])
+ def test_to_csv_compression(self, s, encoding, compression):
with ensure_clean() as filename:
- s.to_csv(filename, compression=compression, header=True)
+ s.to_csv(filename, compression=compression, encoding=encoding,
+ header=True)
# test the round trip - to_csv -> read_csv
- rs = pd.read_csv(filename, compression=compression,
- index_col=0, squeeze=True)
- assert_series_equal(s, rs)
+ result = pd.read_csv(filename, compression=compression,
+ encoding=encoding, index_col=0, squeeze=True)
+
+ with open(filename, 'w') as fh:
+ s.to_csv(fh, compression=compression, encoding=encoding,
+ header=True)
+
+ result_fh = pd.read_csv(filename, compression=compression,
+ encoding=encoding, index_col=0,
+ squeeze=True)
+ assert_series_equal(s, result)
+ assert_series_equal(s, result_fh)
# explicitly ensure file was compressed
with tm.decompress_file(filename, compression) as fh:
- text = fh.read().decode('utf8')
+ text = fh.read().decode(encoding or 'utf8')
assert s.name in text
with tm.decompress_file(filename, compression) as fh:
assert_series_equal(s, pd.read_csv(fh,
index_col=0,
- squeeze=True))
+ squeeze=True,
+ encoding=encoding))
class TestSeriesIO(TestData):
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 88e469731060d..7034e9ac2e0c8 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -252,12 +252,13 @@ def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as filename:
with open(filename, 'w') as fh:
getattr(obj, method)(fh, compression=compression_only)
- # GH 17778
- assert fh.closed
+ assert not fh.closed
+ assert fh.closed
compressed = os.path.getsize(filename)
with tm.ensure_clean() as filename:
with open(filename, 'w') as fh:
getattr(obj, method)(fh, compression=None)
assert not fh.closed
+ assert fh.closed
uncompressed = os.path.getsize(filename)
assert uncompressed > compressed
| - [x] closes #21241
closes #21118
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Fix a problem where encoding wasn't handled properly in to_csv compression in Python 3. It was caused by dumping uncompressed csv on disk and reading it back to memory without passing specified encoding. Then platform tried to decode using default locale encoding which may or may not succeed.
This PR added tests for non-ascii data with csv compression. Also, by using string buffer, this PR removed repeated disk roundtrip and redundant encoding/decoding which caused UnicodeDecodeError. There is performance improvement compared to 0.22 and 0.23. It also supports file-like object as input path_or_buf.
Before this PR:
```
>>> df = DataFrame(100 * [[123, "abc", u"样本1", u"样本2"]], columns=['A', 'B', 'C', 'D'])
>>>
>>> def test_to_csv(df):
... df.to_csv(
... path_or_buf='test',
... encoding='utf8',
... compression='zip',
... quoting=1,
... sep='\t',
... index=False)
...
>>> timeit(lambda: test_to_csv(df), number=5000)
11.856349980007508
```
After this PR:
```
>>> df = DataFrame(100 * [[123, "abc", u"样本1", u"样本2"]], columns=['A', 'B', 'C', 'D'])
>>>
>>> def test_to_csv(df):
... df.to_csv(
... path_or_buf='test',
... encoding='utf8',
... compression='zip',
... quoting=1,
... sep='\t',
... index=False)
...
>>> timeit(lambda: test_to_csv(df), number=5000)
5.459916951993364
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/21300 | 2018-06-03T10:03:06Z | 2018-06-05T04:54:31Z | 2018-06-05T04:54:31Z | 2018-06-20T18:11:54Z |
Add Featuretools to Pandas Ecosystem Page | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 30cdb06b28487..6714398084186 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -38,7 +38,10 @@ Statsmodels leverages pandas objects as the underlying data container for comput
Use pandas DataFrames in your `scikit-learn <http://scikit-learn.org/>`__
ML pipeline.
+`Featuretools <https://github.com/featuretools/featuretools/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community.
.. _ecosystem.visualization:
| Adding a library for automated feature engineering that is built on top of Pandas that I think would be very relevant to users of pandas.
More info on our website (https://www.featuretools.com/) and GitHub (https://github.com/featuretools/featuretools/). | https://api.github.com/repos/pandas-dev/pandas/pulls/21297 | 2018-06-02T17:52:57Z | 2018-06-05T11:08:30Z | 2018-06-05T11:08:30Z | 2018-06-12T16:30:36Z |
DOC: Fixing 'a la' confusion in series.quantile documentation | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 4cf29319f703e..d8bdd9ac9ed22 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1837,7 +1837,7 @@ def round(self, decimals=0, *args, **kwargs):
def quantile(self, q=0.5, interpolation='linear'):
"""
- Return value at the given quantile, a la numpy.percentile.
+ Return value at the given quantile.
Parameters
----------
@@ -1876,6 +1876,7 @@ def quantile(self, q=0.5, interpolation='linear'):
See Also
--------
pandas.core.window.Rolling.quantile
+ numpy.percentile
"""
self._check_percentile(q)
| Just stating that numpy.percentile also offers something similar to this
- [x] closes #21292
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] Just clarified quantile doc a bit
| https://api.github.com/repos/pandas-dev/pandas/pulls/21293 | 2018-06-02T05:23:51Z | 2018-06-06T07:31:12Z | 2018-06-06T07:31:11Z | 2018-06-06T07:31:20Z |
BUG: invalid rolling window on empty input | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index f2bc81eea186b..733633c38c281 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -52,6 +52,7 @@ Groupby/Resample/Rolling
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True``
+- Bug in :func:`pandas.DataFrame.rolling` and :func:`pandas.Series.rolling` which incorrectly accepted a 0 window size rather than raising (:issue:`21286`)
Strings
^^^^^^^
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 015e7f7913ed0..9d0f9dc4f75f9 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -602,8 +602,8 @@ def validate(self):
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
- if window < 0:
- raise ValueError("window must be non-negative")
+ if window <= 0:
+ raise ValueError("window must be > 0 ")
try:
import scipy.signal as sig
except ImportError:
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 74f2c977e0db2..cfd88f41f855e 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -389,8 +389,8 @@ def test_constructor(self, which):
c(window=2, min_periods=1, center=False)
# GH 13383
- c(0)
with pytest.raises(ValueError):
+ c(0)
c(-1)
# not valid
@@ -409,7 +409,6 @@ def test_constructor_with_win_type(self, which):
# GH 13383
o = getattr(self, which)
c = o.rolling
- c(0, win_type='boxcar')
with pytest.raises(ValueError):
c(-1, win_type='boxcar')
| Added a raise Value Error for the case when window==0, Kindly refer to issue #21286 for the same
- [x] closes #21286
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] A simple except cases added
| https://api.github.com/repos/pandas-dev/pandas/pulls/21291 | 2018-06-02T05:05:04Z | 2018-06-08T16:25:52Z | 2018-06-08T16:25:52Z | 2018-06-12T16:30:36Z |
PERF: improve performance of groupby rank (#21237) | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 7777322071957..0725bbeb6c36d 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -5,7 +5,7 @@
import numpy as np
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
- TimeGrouper, Categorical)
+ TimeGrouper, Categorical, Timestamp)
import pandas.util.testing as tm
from .pandas_vb_common import setup # noqa
@@ -385,6 +385,25 @@ def time_dtype_as_field(self, dtype, method, application):
self.as_field_method()
+class RankWithTies(object):
+ # GH 21237
+ goal_time = 0.2
+ param_names = ['dtype', 'tie_method']
+ params = [['float64', 'float32', 'int64', 'datetime64'],
+ ['first', 'average', 'dense', 'min', 'max']]
+
+ def setup(self, dtype, tie_method):
+ N = 10**4
+ if dtype == 'datetime64':
+ data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
+ else:
+ data = np.array([1] * N, dtype=dtype)
+ self.df = DataFrame({'values': data, 'key': ['foo'] * N})
+
+ def time_rank_ties(self, dtype, tie_method):
+ self.df.groupby('key').rank(method=tie_method)
+
+
class Float32(object):
# GH 13335
goal_time = 0.2
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 68c1839221508..eaeda8bf190da 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -65,6 +65,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Improved performance of :func:`Series.describe` in case of numeric dtpyes (:issue:`21274`)
+- Improved performance of :func:`pandas.core.groupby.GroupBy.rank` when dealing with tied rankings (:issue:`21237`)
-
.. _whatsnew_0240.docs:
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index b3e9b7c9e69ee..0062a6c8d31ab 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -429,7 +429,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
is_datetimelike : bool, default False
unused in this method but provided for call compatibility with other
Cython transformations
- ties_method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
+ ties_method : {'average', 'min', 'max', 'first', 'dense'}, default
+ 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
@@ -514,26 +515,22 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
dups += 1
sum_ranks += i - grp_start + 1
- # if keep_na, check for missing values and assign back
- # to the result where appropriate
-
- if keep_na and mask[_as[i]]:
- grp_na_count += 1
- out[_as[i], 0] = nan
- else:
- # this implementation is inefficient because it will
- # continue overwriting previously encountered dups
- # i.e. if 5 duplicated values are encountered it will
- # write to the result as follows (assumes avg tiebreaker):
- # 1
- # .5 .5
- # .33 .33 .33
- # .25 .25 .25 .25
- # .2 .2 .2 .2 .2
- #
- # could potentially be optimized to only write to the
- # result once the last duplicate value is encountered
- if tiebreak == TIEBREAK_AVERAGE:
+ # Update out only when there is a transition of values or labels.
+ # When a new value or group is encountered, go back #dups steps(
+ # the number of occurrence of current value) and assign the ranks
+ # based on the the starting index of the current group (grp_start)
+ # and the current index
+ if (i == N - 1 or
+ (masked_vals[_as[i]] != masked_vals[_as[i+1]]) or
+ (mask[_as[i]] ^ mask[_as[i+1]]) or
+ (labels[_as[i]] != labels[_as[i+1]])):
+ # if keep_na, check for missing values and assign back
+ # to the result where appropriate
+ if keep_na and mask[_as[i]]:
+ for j in range(i - dups + 1, i + 1):
+ out[_as[j], 0] = nan
+ grp_na_count = dups
+ elif tiebreak == TIEBREAK_AVERAGE:
for j in range(i - dups + 1, i + 1):
out[_as[j], 0] = sum_ranks / <float64_t>dups
elif tiebreak == TIEBREAK_MIN:
@@ -552,38 +549,38 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
for j in range(i - dups + 1, i + 1):
out[_as[j], 0] = grp_vals_seen
- # look forward to the next value (using the sorting in _as)
- # if the value does not equal the current value then we need to
- # reset the dups and sum_ranks, knowing that a new value is coming
- # up. the conditional also needs to handle nan equality and the
- # end of iteration
- if (i == N - 1 or
- (masked_vals[_as[i]] != masked_vals[_as[i+1]]) or
- (mask[_as[i]] ^ mask[_as[i+1]])):
- dups = sum_ranks = 0
- val_start = i
- grp_vals_seen += 1
- grp_tie_count +=1
-
- # Similar to the previous conditional, check now if we are moving
- # to a new group. If so, keep track of the index where the new
- # group occurs, so the tiebreaker calculations can decrement that
- # from their position. fill in the size of each group encountered
- # (used by pct calculations later). also be sure to reset any of
- # the items helping to calculate dups
- if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]:
- if tiebreak != TIEBREAK_DENSE:
- for j in range(grp_start, i + 1):
- grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
- else:
- for j in range(grp_start, i + 1):
- grp_sizes[_as[j], 0] = (grp_tie_count -
- (grp_na_count > 0))
- dups = sum_ranks = 0
- grp_na_count = 0
- grp_tie_count = 0
- grp_start = i + 1
- grp_vals_seen = 1
+ # look forward to the next value (using the sorting in _as)
+ # if the value does not equal the current value then we need to
+ # reset the dups and sum_ranks, knowing that a new value is
+ # coming up. the conditional also needs to handle nan equality
+ # and the end of iteration
+ if (i == N - 1 or
+ (masked_vals[_as[i]] != masked_vals[_as[i+1]]) or
+ (mask[_as[i]] ^ mask[_as[i+1]])):
+ dups = sum_ranks = 0
+ grp_vals_seen += 1
+ grp_tie_count += 1
+
+ # Similar to the previous conditional, check now if we are
+ # moving to a new group. If so, keep track of the index where
+ # the new group occurs, so the tiebreaker calculations can
+ # decrement that from their position. fill in the size of each
+ # group encountered (used by pct calculations later). also be
+ # sure to reset any of the items helping to calculate dups
+ if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]:
+ if tiebreak != TIEBREAK_DENSE:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = (i - grp_start + 1 -
+ grp_na_count)
+ else:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = (grp_tie_count -
+ (grp_na_count > 0))
+ dups = sum_ranks = 0
+ grp_na_count = 0
+ grp_tie_count = 0
+ grp_start = i + 1
+ grp_vals_seen = 1
if pct:
for i in range(N):
| - [x] closes #21237
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21285 | 2018-06-01T13:39:35Z | 2018-06-14T10:08:58Z | 2018-06-14T10:08:58Z | 2018-06-14T10:09:02Z |
BUG: DataFrame.asof() : Timezone Awareness / Naivety comparison TypeError (incorrect) | diff --git a/doc/source/io.rst b/doc/source/io.rst
index c2c8c1c17700f..b7a9c0e1a3cdf 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2883,6 +2883,20 @@ use the ``parse_dates`` keyword to parse those strings to datetimes:
read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings'])
+
+Parsing Dates
++++++++++++++
+
+Datetime-like values are normally automatically converted to the appropriate
+dtype when reading the excel file. But if you have a column of strings that
+*look* like dates (but are not actually formatted as dates in excel), you can
+use the `parse_dates` keyword to parse those strings to datetimes:
+
+.. code-block:: python
+
+ read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings'])
+
+
Cell Converters
+++++++++++++++
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2a191ef76473b..5a4e89adb2737 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2463,7 +2463,7 @@ def asof_locs(self, where, mask):
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
- result[(locs == 0) & (where < self.values[first])] = -1
+ result[(locs == 0) & (where.values < self.values[first])] = -1
return result
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
new file mode 100644
index 0000000000000..611b1abe57d31
--- /dev/null
+++ b/pandas/io/tests/test_excel.py
@@ -0,0 +1,2289 @@
+# pylint: disable=E1101
+
+from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems
+from datetime import datetime, date, time
+import sys
+import os
+from distutils.version import LooseVersion
+
+import warnings
+import operator
+import functools
+import nose
+
+from numpy import nan
+import numpy as np
+
+import pandas as pd
+from pandas import DataFrame, Index, MultiIndex
+from pandas.io.parsers import read_csv
+from pandas.io.excel import (
+ ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer,
+ _Openpyxl20Writer, _Openpyxl22Writer, register_writer, _XlsxWriter
+)
+from pandas.io.common import URLError
+from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
+from pandas.core.config import set_option, get_option
+import pandas.util.testing as tm
+
+
+def _skip_if_no_xlrd():
+ try:
+ import xlrd
+ ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
+ if ver < (0, 9):
+ raise nose.SkipTest('xlrd < 0.9, skipping')
+ except ImportError:
+ raise nose.SkipTest('xlrd not installed, skipping')
+
+
+def _skip_if_no_xlwt():
+ try:
+ import xlwt # NOQA
+ except ImportError:
+ raise nose.SkipTest('xlwt not installed, skipping')
+
+
+def _skip_if_no_openpyxl():
+ try:
+ import openpyxl # NOQA
+ except ImportError:
+ raise nose.SkipTest('openpyxl not installed, skipping')
+
+
+def _skip_if_no_xlsxwriter():
+ try:
+ import xlsxwriter # NOQA
+ except ImportError:
+ raise nose.SkipTest('xlsxwriter not installed, skipping')
+
+
+def _skip_if_no_excelsuite():
+ _skip_if_no_xlrd()
+ _skip_if_no_xlwt()
+ _skip_if_no_openpyxl()
+
+
+def _skip_if_no_boto():
+ try:
+ import boto # NOQA
+ except ImportError:
+ raise nose.SkipTest('boto not installed, skipping')
+
+
+_seriesd = tm.getSeriesData()
+_tsd = tm.getTimeSeriesData()
+_frame = DataFrame(_seriesd)[:10]
+_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
+_tsframe = tm.makeTimeDataFrame()[:5]
+_mixed_frame = _frame.copy()
+_mixed_frame['foo'] = 'bar'
+
+
+class SharedItems(object):
+
+ def setUp(self):
+ self.dirpath = tm.get_data_path()
+ self.frame = _frame.copy()
+ self.frame2 = _frame2.copy()
+ self.tsframe = _tsframe.copy()
+ self.mixed_frame = _mixed_frame.copy()
+
+ def get_csv_refdf(self, basename):
+ """
+ Obtain the reference data from read_csv with the Python engine.
+ Test data path is defined by pandas.util.testing.get_data_path()
+
+ Parameters
+ ----------
+
+ basename : str
+ File base name, excluding file extension.
+
+ Returns
+ -------
+
+ dfref : DataFrame
+ """
+ pref = os.path.join(self.dirpath, basename + '.csv')
+ dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python')
+ return dfref
+
+ def get_excelfile(self, basename):
+ """
+ Return test data ExcelFile instance. Test data path is defined by
+ pandas.util.testing.get_data_path()
+
+ Parameters
+ ----------
+
+ basename : str
+ File base name, excluding file extension.
+
+ Returns
+ -------
+
+ excel : io.excel.ExcelFile
+ """
+ return ExcelFile(os.path.join(self.dirpath, basename + self.ext))
+
+ def get_exceldf(self, basename, *args, **kwds):
+ """
+ Return test data DataFrame. Test data path is defined by
+ pandas.util.testing.get_data_path()
+
+ Parameters
+ ----------
+
+ basename : str
+ File base name, excluding file extension.
+
+ Returns
+ -------
+
+ df : DataFrame
+ """
+ pth = os.path.join(self.dirpath, basename + self.ext)
+ return read_excel(pth, *args, **kwds)
+
+
+class ReadingTestsBase(SharedItems):
+ # This is based on ExcelWriterBase
+ #
+ # Base class for test cases to run with different Excel readers.
+ # To add a reader test, define the following:
+ # 1. A check_skip function that skips your tests if your reader isn't
+ # installed.
+ # 2. Add a property ext, which is the file extension that your reader
+ # reades from. (needs to start with '.' so it's a valid path)
+ # 3. Add a property engine_name, which is the name of the reader class.
+ # For the reader this is not used for anything at the moment.
+
+ def setUp(self):
+ self.check_skip()
+ super(ReadingTestsBase, self).setUp()
+
+ def test_parse_cols_int(self):
+
+ dfref = self.get_csv_refdf('test1')
+ dfref = dfref.reindex(columns=['A', 'B', 'C'])
+ df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols=3)
+ df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
+ parse_cols=3)
+ # TODO add index to xls file)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ def test_parse_cols_list(self):
+
+ dfref = self.get_csv_refdf('test1')
+ dfref = dfref.reindex(columns=['B', 'C'])
+ df1 = self.get_exceldf('test1', 'Sheet1', index_col=0,
+ parse_cols=[0, 2, 3])
+ df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
+ parse_cols=[0, 2, 3])
+ # TODO add index to xls file)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ def test_parse_cols_str(self):
+
+ dfref = self.get_csv_refdf('test1')
+
+ df1 = dfref.reindex(columns=['A', 'B', 'C'])
+ df2 = self.get_exceldf('test1', 'Sheet1', index_col=0,
+ parse_cols='A:D')
+ df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
+ parse_cols='A:D')
+ # TODO add index to xls, read xls ignores index name ?
+ tm.assert_frame_equal(df2, df1, check_names=False)
+ tm.assert_frame_equal(df3, df1, check_names=False)
+
+ df1 = dfref.reindex(columns=['B', 'C'])
+ df2 = self.get_exceldf('test1', 'Sheet1', index_col=0,
+ parse_cols='A,C,D')
+ df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
+ parse_cols='A,C,D')
+ # TODO add index to xls file
+ tm.assert_frame_equal(df2, df1, check_names=False)
+ tm.assert_frame_equal(df3, df1, check_names=False)
+
+ df1 = dfref.reindex(columns=['B', 'C'])
+ df2 = self.get_exceldf('test1', 'Sheet1', index_col=0,
+ parse_cols='A,C:D')
+ df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0,
+ parse_cols='A,C:D')
+ tm.assert_frame_equal(df2, df1, check_names=False)
+ tm.assert_frame_equal(df3, df1, check_names=False)
+
+ def test_excel_stop_iterator(self):
+
+ parsed = self.get_exceldf('test2', 'Sheet1')
+ expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
+ tm.assert_frame_equal(parsed, expected)
+
+ def test_excel_cell_error_na(self):
+
+ parsed = self.get_exceldf('test3', 'Sheet1')
+ expected = DataFrame([[np.nan]], columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ def test_excel_passes_na(self):
+
+ excel = self.get_excelfile('test4')
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
+ na_values=['apple'])
+ expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
+ na_values=['apple'])
+ expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ # 13967
+ excel = self.get_excelfile('test5')
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
+ na_values=['apple'])
+ expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
+ na_values=['apple'])
+ expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
+ columns=['Test'])
+ tm.assert_frame_equal(parsed, expected)
+
+ def test_excel_table_sheet_by_index(self):
+
+ excel = self.get_excelfile('test1')
+ dfref = self.get_csv_refdf('test1')
+
+ df1 = read_excel(excel, 0, index_col=0)
+ df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ df1 = excel.parse(0, index_col=0)
+ df2 = excel.parse(1, skiprows=[1], index_col=0)
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
+ df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
+ tm.assert_frame_equal(df3, df1.ix[:-1])
+ tm.assert_frame_equal(df3, df4)
+
+ df3 = excel.parse(0, index_col=0, skipfooter=1)
+ df4 = excel.parse(0, index_col=0, skip_footer=1)
+ tm.assert_frame_equal(df3, df1.ix[:-1])
+ tm.assert_frame_equal(df3, df4)
+
+ import xlrd
+ with tm.assertRaises(xlrd.XLRDError):
+ read_excel(excel, 'asdf')
+
+ def test_excel_table(self):
+
+ dfref = self.get_csv_refdf('test1')
+
+ df1 = self.get_exceldf('test1', 'Sheet1', index_col=0)
+ df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0)
+ # TODO add index to file
+ tm.assert_frame_equal(df1, dfref, check_names=False)
+ tm.assert_frame_equal(df2, dfref, check_names=False)
+
+ df3 = self.get_exceldf('test1', 'Sheet1', index_col=0,
+ skipfooter=1)
+ df4 = self.get_exceldf('test1', 'Sheet1', index_col=0,
+ skip_footer=1)
+ tm.assert_frame_equal(df3, df1.ix[:-1])
+ tm.assert_frame_equal(df3, df4)
+
+ def test_reader_special_dtypes(self):
+
+ expected = DataFrame.from_items([
+ ("IntCol", [1, 2, -3, 4, 0]),
+ ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
+ ("BoolCol", [True, False, True, True, False]),
+ ("StrCol", [1, 2, 3, 4, 5]),
+ # GH5394 - this is why convert_float isn't vectorized
+ ("Str2Col", ["a", 3, "c", "d", "e"]),
+ ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
+ datetime(1905, 1, 1), datetime(2013, 12, 14),
+ datetime(2015, 3, 14)])
+ ])
+
+ basename = 'test_types'
+
+ # should read in correctly and infer types
+ actual = self.get_exceldf(basename, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ # if not coercing number, then int comes in as float
+ float_expected = expected.copy()
+ float_expected["IntCol"] = float_expected["IntCol"].astype(float)
+ float_expected.loc[1, "Str2Col"] = 3.0
+ actual = self.get_exceldf(basename, 'Sheet1', convert_float=False)
+ tm.assert_frame_equal(actual, float_expected)
+
+ # check setting Index (assuming xls and xlsx are the same here)
+ for icol, name in enumerate(expected.columns):
+ actual = self.get_exceldf(basename, 'Sheet1', index_col=icol)
+ exp = expected.set_index(name)
+ tm.assert_frame_equal(actual, exp)
+
+ # convert_float and converters should be different but both accepted
+ expected["StrCol"] = expected["StrCol"].apply(str)
+ actual = self.get_exceldf(
+ basename, 'Sheet1', converters={"StrCol": str})
+ tm.assert_frame_equal(actual, expected)
+
+ no_convert_float = float_expected.copy()
+ no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
+ actual = self.get_exceldf(basename, 'Sheet1', convert_float=False,
+ converters={"StrCol": str})
+ tm.assert_frame_equal(actual, no_convert_float)
+
+ # GH8212 - support for converters and missing values
+ def test_reader_converters(self):
+
+ basename = 'test_converters'
+
+ expected = DataFrame.from_items([
+ ("IntCol", [1, 2, -3, -1000, 0]),
+ ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
+ ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
+ ("StrCol", ['1', np.nan, '3', '4', '5']),
+ ])
+
+ converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
+ 'FloatCol': lambda x: 10 * x if x else np.nan,
+ 2: lambda x: 'Found' if x != '' else 'Not found',
+ 3: lambda x: str(x) if x else '',
+ }
+
+ # should read in correctly and set types of single cells (not array
+ # dtypes)
+ actual = self.get_exceldf(basename, 'Sheet1', converters=converters)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_reading_all_sheets(self):
+ # Test reading all sheetnames by setting sheetname to None,
+ # Ensure a dict is returned.
+ # See PR #9450
+ basename = 'test_multisheet'
+ dfs = self.get_exceldf(basename, sheetname=None)
+ expected_keys = ['Alpha', 'Beta', 'Charlie']
+ tm.assert_contains_all(expected_keys, dfs.keys())
+
+ def test_reading_multiple_specific_sheets(self):
+ # Test reading specific sheetnames by specifying a mixed list
+ # of integers and strings, and confirm that duplicated sheet
+ # references (positions/names) are removed properly.
+ # Ensure a dict is returned
+ # See PR #9450
+ basename = 'test_multisheet'
+ # Explicitly request duplicates. Only the set should be returned.
+ expected_keys = [2, 'Charlie', 'Charlie']
+ dfs = self.get_exceldf(basename, sheetname=expected_keys)
+ expected_keys = list(set(expected_keys))
+ tm.assert_contains_all(expected_keys, dfs.keys())
+ assert len(expected_keys) == len(dfs.keys())
+
+ def test_reading_all_sheets_with_blank(self):
+ # Test reading all sheetnames by setting sheetname to None,
+ # In the case where some sheets are blank.
+ # Issue #11711
+ basename = 'blank_with_header'
+ dfs = self.get_exceldf(basename, sheetname=None)
+ expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
+ tm.assert_contains_all(expected_keys, dfs.keys())
+
+ # GH6403
+ def test_read_excel_blank(self):
+ actual = self.get_exceldf('blank', 'Sheet1')
+ tm.assert_frame_equal(actual, DataFrame())
+
+ def test_read_excel_blank_with_header(self):
+ expected = DataFrame(columns=['col_1', 'col_2'])
+ actual = self.get_exceldf('blank_with_header', 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ # GH 12292 : error when read one empty column from excel file
+ def test_read_one_empty_col_no_header(self):
+ df = pd.DataFrame(
+ [["", 1, 100],
+ ["", 2, 200],
+ ["", 3, 300],
+ ["", 4, 400]]
+ )
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, 'no_header', index=False, header=False)
+ actual_header_none = read_excel(
+ path,
+ 'no_header',
+ parse_cols=[0],
+ header=None
+ )
+
+ actual_header_zero = read_excel(
+ path,
+ 'no_header',
+ parse_cols=[0],
+ header=0
+ )
+ expected = DataFrame()
+ tm.assert_frame_equal(actual_header_none, expected)
+ tm.assert_frame_equal(actual_header_zero, expected)
+
+ def test_read_one_empty_col_with_header(self):
+ _skip_if_no_xlwt()
+ _skip_if_no_openpyxl()
+
+ df = pd.DataFrame(
+ [["", 1, 100],
+ ["", 2, 200],
+ ["", 3, 300],
+ ["", 4, 400]]
+ )
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, 'with_header', index=False, header=True)
+ actual_header_none = read_excel(
+ path,
+ 'with_header',
+ parse_cols=[0],
+ header=None
+ )
+
+ actual_header_zero = read_excel(
+ path,
+ 'with_header',
+ parse_cols=[0],
+ header=0
+ )
+ expected_header_none = DataFrame(pd.Series([0], dtype='int64'))
+ tm.assert_frame_equal(actual_header_none, expected_header_none)
+ expected_header_zero = DataFrame(columns=[0], dtype='int64')
+ tm.assert_frame_equal(actual_header_zero, expected_header_zero)
+
+ def test_set_column_names_in_parameter(self):
+ # GH 12870 : pass down column names associated with
+ # keyword argument names
+ refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
+ [3, 'baz']], columns=['a', 'b'])
+
+ with ensure_clean(self.ext) as pth:
+ with ExcelWriter(pth) as writer:
+ refdf.to_excel(writer, 'Data_no_head',
+ header=False, index=False)
+ refdf.to_excel(writer, 'Data_with_head', index=False)
+
+ refdf.columns = ['A', 'B']
+
+ with ExcelFile(pth) as reader:
+ xlsdf_no_head = read_excel(reader, 'Data_no_head',
+ header=None, names=['A', 'B'])
+ xlsdf_with_head = read_excel(reader, 'Data_with_head',
+ index_col=None, names=['A', 'B'])
+
+ tm.assert_frame_equal(xlsdf_no_head, refdf)
+ tm.assert_frame_equal(xlsdf_with_head, refdf)
+
+ def test_date_conversion_overflow(self):
+ # GH 10001 : pandas.ExcelFile ignore parse_dates=False
+ expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
+ [pd.Timestamp('2016-03-16'), 'Jack Black'],
+ [1e+20, 'Timothy Brown']],
+ columns=['DateColWithBigInt', 'StringCol'])
+
+ result = self.get_exceldf('testdateoverflow')
+ tm.assert_frame_equal(result, expected)
+
+
+class XlrdTests(ReadingTestsBase):
+ """
+ This is the base class for the xlrd tests, and 3 different file formats
+ are supported: xls, xlsx, xlsm
+ """
+
+ def test_excel_read_buffer(self):
+
+ pth = os.path.join(self.dirpath, 'test1' + self.ext)
+ expected = read_excel(pth, 'Sheet1', index_col=0)
+ with open(pth, 'rb') as f:
+ actual = read_excel(f, 'Sheet1', index_col=0)
+ tm.assert_frame_equal(expected, actual)
+
+ with open(pth, 'rb') as f:
+ xls = ExcelFile(f)
+ actual = read_excel(xls, 'Sheet1', index_col=0)
+ tm.assert_frame_equal(expected, actual)
+
+ def test_read_xlrd_Book(self):
+ _skip_if_no_xlwt()
+
+ import xlrd
+ df = self.frame
+ with ensure_clean('.xls') as pth:
+ df.to_excel(pth, "SheetA")
+ book = xlrd.open_workbook(pth)
+
+ with ExcelFile(book, engine="xlrd") as xl:
+ result = read_excel(xl, "SheetA")
+ tm.assert_frame_equal(df, result)
+
+ result = read_excel(book, sheetname="SheetA", engine="xlrd")
+ tm.assert_frame_equal(df, result)
+
+ @tm.network
+ def test_read_from_http_url(self):
+ url = ('https://raw.github.com/pydata/pandas/master/'
+ 'pandas/io/tests/data/test1' + self.ext)
+ url_table = read_excel(url)
+ local_table = self.get_exceldf('test1')
+ tm.assert_frame_equal(url_table, local_table)
+
+ @tm.network(check_before_test=True)
+ def test_read_from_s3_url(self):
+ _skip_if_no_boto()
+
+ url = ('s3://pandas-test/test1' + self.ext)
+ url_table = read_excel(url)
+ local_table = self.get_exceldf('test1')
+ tm.assert_frame_equal(url_table, local_table)
+
+ @tm.slow
+ def test_read_from_file_url(self):
+
+ # FILE
+ if sys.version_info[:2] < (2, 6):
+ raise nose.SkipTest("file:// not supported with Python < 2.6")
+
+ localtable = os.path.join(self.dirpath, 'test1' + self.ext)
+ local_table = read_excel(localtable)
+
+ try:
+ url_table = read_excel('file://localhost/' + localtable)
+ except URLError:
+ # fails on some systems
+ import platform
+ raise nose.SkipTest("failing on %s" %
+ ' '.join(platform.uname()).strip())
+
+ tm.assert_frame_equal(url_table, local_table)
+
+ def test_read_from_pathlib_path(self):
+
+ # GH12655
+ tm._skip_if_no_pathlib()
+
+ from pathlib import Path
+
+ str_path = os.path.join(self.dirpath, 'test1' + self.ext)
+ expected = read_excel(str_path, 'Sheet1', index_col=0)
+
+ path_obj = Path(self.dirpath, 'test1' + self.ext)
+ actual = read_excel(path_obj, 'Sheet1', index_col=0)
+
+ tm.assert_frame_equal(expected, actual)
+
+ def test_read_from_py_localpath(self):
+
+ # GH12655
+ tm._skip_if_no_localpath()
+
+ from py.path import local as LocalPath
+
+ str_path = os.path.join(self.dirpath, 'test1' + self.ext)
+ expected = read_excel(str_path, 'Sheet1', index_col=0)
+
+ abs_dir = os.path.abspath(self.dirpath)
+ path_obj = LocalPath(abs_dir).join('test1' + self.ext)
+ actual = read_excel(path_obj, 'Sheet1', index_col=0)
+
+ tm.assert_frame_equal(expected, actual)
+
+ def test_reader_closes_file(self):
+
+ pth = os.path.join(self.dirpath, 'test1' + self.ext)
+ f = open(pth, 'rb')
+ with ExcelFile(f) as xlsx:
+ # parses okay
+ read_excel(xlsx, 'Sheet1', index_col=0)
+
+ self.assertTrue(f.closed)
+
+ def test_creating_and_reading_multiple_sheets(self):
+ # Test reading multiple sheets, from a runtime created excel file
+ # with multiple sheets.
+ # See PR #9450
+
+ _skip_if_no_xlwt()
+ _skip_if_no_openpyxl()
+
+ def tdf(sheetname):
+ d, i = [11, 22, 33], [1, 2, 3]
+ return DataFrame(d, i, columns=[sheetname])
+
+ sheets = ['AAA', 'BBB', 'CCC']
+
+ dfs = [tdf(s) for s in sheets]
+ dfs = dict(zip(sheets, dfs))
+
+ with ensure_clean(self.ext) as pth:
+ with ExcelWriter(pth) as ew:
+ for sheetname, df in iteritems(dfs):
+ df.to_excel(ew, sheetname)
+ dfs_returned = read_excel(pth, sheetname=sheets)
+ for s in sheets:
+ tm.assert_frame_equal(dfs[s], dfs_returned[s])
+
+ def test_reader_seconds(self):
+ # Test reading times with and without milliseconds. GH5945.
+ import xlrd
+
+ if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
+ # Xlrd >= 0.9.3 can handle Excel milliseconds.
+ expected = DataFrame.from_items([("Time",
+ [time(1, 2, 3),
+ time(2, 45, 56, 100000),
+ time(4, 29, 49, 200000),
+ time(6, 13, 42, 300000),
+ time(7, 57, 35, 400000),
+ time(9, 41, 28, 500000),
+ time(11, 25, 21, 600000),
+ time(13, 9, 14, 700000),
+ time(14, 53, 7, 800000),
+ time(16, 37, 0, 900000),
+ time(18, 20, 54)])])
+ else:
+ # Xlrd < 0.9.3 rounds Excel milliseconds.
+ expected = DataFrame.from_items([("Time",
+ [time(1, 2, 3),
+ time(2, 45, 56),
+ time(4, 29, 49),
+ time(6, 13, 42),
+ time(7, 57, 35),
+ time(9, 41, 29),
+ time(11, 25, 22),
+ time(13, 9, 15),
+ time(14, 53, 8),
+ time(16, 37, 1),
+ time(18, 20, 54)])])
+
+ actual = self.get_exceldf('times_1900', 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.get_exceldf('times_1904', 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_multiindex(self):
+ # GH 4679
+ mi = MultiIndex.from_product([['foo', 'bar'], ['a', 'b']])
+ mi_file = os.path.join(self.dirpath, 'testmultiindex' + self.ext)
+
+ expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
+ [2, 3.5, pd.Timestamp('2015-01-02'), False],
+ [3, 4.5, pd.Timestamp('2015-01-03'), False],
+ [4, 5.5, pd.Timestamp('2015-01-04'), True]],
+ columns=mi)
+
+ actual = read_excel(mi_file, 'mi_column', header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+ actual = read_excel(mi_file, 'mi_column', header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.columns = ['a', 'b', 'c', 'd']
+ expected.index = mi
+ actual = read_excel(mi_file, 'mi_index', index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ expected.columns = mi
+ actual = read_excel(mi_file, 'both', index_col=[0, 1], header=[0, 1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ expected.index = mi.set_names(['ilvl1', 'ilvl2'])
+ expected.columns = ['a', 'b', 'c', 'd']
+ actual = read_excel(mi_file, 'mi_index_name', index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = list(range(4))
+ expected.columns = mi.set_names(['c1', 'c2'])
+ actual = read_excel(mi_file, 'mi_column_name',
+ header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ # Issue #11317
+ expected.columns = mi.set_levels(
+ [1, 2], level=1).set_names(['c1', 'c2'])
+ actual = read_excel(mi_file, 'name_with_int',
+ index_col=0, header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ expected.columns = mi.set_names(['c1', 'c2'])
+ expected.index = mi.set_names(['ilvl1', 'ilvl2'])
+ actual = read_excel(mi_file, 'both_name',
+ index_col=[0, 1], header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = read_excel(mi_file, 'both_name',
+ index_col=[0, 1], header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = read_excel(mi_file, 'both_name_skiprows', index_col=[0, 1],
+ header=[0, 1], skiprows=2)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_multiindex_empty_level(self):
+ # GH 12453
+ _skip_if_no_xlsxwriter()
+ with ensure_clean('.xlsx') as path:
+ df = DataFrame({
+ ('Zero', ''): {0: 0},
+ ('One', 'x'): {0: 1},
+ ('Two', 'X'): {0: 3},
+ ('Two', 'Y'): {0: 7}
+ })
+
+ expected = DataFrame({
+ ('Zero', 'Unnamed: 3_level_1'): {0: 0},
+ ('One', u'x'): {0: 1},
+ ('Two', u'X'): {0: 3},
+ ('Two', u'Y'): {0: 7}
+ })
+
+ df.to_excel(path)
+ actual = pd.read_excel(path, header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ df = pd.DataFrame({
+ ('Beg', ''): {0: 0},
+ ('Middle', 'x'): {0: 1},
+ ('Tail', 'X'): {0: 3},
+ ('Tail', 'Y'): {0: 7}
+ })
+
+ expected = pd.DataFrame({
+ ('Beg', 'Unnamed: 0_level_1'): {0: 0},
+ ('Middle', u'x'): {0: 1},
+ ('Tail', u'X'): {0: 3},
+ ('Tail', u'Y'): {0: 7}
+ })
+
+ df.to_excel(path)
+ actual = pd.read_excel(path, header=[0, 1])
+ tm.assert_frame_equal(actual, expected)
+
+ def test_excel_multindex_roundtrip(self):
+ # GH 4679
+ _skip_if_no_xlsxwriter()
+ with ensure_clean('.xlsx') as pth:
+ for c_idx_names in [True, False]:
+ for r_idx_names in [True, False]:
+ for c_idx_levels in [1, 3]:
+ for r_idx_levels in [1, 3]:
+ # column index name can't be serialized unless
+ # MultiIndex
+ if (c_idx_levels == 1 and c_idx_names):
+ continue
+
+ # empty name case current read in as unamed levels,
+ # not Nones
+ check_names = True
+ if not r_idx_names and r_idx_levels > 1:
+ check_names = False
+
+ df = mkdf(5, 5, c_idx_names,
+ r_idx_names, c_idx_levels,
+ r_idx_levels)
+ df.to_excel(pth)
+ act = pd.read_excel(
+ pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(
+ df, act, check_names=check_names)
+
+ df.iloc[0, :] = np.nan
+ df.to_excel(pth)
+ act = pd.read_excel(
+ pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(
+ df, act, check_names=check_names)
+
+ df.iloc[-1, :] = np.nan
+ df.to_excel(pth)
+ act = pd.read_excel(
+ pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(
+ df, act, check_names=check_names)
+
+ def test_excel_oldindex_format(self):
+ # GH 4679
+ data = np.array([['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'],
+ ['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'],
+ ['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'],
+ ['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'],
+ ['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']])
+ columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4']
+ mi = MultiIndex(levels=[['R_l0_g0', 'R_l0_g1', 'R_l0_g2',
+ 'R_l0_g3', 'R_l0_g4'],
+ ['R_l1_g0', 'R_l1_g1', 'R_l1_g2',
+ 'R_l1_g3', 'R_l1_g4']],
+ labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
+ names=['R0', 'R1'])
+ si = Index(['R_l0_g0', 'R_l0_g1', 'R_l0_g2',
+ 'R_l0_g3', 'R_l0_g4'], name='R0')
+
+ in_file = os.path.join(
+ self.dirpath, 'test_index_name_pre17' + self.ext)
+
+ expected = pd.DataFrame(data, index=si, columns=columns)
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(
+ in_file, 'single_names', has_index_names=True)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index.name = None
+ actual = pd.read_excel(in_file, 'single_no_names')
+ tm.assert_frame_equal(actual, expected)
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(
+ in_file, 'single_no_names', has_index_names=False)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = mi
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(
+ in_file, 'multi_names', has_index_names=True)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index.names = [None, None]
+ actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1],
+ has_index_names=False)
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ def test_read_excel_bool_header_arg(self):
+ # GH 6114
+ for arg in [True, False]:
+ with tm.assertRaises(TypeError):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ header=arg)
+
+ def test_read_excel_chunksize(self):
+ # GH 8011
+ with tm.assertRaises(NotImplementedError):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ chunksize=100)
+
+ def test_read_excel_parse_dates(self):
+ # GH 11544, 12051
+
+ df = DataFrame(
+ {'col': [1, 2, 3],
+ 'date_strings': pd.date_range('2012-01-01', periods=3)})
+ df2 = df.copy()
+ df2['date_strings'] = df2['date_strings'].dt.strftime('%m/%d/%Y')
+
+ with ensure_clean(self.ext) as pth:
+ df2.to_excel(pth)
+
+ res = read_excel(pth)
+ tm.assert_frame_equal(df2, res)
+
+ res = read_excel(pth, parse_dates=['date_strings'])
+ tm.assert_frame_equal(df, res)
+
+ dateparser = lambda x: pd.datetime.strptime(x, '%m/%d/%Y')
+ res = read_excel(pth, parse_dates=['date_strings'],
+ date_parser=dateparser)
+ tm.assert_frame_equal(df, res)
+
+ def test_read_excel_skiprows_list(self):
+ # GH 4903
+ actual = pd.read_excel(os.path.join(self.dirpath,
+ 'testskiprows' + self.ext),
+ 'skiprows_list', skiprows=[0, 2])
+ expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
+ [2, 3.5, pd.Timestamp('2015-01-02'), False],
+ [3, 4.5, pd.Timestamp('2015-01-03'), False],
+ [4, 5.5, pd.Timestamp('2015-01-04'), True]],
+ columns=['a', 'b', 'c', 'd'])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = pd.read_excel(os.path.join(self.dirpath,
+ 'testskiprows' + self.ext),
+ 'skiprows_list', skiprows=np.array([0, 2]))
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_squeeze(self):
+ # GH 12157
+ f = os.path.join(self.dirpath, 'test_squeeze' + self.ext)
+
+ actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
+ expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
+ expected.index.name = 'a'
+ tm.assert_series_equal(actual, expected)
+
+ actual = pd.read_excel(f, 'two_columns', squeeze=True)
+ expected = pd.DataFrame({'a': [4, 5, 6],
+ 'b': [2, 3, 4]})
+ tm.assert_frame_equal(actual, expected)
+
+ actual = pd.read_excel(f, 'one_column', squeeze=True)
+ expected = pd.Series([1, 2, 3], name='a')
+ tm.assert_series_equal(actual, expected)
+
+
+class XlsReaderTests(XlrdTests, tm.TestCase):
+ ext = '.xls'
+ engine_name = 'xlrd'
+ check_skip = staticmethod(_skip_if_no_xlrd)
+
+
+class XlsxReaderTests(XlrdTests, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'xlrd'
+ check_skip = staticmethod(_skip_if_no_xlrd)
+
+
+class XlsmReaderTests(XlrdTests, tm.TestCase):
+ ext = '.xlsm'
+ engine_name = 'xlrd'
+ check_skip = staticmethod(_skip_if_no_xlrd)
+
+
+class ExcelWriterBase(SharedItems):
+ # Base class for test cases to run with different Excel writers.
+ # To add a writer test, define the following:
+ # 1. A check_skip function that skips your tests if your writer isn't
+ # installed.
+ # 2. Add a property ext, which is the file extension that your writer
+ # writes to. (needs to start with '.' so it's a valid path)
+ # 3. Add a property engine_name, which is the name of the writer class.
+
+ # Test with MultiIndex and Hierarchical Rows as merged cells.
+ merge_cells = True
+
+ def setUp(self):
+ self.check_skip()
+ super(ExcelWriterBase, self).setUp()
+ self.option_name = 'io.excel.%s.writer' % self.ext.strip('.')
+ self.prev_engine = get_option(self.option_name)
+ set_option(self.option_name, self.engine_name)
+
+ def tearDown(self):
+ set_option(self.option_name, self.prev_engine)
+
+ def test_excel_sheet_by_name_raise(self):
+ _skip_if_no_xlrd()
+ import xlrd
+
+ with ensure_clean(self.ext) as pth:
+ gt = DataFrame(np.random.randn(10, 2))
+ gt.to_excel(pth)
+ xl = ExcelFile(pth)
+ df = read_excel(xl, 0)
+ tm.assert_frame_equal(gt, df)
+
+ with tm.assertRaises(xlrd.XLRDError):
+ read_excel(xl, '0')
+
+ def test_excelwriter_contextmanager(self):
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as pth:
+ with ExcelWriter(pth) as writer:
+ self.frame.to_excel(writer, 'Data1')
+ self.frame2.to_excel(writer, 'Data2')
+
+ with ExcelFile(pth) as reader:
+ found_df = read_excel(reader, 'Data1')
+ found_df2 = read_excel(reader, 'Data2')
+ tm.assert_frame_equal(found_df, self.frame)
+ tm.assert_frame_equal(found_df2, self.frame2)
+
+ def test_roundtrip(self):
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # test roundtrip
+ self.frame.to_excel(path, 'test1')
+ recons = read_excel(path, 'test1', index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', index=False)
+ recons = read_excel(path, 'test1', index_col=None)
+ recons.index = self.frame.index
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', na_rep='NA')
+ recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
+ tm.assert_frame_equal(self.frame, recons)
+
+ # GH 3611
+ self.frame.to_excel(path, 'test1', na_rep='88')
+ recons = read_excel(path, 'test1', index_col=0, na_values=['88'])
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', na_rep='88')
+ recons = read_excel(path, 'test1', index_col=0,
+ na_values=[88, 88.0])
+ tm.assert_frame_equal(self.frame, recons)
+
+ # GH 6573
+ self.frame.to_excel(path, 'Sheet1')
+ recons = read_excel(path, index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, '0')
+ recons = read_excel(path, index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+
+ def test_mixed(self):
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ self.mixed_frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1', index_col=0)
+ tm.assert_frame_equal(self.mixed_frame, recons)
+
+ def test_tsframe(self):
+ _skip_if_no_xlrd()
+
+ df = tm.makeTimeDataFrame()[:5]
+
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ tm.assert_frame_equal(df, recons)
+
+ def test_basics_with_nan(self):
+ _skip_if_no_xlrd()
+ with ensure_clean(self.ext) as path:
+ self.frame['A'][:5] = nan
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ def test_int_types(self):
+ _skip_if_no_xlrd()
+
+ for np_type in (np.int8, np.int16, np.int32, np.int64):
+
+ with ensure_clean(self.ext) as path:
+ # Test np.int values read come back as int (rather than float
+ # which is Excel's format).
+ frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
+ dtype=np_type)
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ int_frame = frame.astype(np.int64)
+ tm.assert_frame_equal(int_frame, recons)
+ recons2 = read_excel(path, 'test1')
+ tm.assert_frame_equal(int_frame, recons2)
+
+ # test with convert_float=False comes back as float
+ float_frame = frame.astype(float)
+ recons = read_excel(path, 'test1', convert_float=False)
+ tm.assert_frame_equal(recons, float_frame,
+ check_index_type=False,
+ check_column_type=False)
+
+ def test_float_types(self):
+ _skip_if_no_xlrd()
+
+ for np_type in (np.float16, np.float32, np.float64):
+ with ensure_clean(self.ext) as path:
+ # Test np.float values read come back as float.
+ frame = DataFrame(np.random.random_sample(10), dtype=np_type)
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1').astype(np_type)
+ tm.assert_frame_equal(frame, recons, check_dtype=False)
+
+ def test_bool_types(self):
+ _skip_if_no_xlrd()
+
+ for np_type in (np.bool8, np.bool_):
+ with ensure_clean(self.ext) as path:
+ # Test np.bool values read come back as float.
+ frame = (DataFrame([1, 0, True, False], dtype=np_type))
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1').astype(np_type)
+ tm.assert_frame_equal(frame, recons)
+
+ def test_inf_roundtrip(self):
+ _skip_if_no_xlrd()
+
+ frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
+ with ensure_clean(self.ext) as path:
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ tm.assert_frame_equal(frame, recons)
+
+ def test_sheets(self):
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # Test writing to separate sheets
+ writer = ExcelWriter(path)
+ self.frame.to_excel(writer, 'test1')
+ self.tsframe.to_excel(writer, 'test2')
+ writer.save()
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1', index_col=0)
+ tm.assert_frame_equal(self.frame, recons)
+ recons = read_excel(reader, 'test2', index_col=0)
+ tm.assert_frame_equal(self.tsframe, recons)
+ self.assertEqual(2, len(reader.sheet_names))
+ self.assertEqual('test1', reader.sheet_names[0])
+ self.assertEqual('test2', reader.sheet_names[1])
+
+ def test_colaliases(self):
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # column aliases
+ col_aliases = Index(['AA', 'X', 'Y', 'Z'])
+ self.frame2.to_excel(path, 'test1', header=col_aliases)
+ reader = ExcelFile(path)
+ rs = read_excel(reader, 'test1', index_col=0)
+ xp = self.frame2.copy()
+ xp.columns = col_aliases
+ tm.assert_frame_equal(xp, rs)
+
+ def test_roundtrip_indexlabels(self):
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+
+ self.frame['A'][:5] = nan
+
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', columns=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
+ # test index_label
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(path, 'test1',
+ index_label=['test'],
+ merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1',
+ index_col=0,
+ ).astype(np.int64)
+ frame.index.names = ['test']
+ self.assertEqual(frame.index.names, recons.index.names)
+
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(path,
+ 'test1',
+ index_label=['test', 'dummy', 'dummy2'],
+ merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1',
+ index_col=0,
+ ).astype(np.int64)
+ frame.index.names = ['test']
+ self.assertEqual(frame.index.names, recons.index.names)
+
+ frame = (DataFrame(np.random.randn(10, 2)) >= 0)
+ frame.to_excel(path,
+ 'test1',
+ index_label='test',
+ merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1',
+ index_col=0,
+ ).astype(np.int64)
+ frame.index.names = ['test']
+ tm.assert_frame_equal(frame, recons.astype(bool))
+
+ with ensure_clean(self.ext) as path:
+
+ self.frame.to_excel(path,
+ 'test1',
+ columns=['A', 'B', 'C', 'D'],
+ index=False, merge_cells=self.merge_cells)
+ # take 'A' and 'B' as indexes (same row as cols 'C', 'D')
+ df = self.frame.copy()
+ df = df.set_index(['A', 'B'])
+
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1', index_col=[0, 1])
+ tm.assert_frame_equal(df, recons, check_less_precise=True)
+
+ def test_excel_roundtrip_indexname(self):
+ _skip_if_no_xlrd()
+
+ df = DataFrame(np.random.randn(10, 4))
+ df.index.name = 'foo'
+
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, merge_cells=self.merge_cells)
+
+ xf = ExcelFile(path)
+ result = read_excel(xf, xf.sheet_names[0],
+ index_col=0)
+
+ tm.assert_frame_equal(result, df)
+ self.assertEqual(result.index.name, 'foo')
+
+ def test_excel_roundtrip_datetime(self):
+ _skip_if_no_xlrd()
+
+ # datetime.date, not sure what to test here exactly
+ tsf = self.tsframe.copy()
+ with ensure_clean(self.ext) as path:
+
+ tsf.index = [x.date() for x in self.tsframe.index]
+ tsf.to_excel(path, 'test1', merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ tm.assert_frame_equal(self.tsframe, recons)
+
+ # GH4133 - excel output format strings
+ def test_excel_date_datetime_format(self):
+ _skip_if_no_xlrd()
+ df = DataFrame([[date(2014, 1, 31),
+ date(1999, 9, 24)],
+ [datetime(1998, 5, 26, 23, 33, 4),
+ datetime(2014, 2, 28, 13, 5, 13)]],
+ index=['DATE', 'DATETIME'], columns=['X', 'Y'])
+ df_expected = DataFrame([[datetime(2014, 1, 31),
+ datetime(1999, 9, 24)],
+ [datetime(1998, 5, 26, 23, 33, 4),
+ datetime(2014, 2, 28, 13, 5, 13)]],
+ index=['DATE', 'DATETIME'], columns=['X', 'Y'])
+
+ with ensure_clean(self.ext) as filename1:
+ with ensure_clean(self.ext) as filename2:
+ writer1 = ExcelWriter(filename1)
+ writer2 = ExcelWriter(filename2,
+ date_format='DD.MM.YYYY',
+ datetime_format='DD.MM.YYYY HH-MM-SS')
+
+ df.to_excel(writer1, 'test1')
+ df.to_excel(writer2, 'test1')
+
+ writer1.close()
+ writer2.close()
+
+ reader1 = ExcelFile(filename1)
+ reader2 = ExcelFile(filename2)
+
+ rs1 = read_excel(reader1, 'test1', index_col=None)
+ rs2 = read_excel(reader2, 'test1', index_col=None)
+
+ tm.assert_frame_equal(rs1, rs2)
+
+ # since the reader returns a datetime object for dates, we need
+ # to use df_expected to check the result
+ tm.assert_frame_equal(rs2, df_expected)
+
+ def test_to_excel_periodindex(self):
+ _skip_if_no_xlrd()
+
+ frame = self.tsframe
+ xp = frame.resample('M', kind='period').mean()
+
+ with ensure_clean(self.ext) as path:
+ xp.to_excel(path, 'sht1')
+
+ reader = ExcelFile(path)
+ rs = read_excel(reader, 'sht1', index_col=0)
+ tm.assert_frame_equal(xp, rs.to_period('M'))
+
+ def test_to_excel_multiindex(self):
+ _skip_if_no_xlrd()
+
+ frame = self.frame
+ arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
+ new_index = MultiIndex.from_arrays(arrays,
+ names=['first', 'second'])
+ frame.index = new_index
+
+ with ensure_clean(self.ext) as path:
+ frame.to_excel(path, 'test1', header=False)
+ frame.to_excel(path, 'test1', columns=['A', 'B'])
+
+ # round trip
+ frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ df = read_excel(reader, 'test1', index_col=[0, 1])
+ tm.assert_frame_equal(frame, df)
+
+ # GH13511
+ def test_to_excel_multiindex_nan_label(self):
+ _skip_if_no_xlrd()
+
+ frame = pd.DataFrame({'A': [None, 2, 3],
+ 'B': [10, 20, 30],
+ 'C': np.random.sample(3)})
+ frame = frame.set_index(['A', 'B'])
+
+ with ensure_clean(self.ext) as path:
+ frame.to_excel(path, merge_cells=self.merge_cells)
+ df = read_excel(path, index_col=[0, 1])
+ tm.assert_frame_equal(frame, df)
+
+ # Test for Issue 11328. If column indices are integers, make
+ # sure they are handled correctly for either setting of
+ # merge_cells
+ def test_to_excel_multiindex_cols(self):
+ _skip_if_no_xlrd()
+
+ frame = self.frame
+ arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
+ new_index = MultiIndex.from_arrays(arrays,
+ names=['first', 'second'])
+ frame.index = new_index
+
+ new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
+ (50, 1), (50, 2)])
+ frame.columns = new_cols_index
+ header = [0, 1]
+ if not self.merge_cells:
+ header = 0
+
+ with ensure_clean(self.ext) as path:
+ # round trip
+ frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ df = read_excel(reader, 'test1', header=header,
+ index_col=[0, 1])
+ if not self.merge_cells:
+ fm = frame.columns.format(sparsify=False,
+ adjoin=False, names=False)
+ frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
+ tm.assert_frame_equal(frame, df)
+
+ def test_to_excel_multiindex_dates(self):
+ _skip_if_no_xlrd()
+
+ # try multiindex with dates
+ tsframe = self.tsframe.copy()
+ new_index = [tsframe.index, np.arange(len(tsframe.index))]
+ tsframe.index = MultiIndex.from_arrays(new_index)
+
+ with ensure_clean(self.ext) as path:
+ tsframe.index.names = ['time', 'foo']
+ tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1',
+ index_col=[0, 1])
+
+ tm.assert_frame_equal(tsframe, recons)
+ self.assertEqual(recons.index.names, ('time', 'foo'))
+
+ def test_to_excel_multiindex_no_write_index(self):
+ _skip_if_no_xlrd()
+
+        # Test writing and re-reading a MI without the index. GH 5616.
+
+ # Initial non-MI frame.
+ frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
+
+ # Add a MI.
+ frame2 = frame1.copy()
+ multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
+ frame2.index = multi_index
+
+ with ensure_clean(self.ext) as path:
+
+ # Write out to Excel without the index.
+ frame2.to_excel(path, 'test1', index=False)
+
+ # Read it back in.
+ reader = ExcelFile(path)
+ frame3 = read_excel(reader, 'test1')
+
+ # Test that it is the same as the initial frame.
+ tm.assert_frame_equal(frame1, frame3)
+
+ def test_to_excel_float_format(self):
+ _skip_if_no_xlrd()
+
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+
+ with ensure_clean(self.ext) as filename:
+ df.to_excel(filename, 'test1', float_format='%.2f')
+
+ reader = ExcelFile(filename)
+ rs = read_excel(reader, 'test1', index_col=None)
+ xp = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+ tm.assert_frame_equal(rs, xp)
+
+ def test_to_excel_output_encoding(self):
+ _skip_if_no_xlrd()
+
+ # avoid mixed inferred_type
+ df = DataFrame([[u'\u0192', u'\u0193', u'\u0194'],
+ [u'\u0195', u'\u0196', u'\u0197']],
+ index=[u'A\u0192', u'B'],
+ columns=[u'X\u0193', u'Y', u'Z'])
+
+ with ensure_clean('__tmp_to_excel_float_format__.' + self.ext)\
+ as filename:
+ df.to_excel(filename, sheet_name='TestSheet', encoding='utf8')
+ result = read_excel(filename, 'TestSheet', encoding='utf8')
+ tm.assert_frame_equal(result, df)
+
+ def test_to_excel_unicode_filename(self):
+ _skip_if_no_xlrd()
+ with ensure_clean(u('\u0192u.') + self.ext) as filename:
+ try:
+ f = open(filename, 'wb')
+ except UnicodeEncodeError:
+ raise nose.SkipTest('no unicode file names on this system')
+ else:
+ f.close()
+
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+
+ df.to_excel(filename, 'test1', float_format='%.2f')
+
+ reader = ExcelFile(filename)
+ rs = read_excel(reader, 'test1', index_col=None)
+ xp = DataFrame([[0.12, 0.23, 0.57],
+ [12.32, 123123.20, 321321.20]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+ tm.assert_frame_equal(rs, xp)
+
+ # def test_to_excel_header_styling_xls(self):
+
+ # import StringIO
+ # s = StringIO(
+ # """Date,ticker,type,value
+ # 2001-01-01,x,close,12.2
+ # 2001-01-01,x,open ,12.1
+ # 2001-01-01,y,close,12.2
+ # 2001-01-01,y,open ,12.1
+ # 2001-02-01,x,close,12.2
+ # 2001-02-01,x,open ,12.1
+ # 2001-02-01,y,close,12.2
+ # 2001-02-01,y,open ,12.1
+ # 2001-03-01,x,close,12.2
+ # 2001-03-01,x,open ,12.1
+ # 2001-03-01,y,close,12.2
+ # 2001-03-01,y,open ,12.1""")
+ # df = read_csv(s, parse_dates=["Date"])
+ # pdf = df.pivot_table(values="value", rows=["ticker"],
+ # cols=["Date", "type"])
+
+ # try:
+ # import xlwt
+ # import xlrd
+ # except ImportError:
+ # raise nose.SkipTest
+
+ # filename = '__tmp_to_excel_header_styling_xls__.xls'
+ # pdf.to_excel(filename, 'test1')
+
+ # wbk = xlrd.open_workbook(filename,
+ # formatting_info=True)
+ # self.assertEqual(["test1"], wbk.sheet_names())
+ # ws = wbk.sheet_by_name('test1')
+ # self.assertEqual([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)],
+ # ws.merged_cells)
+ # for i in range(0, 2):
+ # for j in range(0, 7):
+ # xfx = ws.cell_xf_index(0, 0)
+ # cell_xf = wbk.xf_list[xfx]
+ # font = wbk.font_list
+ # self.assertEqual(1, font[cell_xf.font_index].bold)
+ # self.assertEqual(1, cell_xf.border.top_line_style)
+ # self.assertEqual(1, cell_xf.border.right_line_style)
+ # self.assertEqual(1, cell_xf.border.bottom_line_style)
+ # self.assertEqual(1, cell_xf.border.left_line_style)
+ # self.assertEqual(2, cell_xf.alignment.hor_align)
+ # os.remove(filename)
+ # def test_to_excel_header_styling_xlsx(self):
+ # import StringIO
+ # s = StringIO(
+ # """Date,ticker,type,value
+ # 2001-01-01,x,close,12.2
+ # 2001-01-01,x,open ,12.1
+ # 2001-01-01,y,close,12.2
+ # 2001-01-01,y,open ,12.1
+ # 2001-02-01,x,close,12.2
+ # 2001-02-01,x,open ,12.1
+ # 2001-02-01,y,close,12.2
+ # 2001-02-01,y,open ,12.1
+ # 2001-03-01,x,close,12.2
+ # 2001-03-01,x,open ,12.1
+ # 2001-03-01,y,close,12.2
+ # 2001-03-01,y,open ,12.1""")
+ # df = read_csv(s, parse_dates=["Date"])
+ # pdf = df.pivot_table(values="value", rows=["ticker"],
+ # cols=["Date", "type"])
+ # try:
+ # import openpyxl
+ # from openpyxl.cell import get_column_letter
+ # except ImportError:
+ # raise nose.SkipTest
+ # if openpyxl.__version__ < '1.6.1':
+ # raise nose.SkipTest
+ # # test xlsx_styling
+ # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
+ # pdf.to_excel(filename, 'test1')
+ # wbk = openpyxl.load_workbook(filename)
+ # self.assertEqual(["test1"], wbk.get_sheet_names())
+ # ws = wbk.get_sheet_by_name('test1')
+ # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
+ # xlsaddrs += ["A%s" % i for i in range(1, 6)]
+ # xlsaddrs += ["B1", "D1", "F1"]
+ # for xlsaddr in xlsaddrs:
+ # cell = ws.cell(xlsaddr)
+ # self.assertTrue(cell.style.font.bold)
+ # self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ # cell.style.borders.top.border_style)
+ # self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ # cell.style.borders.right.border_style)
+ # self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ # cell.style.borders.bottom.border_style)
+ # self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ # cell.style.borders.left.border_style)
+ # self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER,
+ # cell.style.alignment.horizontal)
+ # mergedcells_addrs = ["C1", "E1", "G1"]
+ # for maddr in mergedcells_addrs:
+ # self.assertTrue(ws.cell(maddr).merged)
+ # os.remove(filename)
+
+ def test_excel_010_hemstring(self):
+ _skip_if_no_xlrd()
+
+ if self.merge_cells:
+ raise nose.SkipTest('Skip tests for merged MI format.')
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ # ensure limited functionality in 0.10
+ # override of #2370 until sorted out in 0.11
+
+ def roundtrip(df, header=True, parser_hdr=0, index=True):
+
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, header=header,
+ merge_cells=self.merge_cells, index=index)
+ xf = ExcelFile(path)
+ res = read_excel(xf, xf.sheet_names[0], header=parser_hdr)
+ return res
+
+ nrows = 5
+ ncols = 3
+ for use_headers in (True, False):
+            for i in range(1, 4):  # row multiindex up to nlevel=3
+ for j in range(1, 4): # col ""
+ df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
+
+ # this if will be removed once multi column excel writing
+ # is implemented for now fixing #9794
+ if j > 1:
+ with tm.assertRaises(NotImplementedError):
+ res = roundtrip(df, use_headers, index=False)
+ else:
+ res = roundtrip(df, use_headers)
+
+ if use_headers:
+ self.assertEqual(res.shape, (nrows, ncols + i))
+ else:
+ # first row taken as columns
+ self.assertEqual(res.shape, (nrows - 1, ncols + i))
+
+ # no nans
+ for r in range(len(res.index)):
+ for c in range(len(res.columns)):
+ self.assertTrue(res.ix[r, c] is not np.nan)
+
+ res = roundtrip(DataFrame([0]))
+ self.assertEqual(res.shape, (1, 1))
+ self.assertTrue(res.ix[0, 0] is not np.nan)
+
+ res = roundtrip(DataFrame([0]), False, None)
+ self.assertEqual(res.shape, (1, 2))
+ self.assertTrue(res.ix[0, 0] is not np.nan)
+
+ def test_excel_010_hemstring_raises_NotImplementedError(self):
+ # This test was failing only for j>1 and header=False,
+ # So I reproduced a simple test.
+ _skip_if_no_xlrd()
+
+ if self.merge_cells:
+ raise nose.SkipTest('Skip tests for merged MI format.')
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ # ensure limited functionality in 0.10
+ # override of #2370 until sorted out in 0.11
+
+ def roundtrip2(df, header=True, parser_hdr=0, index=True):
+
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, header=header,
+ merge_cells=self.merge_cells, index=index)
+ xf = ExcelFile(path)
+ res = read_excel(xf, xf.sheet_names[0], header=parser_hdr)
+ return res
+
+ nrows = 5
+ ncols = 3
+ j = 2
+ i = 1
+ df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
+ with tm.assertRaises(NotImplementedError):
+ roundtrip2(df, header=False, index=False)
+
+ def test_duplicated_columns(self):
+ # Test for issue #5235
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
+ colnames = ['A', 'B', 'B']
+
+ write_frame.columns = colnames
+ write_frame.to_excel(path, 'test1')
+
+ read_frame = read_excel(path, 'test1')
+ read_frame.columns = colnames
+ tm.assert_frame_equal(write_frame, read_frame)
+
+ # 11007 / #10970
+ write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
+ columns=['A', 'B', 'A', 'B'])
+ write_frame.to_excel(path, 'test1')
+ read_frame = read_excel(path, 'test1')
+ read_frame.columns = ['A', 'B', 'A', 'B']
+ tm.assert_frame_equal(write_frame, read_frame)
+
+ # 10982
+ write_frame.to_excel(path, 'test1', index=False, header=False)
+ read_frame = read_excel(path, 'test1', header=None)
+ write_frame.columns = [0, 1, 2, 3]
+ tm.assert_frame_equal(write_frame, read_frame)
+
+ def test_swapped_columns(self):
+ # Test for issue #5427.
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ write_frame = DataFrame({'A': [1, 1, 1],
+ 'B': [2, 2, 2]})
+ write_frame.to_excel(path, 'test1', columns=['B', 'A'])
+
+ read_frame = read_excel(path, 'test1', header=0)
+
+ tm.assert_series_equal(write_frame['A'], read_frame['A'])
+ tm.assert_series_equal(write_frame['B'], read_frame['B'])
+
+ def test_invalid_columns(self):
+ # 10982
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ write_frame = DataFrame({'A': [1, 1, 1],
+ 'B': [2, 2, 2]})
+
+ write_frame.to_excel(path, 'test1', columns=['B', 'C'])
+ expected = write_frame.loc[:, ['B', 'C']]
+ read_frame = read_excel(path, 'test1')
+ tm.assert_frame_equal(expected, read_frame)
+
+ with tm.assertRaises(KeyError):
+ write_frame.to_excel(path, 'test1', columns=['C', 'D'])
+
+ def test_datetimes(self):
+
+ # Test writing and reading datetimes. For issue #9139. (xref #9185)
+ _skip_if_no_xlrd()
+
+ datetimes = [datetime(2013, 1, 13, 1, 2, 3),
+ datetime(2013, 1, 13, 2, 45, 56),
+ datetime(2013, 1, 13, 4, 29, 49),
+ datetime(2013, 1, 13, 6, 13, 42),
+ datetime(2013, 1, 13, 7, 57, 35),
+ datetime(2013, 1, 13, 9, 41, 28),
+ datetime(2013, 1, 13, 11, 25, 21),
+ datetime(2013, 1, 13, 13, 9, 14),
+ datetime(2013, 1, 13, 14, 53, 7),
+ datetime(2013, 1, 13, 16, 37, 0),
+ datetime(2013, 1, 13, 18, 20, 52)]
+
+ with ensure_clean(self.ext) as path:
+ write_frame = DataFrame.from_items([('A', datetimes)])
+ write_frame.to_excel(path, 'Sheet1')
+ read_frame = read_excel(path, 'Sheet1', header=0)
+
+ tm.assert_series_equal(write_frame['A'], read_frame['A'])
+
+ # GH7074
+ def test_bytes_io(self):
+ _skip_if_no_xlrd()
+
+ bio = BytesIO()
+ df = DataFrame(np.random.randn(10, 2))
+ writer = ExcelWriter(bio)
+ df.to_excel(writer)
+ writer.save()
+ bio.seek(0)
+ reread_df = read_excel(bio)
+ tm.assert_frame_equal(df, reread_df)
+
+ # GH8188
+ def test_write_lists_dict(self):
+ _skip_if_no_xlrd()
+
+ df = DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}],
+ 'numeric': [1, 2, 3.0],
+ 'str': ['apple', 'banana', 'cherry']})
+ expected = df.copy()
+ expected.mixed = expected.mixed.apply(str)
+ expected.numeric = expected.numeric.astype('int64')
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, 'Sheet1')
+ read = read_excel(path, 'Sheet1', header=0)
+ tm.assert_frame_equal(read, expected)
+
+ # GH13347
+ def test_true_and_false_value_options(self):
+ df = pd.DataFrame([['foo', 'bar']], columns=['col1', 'col2'])
+ expected = df.replace({'foo': True,
+ 'bar': False})
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path)
+ read_frame = read_excel(path, true_values=['foo'],
+ false_values=['bar'])
+ tm.assert_frame_equal(read_frame, expected)
+
+
+def raise_wrapper(major_ver):
+ def versioned_raise_wrapper(orig_method):
+ @functools.wraps(orig_method)
+ def wrapped(self, *args, **kwargs):
+ _skip_if_no_openpyxl()
+ if openpyxl_compat.is_compat(major_ver=major_ver):
+ orig_method(self, *args, **kwargs)
+ else:
+ msg = ('Installed openpyxl is not supported at this '
+ 'time\. Use.+')
+ with tm.assertRaisesRegexp(ValueError, msg):
+ orig_method(self, *args, **kwargs)
+ return wrapped
+ return versioned_raise_wrapper
+
+
+def raise_on_incompat_version(major_ver):
+ def versioned_raise_on_incompat_version(cls):
+ methods = filter(operator.methodcaller(
+ 'startswith', 'test_'), dir(cls))
+ for method in methods:
+ setattr(cls, method, raise_wrapper(
+ major_ver)(getattr(cls, method)))
+ return cls
+ return versioned_raise_on_incompat_version
+
+
+@raise_on_incompat_version(1)
+class OpenpyxlTests(ExcelWriterBase, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'openpyxl1'
+ check_skip = staticmethod(lambda *args, **kwargs: None)
+
+ def test_to_excel_styleconverter(self):
+ _skip_if_no_openpyxl()
+ if not openpyxl_compat.is_compat(major_ver=1):
+ raise nose.SkipTest('incompatiable openpyxl version')
+
+ import openpyxl
+
+ hstyle = {"font": {"bold": True},
+ "borders": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "alignment": {"horizontal": "center", "vertical": "top"}}
+
+ xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle)
+ self.assertTrue(xlsx_style.font.bold)
+ self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.top.border_style)
+ self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.right.border_style)
+ self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.bottom.border_style)
+ self.assertEqual(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.left.border_style)
+ self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER,
+ xlsx_style.alignment.horizontal)
+ self.assertEqual(openpyxl.style.Alignment.VERTICAL_TOP,
+ xlsx_style.alignment.vertical)
+
+
+def skip_openpyxl_gt21(cls):
+ """Skip a TestCase instance if openpyxl >= 2.2"""
+
+ @classmethod
+ def setUpClass(cls):
+ _skip_if_no_openpyxl()
+ import openpyxl
+ ver = openpyxl.__version__
+ if (not (LooseVersion(ver) >= LooseVersion('2.0.0') and
+ LooseVersion(ver) < LooseVersion('2.2.0'))):
+ raise nose.SkipTest("openpyxl %s >= 2.2" % str(ver))
+
+ cls.setUpClass = setUpClass
+ return cls
+
+
+@raise_on_incompat_version(2)
+@skip_openpyxl_gt21
+class Openpyxl20Tests(ExcelWriterBase, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'openpyxl20'
+ check_skip = staticmethod(lambda *args, **kwargs: None)
+
+ def test_to_excel_styleconverter(self):
+ import openpyxl
+ from openpyxl import styles
+
+ hstyle = {
+ "font": {
+ "color": '00FF0000',
+ "bold": True,
+ },
+ "borders": {
+ "top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin",
+ },
+ "alignment": {
+ "horizontal": "center",
+ "vertical": "top",
+ },
+ "fill": {
+ "patternType": 'solid',
+ 'fgColor': {
+ 'rgb': '006666FF',
+ 'tint': 0.3,
+ },
+ },
+ "number_format": {
+ "format_code": "0.00"
+ },
+ "protection": {
+ "locked": True,
+ "hidden": False,
+ },
+ }
+
+ font_color = styles.Color('00FF0000')
+ font = styles.Font(bold=True, color=font_color)
+ side = styles.Side(style=styles.borders.BORDER_THIN)
+ border = styles.Border(top=side, right=side, bottom=side, left=side)
+ alignment = styles.Alignment(horizontal='center', vertical='top')
+ fill_color = styles.Color(rgb='006666FF', tint=0.3)
+ fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
+
+ # ahh openpyxl API changes
+ ver = openpyxl.__version__
+ if ver >= LooseVersion('2.0.0') and ver < LooseVersion('2.1.0'):
+ number_format = styles.NumberFormat(format_code='0.00')
+ else:
+ number_format = '0.00' # XXX: Only works with openpyxl-2.1.0
+
+ protection = styles.Protection(locked=True, hidden=False)
+
+ kw = _Openpyxl20Writer._convert_to_style_kwargs(hstyle)
+ self.assertEqual(kw['font'], font)
+ self.assertEqual(kw['border'], border)
+ self.assertEqual(kw['alignment'], alignment)
+ self.assertEqual(kw['fill'], fill)
+ self.assertEqual(kw['number_format'], number_format)
+ self.assertEqual(kw['protection'], protection)
+
+ def test_write_cells_merge_styled(self):
+ from pandas.formats.format import ExcelCell
+ from openpyxl import styles
+
+ sheet_name = 'merge_styled'
+
+ sty_b1 = {'font': {'color': '00FF0000'}}
+ sty_a2 = {'font': {'color': '0000FF00'}}
+
+ initial_cells = [
+ ExcelCell(col=1, row=0, val=42, style=sty_b1),
+ ExcelCell(col=0, row=1, val=99, style=sty_a2),
+ ]
+
+ sty_merged = {'font': {'color': '000000FF', 'bold': True}}
+ sty_kwargs = _Openpyxl20Writer._convert_to_style_kwargs(sty_merged)
+ openpyxl_sty_merged = styles.Style(**sty_kwargs)
+ merge_cells = [
+ ExcelCell(col=0, row=0, val='pandas',
+ mergestart=1, mergeend=1, style=sty_merged),
+ ]
+
+ with ensure_clean('.xlsx') as path:
+ writer = _Openpyxl20Writer(path)
+ writer.write_cells(initial_cells, sheet_name=sheet_name)
+ writer.write_cells(merge_cells, sheet_name=sheet_name)
+
+ wks = writer.sheets[sheet_name]
+ xcell_b1 = wks.cell('B1')
+ xcell_a2 = wks.cell('A2')
+ self.assertEqual(xcell_b1.style, openpyxl_sty_merged)
+ self.assertEqual(xcell_a2.style, openpyxl_sty_merged)
+
+
+def skip_openpyxl_lt22(cls):
+ """Skip a TestCase instance if openpyxl < 2.2"""
+
+ @classmethod
+ def setUpClass(cls):
+ _skip_if_no_openpyxl()
+ import openpyxl
+ ver = openpyxl.__version__
+ if LooseVersion(ver) < LooseVersion('2.2.0'):
+ raise nose.SkipTest("openpyxl %s < 2.2" % str(ver))
+
+ cls.setUpClass = setUpClass
+ return cls
+
+
+@raise_on_incompat_version(2)
+@skip_openpyxl_lt22
+class Openpyxl22Tests(ExcelWriterBase, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'openpyxl22'
+ check_skip = staticmethod(lambda *args, **kwargs: None)
+
+ def test_to_excel_styleconverter(self):
+ from openpyxl import styles
+
+ hstyle = {
+ "font": {
+ "color": '00FF0000',
+ "bold": True,
+ },
+ "borders": {
+ "top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin",
+ },
+ "alignment": {
+ "horizontal": "center",
+ "vertical": "top",
+ },
+ "fill": {
+ "patternType": 'solid',
+ 'fgColor': {
+ 'rgb': '006666FF',
+ 'tint': 0.3,
+ },
+ },
+ "number_format": {
+ "format_code": "0.00"
+ },
+ "protection": {
+ "locked": True,
+ "hidden": False,
+ },
+ }
+
+ font_color = styles.Color('00FF0000')
+ font = styles.Font(bold=True, color=font_color)
+ side = styles.Side(style=styles.borders.BORDER_THIN)
+ border = styles.Border(top=side, right=side, bottom=side, left=side)
+ alignment = styles.Alignment(horizontal='center', vertical='top')
+ fill_color = styles.Color(rgb='006666FF', tint=0.3)
+ fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
+
+ number_format = '0.00'
+
+ protection = styles.Protection(locked=True, hidden=False)
+
+ kw = _Openpyxl22Writer._convert_to_style_kwargs(hstyle)
+ self.assertEqual(kw['font'], font)
+ self.assertEqual(kw['border'], border)
+ self.assertEqual(kw['alignment'], alignment)
+ self.assertEqual(kw['fill'], fill)
+ self.assertEqual(kw['number_format'], number_format)
+ self.assertEqual(kw['protection'], protection)
+
+ def test_write_cells_merge_styled(self):
+ if not openpyxl_compat.is_compat(major_ver=2):
+ raise nose.SkipTest('incompatiable openpyxl version')
+
+ from pandas.formats.format import ExcelCell
+
+ sheet_name = 'merge_styled'
+
+ sty_b1 = {'font': {'color': '00FF0000'}}
+ sty_a2 = {'font': {'color': '0000FF00'}}
+
+ initial_cells = [
+ ExcelCell(col=1, row=0, val=42, style=sty_b1),
+ ExcelCell(col=0, row=1, val=99, style=sty_a2),
+ ]
+
+ sty_merged = {'font': {'color': '000000FF', 'bold': True}}
+ sty_kwargs = _Openpyxl22Writer._convert_to_style_kwargs(sty_merged)
+ openpyxl_sty_merged = sty_kwargs['font']
+ merge_cells = [
+ ExcelCell(col=0, row=0, val='pandas',
+ mergestart=1, mergeend=1, style=sty_merged),
+ ]
+
+ with ensure_clean('.xlsx') as path:
+ writer = _Openpyxl22Writer(path)
+ writer.write_cells(initial_cells, sheet_name=sheet_name)
+ writer.write_cells(merge_cells, sheet_name=sheet_name)
+
+ wks = writer.sheets[sheet_name]
+ xcell_b1 = wks.cell('B1')
+ xcell_a2 = wks.cell('A2')
+ self.assertEqual(xcell_b1.font, openpyxl_sty_merged)
+ self.assertEqual(xcell_a2.font, openpyxl_sty_merged)
+
+
+class XlwtTests(ExcelWriterBase, tm.TestCase):
+ ext = '.xls'
+ engine_name = 'xlwt'
+ check_skip = staticmethod(_skip_if_no_xlwt)
+
+ def test_excel_raise_error_on_multiindex_columns_and_no_index(self):
+ _skip_if_no_xlwt()
+ # MultiIndex as columns is not yet implemented 9794
+ cols = MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = DataFrame(np.random.randn(10, 3), columns=cols)
+ with tm.assertRaises(NotImplementedError):
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, index=False)
+
+ def test_excel_multiindex_columns_and_index_true(self):
+ _skip_if_no_xlwt()
+ cols = MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, index=True)
+
+ def test_excel_multiindex_index(self):
+ _skip_if_no_xlwt()
+ # MultiIndex as index works so assert no error #9794
+ cols = MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = DataFrame(np.random.randn(3, 10), index=cols)
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, index=False)
+
+ def test_to_excel_styleconverter(self):
+ _skip_if_no_xlwt()
+
+ import xlwt
+
+ hstyle = {"font": {"bold": True},
+ "borders": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "alignment": {"horizontal": "center", "vertical": "top"}}
+
+ xls_style = _XlwtWriter._convert_to_style(hstyle)
+ self.assertTrue(xls_style.font.bold)
+ self.assertEqual(xlwt.Borders.THIN, xls_style.borders.top)
+ self.assertEqual(xlwt.Borders.THIN, xls_style.borders.right)
+ self.assertEqual(xlwt.Borders.THIN, xls_style.borders.bottom)
+ self.assertEqual(xlwt.Borders.THIN, xls_style.borders.left)
+ self.assertEqual(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz)
+ self.assertEqual(xlwt.Alignment.VERT_TOP, xls_style.alignment.vert)
+
+
+class XlsxWriterTests(ExcelWriterBase, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'xlsxwriter'
+ check_skip = staticmethod(_skip_if_no_xlsxwriter)
+
+ def test_column_format(self):
+ # Test that column formats are applied to cells. Test for issue #9167.
+ # Applicable to xlsxwriter only.
+ _skip_if_no_xlsxwriter()
+
+ with warnings.catch_warnings():
+ # Ignore the openpyxl lxml warning.
+ warnings.simplefilter("ignore")
+ _skip_if_no_openpyxl()
+ import openpyxl
+
+ with ensure_clean(self.ext) as path:
+ frame = DataFrame({'A': [123456, 123456],
+ 'B': [123456, 123456]})
+
+ writer = ExcelWriter(path)
+ frame.to_excel(writer)
+
+ # Add a number format to col B and ensure it is applied to cells.
+ num_format = '#,##0'
+ write_workbook = writer.book
+ write_worksheet = write_workbook.worksheets()[0]
+ col_format = write_workbook.add_format({'num_format': num_format})
+ write_worksheet.set_column('B:B', None, col_format)
+ writer.save()
+
+ read_workbook = openpyxl.load_workbook(path)
+ read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
+
+ # Get the number format from the cell. This method is backward
+ # compatible with older versions of openpyxl.
+ cell = read_worksheet.cell('B2')
+
+ try:
+ read_num_format = cell.number_format
+ except:
+ read_num_format = cell.style.number_format._format_code
+
+ self.assertEqual(read_num_format, num_format)
+
+
+class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'openpyxl'
+ check_skip = staticmethod(_skip_if_no_openpyxl)
+
+ # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
+ merge_cells = False
+
+
+class XlwtTests_NoMerge(ExcelWriterBase, tm.TestCase):
+ ext = '.xls'
+ engine_name = 'xlwt'
+ check_skip = staticmethod(_skip_if_no_xlwt)
+
+ # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
+ merge_cells = False
+
+
+class XlsxWriterTests_NoMerge(ExcelWriterBase, tm.TestCase):
+ ext = '.xlsx'
+ engine_name = 'xlsxwriter'
+ check_skip = staticmethod(_skip_if_no_xlsxwriter)
+
+ # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
+ merge_cells = False
+
+
+class ExcelWriterEngineTests(tm.TestCase):
+
+ def test_ExcelWriter_dispatch(self):
+ with tm.assertRaisesRegexp(ValueError, 'No engine'):
+ ExcelWriter('nothing')
+
+ try:
+ import xlsxwriter # noqa
+ writer_klass = _XlsxWriter
+ except ImportError:
+ _skip_if_no_openpyxl()
+ if not openpyxl_compat.is_compat(major_ver=1):
+ raise nose.SkipTest('incompatible openpyxl version')
+ writer_klass = _Openpyxl1Writer
+
+ with ensure_clean('.xlsx') as path:
+ writer = ExcelWriter(path)
+ tm.assertIsInstance(writer, writer_klass)
+
+ _skip_if_no_xlwt()
+ with ensure_clean('.xls') as path:
+ writer = ExcelWriter(path)
+ tm.assertIsInstance(writer, _XlwtWriter)
+
+ def test_register_writer(self):
+ # some awkward mocking to test out dispatch and such actually works
+ called_save = []
+ called_write_cells = []
+
+ class DummyClass(ExcelWriter):
+ called_save = False
+ called_write_cells = False
+ supported_extensions = ['test', 'xlsx', 'xls']
+ engine = 'dummy'
+
+ def save(self):
+ called_save.append(True)
+
+ def write_cells(self, *args, **kwargs):
+ called_write_cells.append(True)
+
+ def check_called(func):
+ func()
+ self.assertTrue(len(called_save) >= 1)
+ self.assertTrue(len(called_write_cells) >= 1)
+ del called_save[:]
+ del called_write_cells[:]
+
+ with pd.option_context('io.excel.xlsx.writer', 'dummy'):
+ register_writer(DummyClass)
+ writer = ExcelWriter('something.test')
+ tm.assertIsInstance(writer, DummyClass)
+ df = tm.makeCustomDataframe(1, 1)
+ panel = tm.makePanel()
+ func = lambda: df.to_excel('something.test')
+ check_called(func)
+ check_called(lambda: panel.to_excel('something.test'))
+ check_called(lambda: df.to_excel('something.xlsx'))
+ check_called(lambda: df.to_excel('something.xls', engine='dummy'))
+
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/tests/frame/test_asof.py b/pandas/tests/frame/test_asof.py
index fea6a5370109e..4b40f6d7e6be4 100644
--- a/pandas/tests/frame/test_asof.py
+++ b/pandas/tests/frame/test_asof.py
@@ -1,6 +1,6 @@
# coding=utf-8
-
import numpy as np
+import pytest
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
@@ -106,3 +106,21 @@ def test_all_nans(self):
result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof(3)
expected = Series(np.nan, index=['A', 'B'], name=3)
tm.assert_series_equal(result, expected)
+
+ # Testing awareness of DataFrame index considering different
+ # UTC and timezone
+ @pytest.mark.parametrize(
+ "stamp,expected",
+ [(Timestamp('2018-01-01 23:22:43.325+00:00'),
+ Series(2.0, name=Timestamp('2018-01-01 23:22:43.325+00:00'))),
+ (Timestamp('2018-01-01 22:33:20.682+01:00'),
+ Series(1.0, name=Timestamp('2018-01-01 22:33:20.682+01:00'))),
+ ]
+ )
+ def test_time_zone_aware_index(self, stamp, expected):
+ # GH21194
+ df = DataFrame(data=[1, 2],
+ index=[Timestamp('2018-01-01 21:00:05.001+00:00'),
+ Timestamp('2018-01-01 22:35:10.550+00:00')])
+ result = df.asof(stamp)
+ tm.assert_series_equal(result, expected)
| - [x] closes #21194
- [x] tests added / passed
- [x] whatsnew entry
- modified whatsnew.v0.23.1.txt
| https://api.github.com/repos/pandas-dev/pandas/pulls/21284 | 2018-06-01T11:32:43Z | 2018-08-04T13:14:59Z | null | 2018-08-04T13:14:59Z |
BUG: Using DatetimeIndex.date with timezone returns incorrect date #2… | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 9c29c34adb7dd..974527624a312 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -86,8 +86,10 @@ Indexing
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
+- Bug in :attr:`DatetimeIndex.date` where an incorrect date is returned when the input date has a non-UTC timezone (:issue:`21230`)
- Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`)
- Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`)
+- Bug in :attr:`DatetimeIndex.time` where given a tz-aware Timestamp, a tz-aware Time is returned instead of tz-naive (:issue:`21267`)
-
I/O
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 17453d8af1297..0f58cfa761f21 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -77,7 +77,7 @@ cdef inline object create_time_from_ts(
int64_t value, pandas_datetimestruct dts,
object tz, object freq):
""" convenience routine to construct a datetime.time from its parts """
- return time(dts.hour, dts.min, dts.sec, dts.us, tz)
+ return time(dts.hour, dts.min, dts.sec, dts.us)
def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 83950f1d71633..0ddf33cdcae73 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -2032,7 +2032,16 @@ def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
- return libts.ints_to_pydatetime(self.asi8, self.tz, box="time")
+
+ # If the Timestamps have a timezone that is not UTC,
+ # convert them into their i8 representation while
+ # keeping their timezone and not using UTC
+ if (self.tz is not None and self.tz is not utc):
+ timestamps = self._local_timestamps()
+ else:
+ timestamps = self.asi8
+
+ return libts.ints_to_pydatetime(timestamps, box="time")
@property
def date(self):
@@ -2040,7 +2049,16 @@ def date(self):
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
- return libts.ints_to_pydatetime(self.normalize().asi8, box="date")
+
+ # If the Timestamps have a timezone that is not UTC,
+ # convert them into their i8 representation while
+ # keeping their timezone and not using UTC
+ if (self.tz is not None and self.tz is not utc):
+ timestamps = self._local_timestamps()
+ else:
+ timestamps = self.asi8
+
+ return libts.ints_to_pydatetime(timestamps, box="date")
def normalize(self):
"""
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 09210d8b64d1b..573940edaa08f 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -2,7 +2,7 @@
"""
Tests for DatetimeIndex timezone-related methods
"""
-from datetime import datetime, timedelta, tzinfo
+from datetime import datetime, timedelta, tzinfo, date, time
from distutils.version import LooseVersion
import pytest
@@ -706,6 +706,32 @@ def test_join_utc_convert(self, join_type):
assert isinstance(result, DatetimeIndex)
assert result.tz.zone == 'UTC'
+ @pytest.mark.parametrize("dtype", [
+ None, 'datetime64[ns, CET]',
+ 'datetime64[ns, EST]', 'datetime64[ns, UTC]'
+ ])
+ def test_date_accessor(self, dtype):
+ # Regression test for GH#21230
+ expected = np.array([date(2018, 6, 4), pd.NaT])
+
+ index = DatetimeIndex(['2018-06-04 10:00:00', pd.NaT], dtype=dtype)
+ result = index.date
+
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [
+ None, 'datetime64[ns, CET]',
+ 'datetime64[ns, EST]', 'datetime64[ns, UTC]'
+ ])
+ def test_time_accessor(self, dtype):
+ # Regression test for GH#21267
+ expected = np.array([time(10, 20, 30), pd.NaT])
+
+ index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], dtype=dtype)
+ result = index.time
+
+ tm.assert_numpy_array_equal(result, expected)
+
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
|
- [ ] closes #21230, closes https://github.com/pandas-dev/pandas/issues/21267
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Added 2 tests for DatetimeIndex.date in tests/indexes/datetimes/test_datetime.py. Please let me know if I should move them somewhere else. | https://api.github.com/repos/pandas-dev/pandas/pulls/21281 | 2018-06-01T01:51:48Z | 2018-06-07T09:39:25Z | 2018-06-07T09:39:24Z | 2018-06-12T16:30:35Z |
BUG: Fix encoding for Stata format 118 files | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 69b07d12c1e98..bddb9d3b8e2a7 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -95,6 +95,7 @@ I/O
- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`)
- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`)
- Bug in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
+- Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`)
-
Plotting
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 2797924985c70..8584e1f0e3f14 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -182,7 +182,7 @@ def read_stata(filepath_or_buffer, convert_dates=True,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
- chunksize=chunksize, encoding=encoding)
+ chunksize=chunksize)
if iterator or chunksize:
data = reader
@@ -838,15 +838,8 @@ def get_base_missing_value(cls, dtype):
class StataParser(object):
- _default_encoding = 'latin-1'
- def __init__(self, encoding):
- if encoding is not None:
- if encoding not in VALID_ENCODINGS:
- raise ValueError('Unknown encoding. Only latin-1 and ascii '
- 'supported.')
-
- self._encoding = encoding
+ def __init__(self):
# type code.
# --------------------
@@ -964,8 +957,8 @@ def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index_col=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
- encoding='latin-1', chunksize=None):
- super(StataReader, self).__init__(encoding)
+ encoding=None, chunksize=None):
+ super(StataReader, self).__init__()
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
@@ -977,10 +970,6 @@ def __init__(self, path_or_buf, convert_dates=True,
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
- if encoding is not None:
- if encoding not in VALID_ENCODINGS:
- raise ValueError('Unknown encoding. Only latin-1 and ascii '
- 'supported.')
self._encoding = encoding
self._chunksize = chunksize
@@ -998,18 +987,13 @@ def __init__(self, path_or_buf, convert_dates=True,
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _, should_close = get_filepath_or_buffer(
- path_or_buf, encoding=self._default_encoding
- )
+ path_or_buf)
if isinstance(path_or_buf, (str, text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
- try:
- contents = contents.encode(self._default_encoding)
- except:
- pass
self.path_or_buf = BytesIO(contents)
self._read_header()
@@ -1030,6 +1014,15 @@ def close(self):
except IOError:
pass
+ def _set_encoding(self):
+ """
+ Set string encoding which depends on file version
+ """
+ if self.format_version < 118:
+ self._encoding = 'latin-1'
+ else:
+ self._encoding = 'utf-8'
+
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
@@ -1049,6 +1042,7 @@ def _read_new_header(self, first_char):
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
+ self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b'MSF' and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
@@ -1235,6 +1229,7 @@ def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
+ self._set_encoding()
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
@@ -1338,16 +1333,9 @@ def _decode(self, s):
return s.decode('utf-8')
def _null_terminate(self, s):
- if compat.PY3 or self._encoding is not None:
- # have bytes not strings, so must decode
- s = s.partition(b"\0")[0]
- return s.decode(self._encoding or self._default_encoding)
- else:
- null_byte = "\0"
- try:
- return s.lstrip(null_byte)[:s.index(null_byte)]
- except:
- return s
+ # have bytes not strings, so must decode
+ s = s.partition(b"\0")[0]
+ return s.decode(self._encoding)
def _read_value_labels(self):
if self._value_labels_read:
@@ -1433,10 +1421,7 @@ def _read_strls(self):
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
- encoding = 'utf-8'
- if self.format_version == 117:
- encoding = self._encoding or self._default_encoding
- va = va[0:-1].decode(encoding)
+ va = va[0:-1].decode(self._encoding)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = va
@@ -1980,9 +1965,14 @@ class StataWriter(StataParser):
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
- super(StataWriter, self).__init__(encoding)
+ super(StataWriter, self).__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
+ if encoding is not None:
+ if encoding not in VALID_ENCODINGS:
+ raise ValueError('Unknown encoding. Only latin-1 and ascii '
+ 'supported.')
+ self._encoding = encoding
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
diff --git a/pandas/tests/io/data/stata16_118.dta b/pandas/tests/io/data/stata16_118.dta
new file mode 100644
index 0000000000000..49cfa49d1b302
Binary files /dev/null and b/pandas/tests/io/data/stata16_118.dta differ
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index f3a465da4e87f..e5585902a9dd6 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -96,6 +96,7 @@ def setup_method(self, method):
self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
+ self.dta25_118 = os.path.join(self.dirpath, 'stata16_118.dta')
self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta')
@@ -363,19 +364,14 @@ def test_encoding(self, version):
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
- if compat.PY3:
- expected = raw.kreis1849[0]
- assert result == expected
- assert isinstance(result, compat.string_types)
- else:
- expected = raw.kreis1849.str.decode("latin-1")[0]
- assert result == expected
- assert isinstance(result, unicode) # noqa
+ expected = raw.kreis1849[0]
+ assert result == expected
+ assert isinstance(result, compat.string_types)
with tm.ensure_clean() as path:
encoded.to_stata(path, encoding='latin-1',
write_index=False, version=version)
- reread_encoded = read_stata(path, encoding='latin-1')
+ reread_encoded = read_stata(path)
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
@@ -1500,3 +1496,18 @@ def test_gzip_writing(self):
with gzip.GzipFile(path, 'rb') as gz:
reread = pd.read_stata(gz, index_col='index')
tm.assert_frame_equal(df, reread)
+
+ def test_unicode_dta_118(self):
+ unicode_df = self.read_dta(self.dta25_118)
+
+ columns = ['utf8', 'latin1', 'ascii', 'utf8_strl', 'ascii_strl']
+ values = [[u'ραηδας', u'PÄNDÄS', 'p', u'ραηδας', 'p'],
+ [u'ƤĀńĐąŜ', u'Ö', 'a', u'ƤĀńĐąŜ', 'a'],
+ [u'ᴘᴀᴎᴅᴀS', u'Ü', 'n', u'ᴘᴀᴎᴅᴀS', 'n'],
+ [' ', ' ', 'd', ' ', 'd'],
+ [' ', '', 'a', ' ', 'a'],
+ ['', '', 's', '', 's'],
+ ['', '', ' ', '', ' ']]
+ expected = pd.DataFrame(values, columns=columns)
+
+ tm.assert_frame_equal(unicode_df, expected)
| Ensure that Stata 118 files always use utf-8 encoding
- [x] closes #21244
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21279 | 2018-05-31T23:35:47Z | 2018-06-06T13:15:24Z | 2018-06-06T13:15:24Z | 2019-03-21T13:27:46Z |
Add missing period to get_dummies docs | diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 0829aa8f5a509..2757e0797a410 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -725,7 +725,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
- String to append DataFrame column names
+ String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
| Newline characters are stripped from the docstring when formatting parameter documentation. Because of this, the docs for the `prefix` parameter of [`pandas.get_dummies`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html) look like a single run-on sentence. Added a period to the end of the first sentence to fix this. | https://api.github.com/repos/pandas-dev/pandas/pulls/21277 | 2018-05-31T19:28:46Z | 2018-06-01T00:14:34Z | 2018-06-01T00:14:34Z | 2018-06-09T09:18:47Z |
PERF: improve performance of NDFrame.describe | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 4ff71c706cd34..12e4824b2dd2a 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -512,3 +512,21 @@ def time_nlargest(self, keep):
def time_nsmallest(self, keep):
self.df.nsmallest(100, 'A', keep=keep)
+
+
+class Describe(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ self.df = DataFrame({
+ 'a': np.random.randint(0, 100, int(1e6)),
+ 'b': np.random.randint(0, 100, int(1e6)),
+ 'c': np.random.randint(0, 100, int(1e6))
+ })
+
+ def time_series_describe(self):
+ self.df['a'].describe()
+
+ def time_dataframe_describe(self):
+ self.df.describe()
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 6cbc19cca99e1..c69de149a0f35 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -63,8 +63,7 @@ Removal of prior version deprecations/changes
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
--
--
+- Improved performance of :func:`Series.describe` in case of numeric dtpyes (:issue:`21274`)
-
.. _whatsnew_0240.docs:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9e4eda1bc4dc7..2adc15651ffca 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8519,7 +8519,7 @@ def describe_numeric_1d(series):
stat_index = (['count', 'mean', 'std', 'min'] +
formatted_percentiles + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
- [series.quantile(x) for x in percentiles] + [series.max()])
+ series.quantile(percentiles).tolist() + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
| A one-line change that enables to calculate the percentiles in `describe` more efficiently. The point is that calculating percentiles in one pass is faster than separately.
`describe` (with default `percentiles` argument) becomes 25-30% faster than before for numerical Series and DataFrames.
### Setup
```
import timeit
setup = '''
import numpy as np
import pandas as pd
np.random.seed(123)
s = pd.Series(np.random.randint(0, 100, 1000000))
'''
```
### Benchmark
```
min(timeit.Timer('s.describe()', setup=setup).repeat(100, 1))
```
### Results
On master:
```
0.06349272100487724
```
With this change:
```
0.04745814300258644
```
Results are similar for DataFrames.
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21274 | 2018-05-31T12:11:36Z | 2018-06-05T17:01:14Z | 2018-06-05T17:01:14Z | 2018-06-08T08:12:31Z |
Fix an attribute error in __init__.py | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 97ae73174c09c..da5257257ac50 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -67,7 +67,7 @@
parser = _DeprecatedModule(deprmod='pandas.parser',
removals=['na_values'],
moved={'CParserError': 'pandas.errors.ParserError'})
-lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto=False,
+lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto='pandas._libs.lib',
moved={'Timestamp': 'pandas.Timestamp',
'Timedelta': 'pandas.Timedelta',
'NaT': 'pandas.NaT',
| This fixes a problem, where a bool is treated like a string.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21273 | 2018-05-31T11:53:02Z | 2018-06-19T00:49:34Z | null | 2018-06-19T00:49:34Z |
CI: bump numpy on appveyor ci | diff --git a/ci/appveyor-27.yaml b/ci/appveyor-27.yaml
index 84107c605b14f..cfc6a796bd77e 100644
--- a/ci/appveyor-27.yaml
+++ b/ci/appveyor-27.yaml
@@ -11,9 +11,9 @@ dependencies:
- lxml
- matplotlib
- numexpr
- - numpy=1.10*
+ - numpy=1.12*
- openpyxl
- - pytables==3.2.2
+ - pytables
- python=2.7.*
- pytz
- s3fs
diff --git a/ci/appveyor-36.yaml b/ci/appveyor-36.yaml
index 5e370de39958a..868724419c464 100644
--- a/ci/appveyor-36.yaml
+++ b/ci/appveyor-36.yaml
@@ -9,7 +9,7 @@ dependencies:
- feather-format
- matplotlib
- numexpr
- - numpy=1.13*
+ - numpy=1.14*
- openpyxl
- pyarrow
- pytables
| https://api.github.com/repos/pandas-dev/pandas/pulls/21271 | 2018-05-31T10:25:50Z | 2018-05-31T15:59:16Z | 2018-05-31T15:59:16Z | 2018-05-31T15:59:37Z | |
DOC: fill in class names for rename methods | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0899e9cd87aba..1c7339a91c2fd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3720,7 +3720,7 @@ def rename(self, *args, **kwargs):
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
- Whether to return a new %(klass)s. If True then value of copy is
+ Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f25f73513df30..d59401414181f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3269,7 +3269,7 @@ def rename(self, index=None, **kwargs):
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
- Whether to return a new %(klass)s. If True then value of copy is
+ Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
| The documentation for the `inplace` parameter has the `%(klass)` placeholder from the original NDFrame. This fills in the appropriate values in the following places:
* http://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DataFrame.rename.html
* http://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.rename.html | https://api.github.com/repos/pandas-dev/pandas/pulls/21268 | 2018-05-31T04:53:13Z | 2018-05-31T10:19:19Z | 2018-05-31T10:19:19Z | 2018-06-08T17:20:02Z |
TST: Add failing tests for minute rounding | diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index aecddab8477fc..0336ea41c4e3f 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -118,6 +118,25 @@ def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
expected = Timestamp(expected)
assert result == expected
+ @pytest.mark.parametrize('test_input, freq, expected', [
+ ('2018-01-01 00:02:00', '2T', '2018-01-01 00:02:00'),
+ ('2018-01-01 00:04:00', '4T', '2018-01-01 00:04:00'),
+ ('2018-01-01 00:15:00', '15T', '2018-01-01 00:15:00'),
+ ('2018-01-01 00:20:00', '20T', '2018-01-01 00:20:00'),
+ ])
+ def test_round_minute_freq(self, test_input, freq, expected):
+ # ensure timestamps that shouldn't round don't
+ # GH#21262
+ dt = Timestamp(test_input)
+ expected = Timestamp(expected)
+
+ result_ceil = dt.ceil(freq)
+ assert result_ceil == expected
+ result_floor = dt.floor(freq)
+ assert result_floor == expected
+ result_round = dt.round(freq)
+ assert result_round == expected
+
def test_ceil(self):
dt = Timestamp('20130101 09:10:11')
result = dt.ceil('D')
| For some minute frequencies, `.ceil()` will round up even if it should not.
- [ ] closes #21262
- [x] tests added
- [ ] tests passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21265 | 2018-05-30T23:13:29Z | 2018-06-19T00:50:13Z | null | 2018-06-29T08:54:20Z |
DOC: Fix renaming categories section | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index e4ce7ebd01dac..c6827f67a390b 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -358,10 +358,10 @@ Renaming categories is done by assigning new values to the
s
s.cat.categories = ["Group %s" % g for g in s.cat.categories]
s
- s.cat.rename_categories([1,2,3])
+ s = s.cat.rename_categories([1,2,3])
s
# You can also pass a dict-like object to map the renaming
- s.cat.rename_categories({1: 'x', 2: 'y', 3: 'z'})
+ s = s.cat.rename_categories({1: 'x', 2: 'y', 3: 'z'})
s
.. note::
| The categories were not correctly saved so that the renaming using the
dict-like object had no effect.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21264 | 2018-05-30T22:29:56Z | 2018-05-31T08:29:23Z | 2018-05-31T08:29:23Z | 2018-05-31T08:30:30Z |
Color text based on background color when using `_background_gradient()` | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index e931450cb5c01..0c604f9aad993 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -181,7 +181,7 @@ Reshaping
Other
^^^^^
--
+- :meth: `~pandas.io.formats.style.Styler.background_gradient` now takes a ``text_color_threshold`` parameter to automatically lighten the text color based on the luminance of the background color. This improves readability with dark background colors without the need to limit the background colormap range. (:issue:`21258`)
-
-
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index f876ceb8a26bf..668cafec4b522 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -863,7 +863,7 @@ def highlight_null(self, null_color='red'):
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
- subset=None):
+ subset=None, text_color_threshold=0.408):
"""
Color the background in a gradient according to
the data in each column (optionally row).
@@ -879,6 +879,12 @@ def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
1 or 'columns' for columnwise, 0 or 'index' for rowwise
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
+ text_color_threshold: float or int
+ luminance threshold for determining text color. Facilitates text
+ visibility across varying background colors. From 0 to 1.
+ 0 = all text is dark colored, 1 = all text is light colored.
+
+ .. versionadded:: 0.24.0
Returns
-------
@@ -886,19 +892,26 @@ def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
Notes
-----
- Tune ``low`` and ``high`` to keep the text legible by
- not using the entire range of the color map. These extend
- the range of the data by ``low * (x.max() - x.min())``
- and ``high * (x.max() - x.min())`` before normalizing.
+ Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
+ text legible by not using the entire range of the color map. The range
+ of the data is extended by ``low * (x.max() - x.min())`` and ``high *
+ (x.max() - x.min())`` before normalizing.
+
+ Raises
+ ------
+ ValueError
+ If ``text_color_threshold`` is not a value from 0 to 1.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
- axis=axis, low=low, high=high)
+ axis=axis, low=low, high=high,
+ text_color_threshold=text_color_threshold)
return self
@staticmethod
- def _background_gradient(s, cmap='PuBu', low=0, high=0):
+ def _background_gradient(s, cmap='PuBu', low=0, high=0,
+ text_color_threshold=0.408):
"""Color background in a range according to the data."""
with _mpl(Styler.background_gradient) as (plt, colors):
rng = s.max() - s.min()
@@ -909,8 +922,39 @@ def _background_gradient(s, cmap='PuBu', low=0, high=0):
# https://github.com/matplotlib/matplotlib/issues/5427
normed = norm(s.values)
c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
- return ['background-color: {color}'.format(color=color)
- for color in c]
+ if (not isinstance(text_color_threshold, (float, int)) or
+ not 0 <= text_color_threshold <= 1):
+ msg = "`text_color_threshold` must be a value from 0 to 1."
+ raise ValueError(msg)
+
+ def relative_luminance(color):
+ """
+ Calculate relative luminance of a color.
+
+ The calculation adheres to the W3C standards
+ (https://www.w3.org/WAI/GL/wiki/Relative_luminance)
+
+ Parameters
+ ----------
+ color : matplotlib color
+ Hex code, rgb-tuple, or HTML color name.
+
+ Returns
+ -------
+ float
+ The relative luminance as a value from 0 to 1
+ """
+ rgb = colors.colorConverter.to_rgba_array(color)[:, :3]
+ rgb = np.where(rgb <= .03928, rgb / 12.92,
+ ((rgb + .055) / 1.055) ** 2.4)
+ lum = rgb.dot([.2126, .7152, .0722])
+ return lum.item()
+
+ text_colors = ['#f1f1f1' if relative_luminance(x) <
+ text_color_threshold else '#000000' for x in c]
+
+ return ['background-color: {color};color: {tc}'.format(
+ color=color, tc=tc) for color, tc in zip(c, text_colors)]
def set_properties(self, subset=None, **kwargs):
"""
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index c1ab9cd184340..b355cda8df1bd 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -1017,9 +1017,9 @@ def test_hide_columns_mult_levels(self):
assert ctx['body'][1][2]['display_value'] == 3
+@td.skip_if_no_mpl
class TestStylerMatplotlibDep(object):
- @td.skip_if_no_mpl
def test_background_gradient(self):
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
@@ -1031,7 +1031,30 @@ def test_background_gradient(self):
result = df.style.background_gradient(
subset=pd.IndexSlice[1, 'A'])._compute().ctx
- assert result[(1, 0)] == ['background-color: #fff7fb']
+
+ assert result[(1, 0)] == ['background-color: #fff7fb',
+ 'color: #000000']
+
+ @pytest.mark.parametrize(
+ 'c_map,expected', [
+ (None, {
+ (0, 0): ['background-color: #440154', 'color: #f1f1f1'],
+ (1, 0): ['background-color: #fde725', 'color: #000000']}),
+ ('YlOrRd', {
+ (0, 0): ['background-color: #ffffcc', 'color: #000000'],
+ (1, 0): ['background-color: #800026', 'color: #f1f1f1']})])
+ def test_text_color_threshold(self, c_map, expected):
+ df = pd.DataFrame([1, 2], columns=['A'])
+ result = df.style.background_gradient(cmap=c_map)._compute().ctx
+ assert result == expected
+
+ @pytest.mark.parametrize("text_color_threshold", [1.1, '1', -1, [2, 2]])
+ def test_text_color_threshold_raises(self, text_color_threshold):
+ df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
+ msg = "`text_color_threshold` must be a value from 0 to 1."
+ with tm.assert_raises_regex(ValueError, msg):
+ df.style.background_gradient(
+ text_color_threshold=text_color_threshold)._compute()
def test_block_names():
| The purpose of this PR is to automatically color text dark or light based on the background color of the HTML table:
================`old behavior` ===========================`new behavior`========
 
As described in #21258, I use the luminance-based approach from `seaborn`'s annotated heatmaps. Tagging @WillAyd who commented on that issue. A few comments on this PR
1. Initially, I was not sure if defining the `relative_luminance()` method within `_background_gradient()` was the right way to go, but I opted for this since I saw the same approach elsewhere in the file. Let me know if you prefer a different approach.
2. I am not sure how intuitive it is that a parameter named `text_color` takes a numeric argument and not a color, but I think it is a good name for discoverability. Naming it `luminance_threshold` or similar might be confusing for users looking for a way to change the text color.
3. I opted to make the light text not completely white. The focus should be the background color so the text should not pop out too much. Thoughts?
================`#ffffff` ============================`#f1f1f1` (current choice)===
 
 
4. I think 0.2 is a good default threshold value for `text_color` based on my own qualitative assessment, feel free to disagree. The seaborn default of 0.4 makes too much text white in my opinion. Since colors and contrast can be quite subjective, I thought I would include some comparisons.
============`text_color=0.4` ======================`text_color=0.2` (current choice)===
 
 
 
This is my first PR, apologies if I have misunderstood something.
- [x] closes #21258
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21263 | 2018-05-30T20:41:26Z | 2018-06-07T12:52:21Z | 2018-06-07T12:52:20Z | 2018-06-07T19:26:03Z |
ENH: Support ExtensionArray operators via a mixin | diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index 8018d35770924..38b3b19031a0e 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -61,7 +61,7 @@ Extension Types
.. warning::
- The :class:`pandas.api.extension.ExtensionDtype` and :class:`pandas.api.extension.ExtensionArray` APIs are new and
+ The :class:`pandas.api.extensions.ExtensionDtype` and :class:`pandas.api.extensions.ExtensionArray` APIs are new and
experimental. They may change between versions without warning.
Pandas defines an interface for implementing data types and arrays that *extend*
@@ -79,10 +79,10 @@ on :ref:`ecosystem.extensions`.
The interface consists of two classes.
-:class:`~pandas.api.extension.ExtensionDtype`
+:class:`~pandas.api.extensions.ExtensionDtype`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A :class:`pandas.api.extension.ExtensionDtype` is similar to a ``numpy.dtype`` object. It describes the
+A :class:`pandas.api.extensions.ExtensionDtype` is similar to a ``numpy.dtype`` object. It describes the
data type. Implementors are responsible for a few unique items like the name.
One particularly important item is the ``type`` property. This should be the
@@ -91,7 +91,7 @@ extension array for IP Address data, this might be ``ipaddress.IPv4Address``.
See the `extension dtype source`_ for interface definition.
-:class:`~pandas.api.extension.ExtensionArray`
+:class:`~pandas.api.extensions.ExtensionArray`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This class provides all the array-like functionality. ExtensionArrays are
@@ -113,6 +113,54 @@ by some other storage type, like Python lists.
See the `extension array source`_ for the interface definition. The docstrings
and comments contain guidance for properly implementing the interface.
+.. _extending.extension.operator:
+
+:class:`~pandas.api.extensions.ExtensionArray` Operator Support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 0.24.0
+
+By default, there are no operators defined for the class :class:`~pandas.api.extensions.ExtensionArray`.
+There are two approaches for providing operator support for your ExtensionArray:
+
+1. Define each of the operators on your ``ExtensionArray`` subclass.
+2. Use an operator implementation from pandas that depends on operators that are already defined
+ on the underlying elements (scalars) of the ExtensionArray.
+
+For the first approach, you define selected operators, e.g., ``__add__``, ``__le__``, etc. that
+you want your ``ExtensionArray`` subclass to support.
+
+The second approach assumes that the underlying elements (i.e., scalar type) of the ``ExtensionArray``
+have the individual operators already defined. In other words, if your ``ExtensionArray``
+named ``MyExtensionArray`` is implemented so that each element is an instance
+of the class ``MyExtensionElement``, then if the operators are defined
+for ``MyExtensionElement``, the second approach will automatically
+define the operators for ``MyExtensionArray``.
+
+A mixin class, :class:`~pandas.api.extensions.ExtensionScalarOpsMixin` supports this second
+approach. If developing an ``ExtensionArray`` subclass, for example ``MyExtensionArray``,
+can simply include ``ExtensionScalarOpsMixin`` as a parent class of ``MyExtensionArray``,
+and then call the methods :meth:`~MyExtensionArray._add_arithmetic_ops` and/or
+:meth:`~MyExtensionArray._add_comparison_ops` to hook the operators into
+your ``MyExtensionArray`` class, as follows:
+
+.. code-block:: python
+
+ class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
+ pass
+
+ MyExtensionArray._add_arithmetic_ops()
+ MyExtensionArray._add_comparison_ops()
+
+Note that since ``pandas`` automatically calls the underlying operator on each
+element one-by-one, this might not be as performant as implementing your own
+version of the associated operators directly on the ``ExtensionArray``.
+
+.. _extending.extension.testing:
+
+Testing Extension Arrays
+^^^^^^^^^^^^^^^^^^^^^^^^
+
We provide a test suite for ensuring that your extension arrays satisfy the expected
behavior. To use the test suite, you must provide several pytest fixtures and inherit
from the base test class. The required fixtures are found in
@@ -174,11 +222,11 @@ There are 3 constructor properties to be defined:
Following table shows how ``pandas`` data structures define constructor properties by default.
=========================== ======================= =============
-Property Attributes ``Series`` ``DataFrame``
+Property Attributes ``Series`` ``DataFrame``
=========================== ======================= =============
-``_constructor`` ``Series`` ``DataFrame``
-``_constructor_sliced`` ``NotImplementedError`` ``Series``
-``_constructor_expanddim`` ``DataFrame`` ``Panel``
+``_constructor`` ``Series`` ``DataFrame``
+``_constructor_sliced`` ``NotImplementedError`` ``Series``
+``_constructor_expanddim`` ``DataFrame`` ``Panel``
=========================== ======================= =============
Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame`` overriding constructor properties.
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 1ab67bd80a5e8..2b38e7b1d5cc3 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -10,6 +10,22 @@ New features
- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`)
+.. _whatsnew_0240.enhancements.extension_array_operators
+
+``ExtensionArray`` operator support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison
+operators. (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``:
+
+1. Define each of the operators on your ``ExtensionArray`` subclass.
+2. Use an operator implementation from pandas that depends on operators that are already defined
+ on the underlying elements (scalars) of the ``ExtensionArray``.
+
+See the :ref:`ExtensionArray Operator Support
+<extending.extension.operator>` documentation section for details on both
+ways of adding operator support.
+
.. _whatsnew_0240.enhancements.other:
Other Enhancements
diff --git a/pandas/api/extensions/__init__.py b/pandas/api/extensions/__init__.py
index 3e6e192a3502c..851a63725952a 100644
--- a/pandas/api/extensions/__init__.py
+++ b/pandas/api/extensions/__init__.py
@@ -3,5 +3,6 @@
register_index_accessor,
register_series_accessor)
from pandas.core.algorithms import take # noqa
-from pandas.core.arrays.base import ExtensionArray # noqa
+from pandas.core.arrays.base import (ExtensionArray, # noqa
+ ExtensionScalarOpsMixin)
from pandas.core.dtypes.dtypes import ExtensionDtype # noqa
diff --git a/pandas/conftest.py b/pandas/conftest.py
index ae08e0817de29..8ca90722d17f7 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -89,7 +89,8 @@ def observed(request):
'__mul__', '__rmul__',
'__floordiv__', '__rfloordiv__',
'__truediv__', '__rtruediv__',
- '__pow__', '__rpow__']
+ '__pow__', '__rpow__',
+ '__mod__', '__rmod__']
if not PY3:
_all_arithmetic_operators.extend(['__div__', '__rdiv__'])
@@ -102,6 +103,22 @@ def all_arithmetic_operators(request):
return request.param
+@pytest.fixture(params=['__eq__', '__ne__', '__le__',
+ '__lt__', '__ge__', '__gt__'])
+def all_compare_operators(request):
+ """
+ Fixture for dunder names for common compare operations
+
+ * >=
+ * >
+ * ==
+ * !=
+ * <
+ * <=
+ """
+ return request.param
+
+
@pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression(request):
@@ -320,20 +337,3 @@ def mock():
return importlib.import_module("unittest.mock")
else:
return pytest.importorskip("mock")
-
-
-@pytest.fixture(params=['__eq__', '__ne__', '__le__',
- '__lt__', '__ge__', '__gt__'])
-def all_compare_operators(request):
- """
- Fixture for dunder names for common compare operations
-
- * >=
- * >
- * ==
- * !=
- * <
- * <=
- """
-
- return request.param
diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py
index f8adcf520c15b..f57348116c195 100644
--- a/pandas/core/arrays/__init__.py
+++ b/pandas/core/arrays/__init__.py
@@ -1,2 +1,3 @@
-from .base import ExtensionArray # noqa
+from .base import (ExtensionArray, # noqa
+ ExtensionScalarOpsMixin)
from .categorical import Categorical # noqa
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 30949ca6d1d6b..a572fff1c44d7 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -7,8 +7,13 @@
"""
import numpy as np
+import operator
+
from pandas.errors import AbstractMethodError
from pandas.compat.numpy import function as nv
+from pandas.compat import set_function_name, PY3
+from pandas.core.dtypes.common import is_list_like
+from pandas.core import ops
_not_implemented_message = "{} does not implement {}."
@@ -610,3 +615,125 @@ def _ndarray_values(self):
used for interacting with our indexers.
"""
return np.array(self)
+
+
+class ExtensionOpsMixin(object):
+ """
+ A base class for linking the operators to their dunder names
+ """
+ @classmethod
+ def _add_arithmetic_ops(cls):
+ cls.__add__ = cls._create_arithmetic_method(operator.add)
+ cls.__radd__ = cls._create_arithmetic_method(ops.radd)
+ cls.__sub__ = cls._create_arithmetic_method(operator.sub)
+ cls.__rsub__ = cls._create_arithmetic_method(ops.rsub)
+ cls.__mul__ = cls._create_arithmetic_method(operator.mul)
+ cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)
+ cls.__pow__ = cls._create_arithmetic_method(operator.pow)
+ cls.__rpow__ = cls._create_arithmetic_method(ops.rpow)
+ cls.__mod__ = cls._create_arithmetic_method(operator.mod)
+ cls.__rmod__ = cls._create_arithmetic_method(ops.rmod)
+ cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv)
+ cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv)
+ cls.__truediv__ = cls._create_arithmetic_method(operator.truediv)
+ cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv)
+ if not PY3:
+ cls.__div__ = cls._create_arithmetic_method(operator.div)
+ cls.__rdiv__ = cls._create_arithmetic_method(ops.rdiv)
+
+ cls.__divmod__ = cls._create_arithmetic_method(divmod)
+ cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)
+
+ @classmethod
+ def _add_comparison_ops(cls):
+ cls.__eq__ = cls._create_comparison_method(operator.eq)
+ cls.__ne__ = cls._create_comparison_method(operator.ne)
+ cls.__lt__ = cls._create_comparison_method(operator.lt)
+ cls.__gt__ = cls._create_comparison_method(operator.gt)
+ cls.__le__ = cls._create_comparison_method(operator.le)
+ cls.__ge__ = cls._create_comparison_method(operator.ge)
+
+
+class ExtensionScalarOpsMixin(ExtensionOpsMixin):
+ """A mixin for defining the arithmetic and logical operations on
+ an ExtensionArray class, where it is assumed that the underlying objects
+ have the operators already defined.
+
+ Usage
+ ------
+ If you have defined a subclass MyExtensionArray(ExtensionArray), then
+ use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
+ get the arithmetic operators. After the definition of MyExtensionArray,
+ insert the lines
+
+ MyExtensionArray._add_arithmetic_ops()
+ MyExtensionArray._add_comparison_ops()
+
+ to link the operators to your class.
+ """
+
+ @classmethod
+ def _create_method(cls, op, coerce_to_dtype=True):
+ """
+ A class method that returns a method that will correspond to an
+ operator for an ExtensionArray subclass, by dispatching to the
+ relevant operator defined on the individual elements of the
+ ExtensionArray.
+
+ Parameters
+ ----------
+ op : function
+ An operator that takes arguments op(a, b)
+ coerce_to_dtype : bool
+ boolean indicating whether to attempt to convert
+ the result to the underlying ExtensionArray dtype
+ (default True)
+
+ Returns
+ -------
+ A method that can be bound to a method of a class
+
+ Example
+ -------
+ Given an ExtensionArray subclass called MyExtensionArray, use
+
+ >>> __add__ = cls._create_method(operator.add)
+
+ in the class definition of MyExtensionArray to create the operator
+ for addition, that will be based on the operator implementation
+ of the underlying elements of the ExtensionArray
+
+ """
+
+ def _binop(self, other):
+ def convert_values(param):
+ if isinstance(param, ExtensionArray) or is_list_like(param):
+ ovalues = param
+ else: # Assume its an object
+ ovalues = [param] * len(self)
+ return ovalues
+ lvalues = self
+ rvalues = convert_values(other)
+
+ # If the operator is not defined for the underlying objects,
+ # a TypeError should be raised
+ res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
+
+ if coerce_to_dtype:
+ try:
+ res = self._from_sequence(res)
+ except TypeError:
+ pass
+
+ return res
+
+ op_name = ops._get_op_name(op, True)
+ return set_function_name(_binop, op_name, cls)
+
+ @classmethod
+ def _create_arithmetic_method(cls, op):
+ return cls._create_method(op)
+
+ @classmethod
+ def _create_comparison_method(cls, op):
+ return cls._create_method(op, coerce_to_dtype=False)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 540ebeee438f6..fa6d88648cc63 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -33,6 +33,7 @@
is_bool_dtype,
is_list_like,
is_scalar,
+ is_extension_array_dtype,
_ensure_object)
from pandas.core.dtypes.cast import (
maybe_upcast_putmask, find_common_type,
@@ -993,6 +994,26 @@ def _construct_divmod_result(left, result, index, name, dtype):
)
+def dispatch_to_extension_op(op, left, right):
+ """
+ Assume that left or right is a Series backed by an ExtensionArray,
+ apply the operator defined by op.
+ """
+
+ # The op calls will raise TypeError if the op is not defined
+ # on the ExtensionArray
+ if is_extension_array_dtype(left):
+ res_values = op(left.values, right)
+ else:
+ # We know that left is not ExtensionArray and is Series and right is
+ # ExtensionArray. Want to force ExtensionArray op to get called
+ res_values = op(list(left.values), right.values)
+
+ res_name = get_op_result_name(left, right)
+ return left._constructor(res_values, index=left.index,
+ name=res_name)
+
+
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
@@ -1061,6 +1082,11 @@ def wrapper(left, right):
raise TypeError("{typ} cannot perform the operation "
"{op}".format(typ=type(left).__name__, op=str_rep))
+ elif (is_extension_array_dtype(left) or
+ (is_extension_array_dtype(right) and
+ not is_categorical_dtype(right))):
+ return dispatch_to_extension_op(op, left, right)
+
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
@@ -1238,6 +1264,11 @@ def wrapper(self, other, axis=None):
return self._constructor(res_values, index=self.index,
name=res_name)
+ elif (is_extension_array_dtype(self) or
+ (is_extension_array_dtype(other) and
+ not is_categorical_dtype(other))):
+ return dispatch_to_extension_op(op, self, other)
+
elif isinstance(other, ABCSeries):
# By this point we have checked that self._indexed_same(other)
res_values = na_op(self.values, other.values)
diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 9da985625c4ee..640b894e2245f 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -47,6 +47,7 @@ class TestMyDtype(BaseDtypeTests):
from .groupby import BaseGroupbyTests # noqa
from .interface import BaseInterfaceTests # noqa
from .methods import BaseMethodsTests # noqa
+from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests # noqa
from .missing import BaseMissingTests # noqa
from .reshaping import BaseReshapingTests # noqa
from .setitem import BaseSetitemTests # noqa
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
new file mode 100644
index 0000000000000..659b9757ac1e3
--- /dev/null
+++ b/pandas/tests/extension/base/ops.py
@@ -0,0 +1,94 @@
+import pytest
+
+import operator
+
+import pandas as pd
+from .base import BaseExtensionTests
+
+
+class BaseOpsUtil(BaseExtensionTests):
+ def get_op_from_name(self, op_name):
+ short_opname = op_name.strip('_')
+ try:
+ op = getattr(operator, short_opname)
+ except AttributeError:
+ # Assume it is the reverse operator
+ rop = getattr(operator, short_opname[1:])
+ op = lambda x, y: rop(y, x)
+
+ return op
+
+ def check_opname(self, s, op_name, other, exc=NotImplementedError):
+ op = self.get_op_from_name(op_name)
+
+ self._check_op(s, op, other, exc)
+
+ def _check_op(self, s, op, other, exc=NotImplementedError):
+ if exc is None:
+ result = op(s, other)
+ expected = s.combine(other, op)
+ self.assert_series_equal(result, expected)
+ else:
+ with pytest.raises(exc):
+ op(s, other)
+
+
+class BaseArithmeticOpsTests(BaseOpsUtil):
+ """Various Series and DataFrame arithmetic ops methods."""
+
+ def test_arith_scalar(self, data, all_arithmetic_operators):
+ # scalar
+ op_name = all_arithmetic_operators
+ s = pd.Series(data)
+ self.check_opname(s, op_name, s.iloc[0], exc=TypeError)
+
+ def test_arith_array(self, data, all_arithmetic_operators):
+ # ndarray & other series
+ op_name = all_arithmetic_operators
+ s = pd.Series(data)
+ self.check_opname(s, op_name, [s.iloc[0]] * len(s), exc=TypeError)
+
+ def test_divmod(self, data):
+ s = pd.Series(data)
+ self._check_op(s, divmod, 1, exc=TypeError)
+ self._check_op(1, divmod, s, exc=TypeError)
+
+ def test_error(self, data, all_arithmetic_operators):
+ # invalid ops
+ op_name = all_arithmetic_operators
+ with pytest.raises(AttributeError):
+ getattr(data, op_name)
+
+
+class BaseComparisonOpsTests(BaseOpsUtil):
+ """Various Series and DataFrame comparison ops methods."""
+
+ def _compare_other(self, s, data, op_name, other):
+ op = self.get_op_from_name(op_name)
+ if op_name == '__eq__':
+ assert getattr(data, op_name)(other) is NotImplemented
+ assert not op(s, other).all()
+ elif op_name == '__ne__':
+ assert getattr(data, op_name)(other) is NotImplemented
+ assert op(s, other).all()
+
+ else:
+
+ # array
+ assert getattr(data, op_name)(other) is NotImplemented
+
+ # series
+ s = pd.Series(data)
+ with pytest.raises(TypeError):
+ op(s, other)
+
+ def test_compare_scalar(self, data, all_compare_operators):
+ op_name = all_compare_operators
+ s = pd.Series(data)
+ self._compare_other(s, data, op_name, 0)
+
+ def test_compare_array(self, data, all_compare_operators):
+ op_name = all_compare_operators
+ s = pd.Series(data)
+ other = [0] * len(data)
+ self._compare_other(s, data, op_name, other)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 61fdb8454b542..ae0d72c204d13 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -183,3 +183,29 @@ def test_combine_add(self, data_repeated):
class TestCasting(base.BaseCastingTests):
pass
+
+
+class TestArithmeticOps(base.BaseArithmeticOpsTests):
+
+ def test_arith_scalar(self, data, all_arithmetic_operators):
+
+ op_name = all_arithmetic_operators
+ if op_name != '__rmod__':
+ super(TestArithmeticOps, self).test_arith_scalar(data, op_name)
+ else:
+ pytest.skip('rmod never called when string is first argument')
+
+
+class TestComparisonOps(base.BaseComparisonOpsTests):
+
+ def _compare_other(self, s, data, op_name, other):
+ op = self.get_op_from_name(op_name)
+ if op_name == '__eq__':
+ assert not op(data, other).all()
+
+ elif op_name == '__ne__':
+ assert op(data, other).all()
+
+ else:
+ with pytest.raises(TypeError):
+ op(data, other)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index cc6fadc483d5e..3f2f24cd26af0 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -6,7 +6,8 @@
import numpy as np
import pandas as pd
-from pandas.core.arrays import ExtensionArray
+from pandas.core.arrays import (ExtensionArray,
+ ExtensionScalarOpsMixin)
from pandas.core.dtypes.base import ExtensionDtype
@@ -24,13 +25,14 @@ def construct_from_string(cls, string):
"'{}'".format(cls, string))
-class DecimalArray(ExtensionArray):
+class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin):
dtype = DecimalDtype()
def __init__(self, values):
for val in values:
if not isinstance(val, self.dtype.type):
- raise TypeError
+ raise TypeError("All values must be of type " +
+ str(self.dtype.type))
values = np.asarray(values, dtype=object)
self._data = values
@@ -103,5 +105,9 @@ def _concat_same_type(cls, to_concat):
return cls(np.concatenate([x._data for x in to_concat]))
+DecimalArray._add_arithmetic_ops()
+DecimalArray._add_comparison_ops()
+
+
def make_data():
return [decimal.Decimal(random.random()) for _ in range(100)]
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index f74b4d7e94f11..45ee7f227c4f0 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -191,3 +191,64 @@ def test_dataframe_constructor_with_different_dtype_raises():
xpr = "Cannot coerce extension array to dtype 'int64'. "
with tm.assert_raises_regex(ValueError, xpr):
pd.DataFrame({"A": arr}, dtype='int64')
+
+
+class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests):
+
+ def check_opname(self, s, op_name, other, exc=None):
+ super(TestArithmeticOps, self).check_opname(s, op_name,
+ other, exc=None)
+
+ def test_arith_array(self, data, all_arithmetic_operators):
+ op_name = all_arithmetic_operators
+ s = pd.Series(data)
+
+ context = decimal.getcontext()
+ divbyzerotrap = context.traps[decimal.DivisionByZero]
+ invalidoptrap = context.traps[decimal.InvalidOperation]
+ context.traps[decimal.DivisionByZero] = 0
+ context.traps[decimal.InvalidOperation] = 0
+
+ # Decimal supports ops with int, but not float
+ other = pd.Series([int(d * 100) for d in data])
+ self.check_opname(s, op_name, other)
+
+ if "mod" not in op_name:
+ self.check_opname(s, op_name, s * 2)
+
+ self.check_opname(s, op_name, 0)
+ self.check_opname(s, op_name, 5)
+ context.traps[decimal.DivisionByZero] = divbyzerotrap
+ context.traps[decimal.InvalidOperation] = invalidoptrap
+
+ @pytest.mark.skip(reason="divmod not appropriate for decimal")
+ def test_divmod(self, data):
+ pass
+
+ def test_error(self):
+ pass
+
+
+class TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests):
+
+ def check_opname(self, s, op_name, other, exc=None):
+ super(TestComparisonOps, self).check_opname(s, op_name,
+ other, exc=None)
+
+ def _compare_other(self, s, data, op_name, other):
+ self.check_opname(s, op_name, other)
+
+ def test_compare_scalar(self, data, all_compare_operators):
+ op_name = all_compare_operators
+ s = pd.Series(data)
+ self._compare_other(s, data, op_name, 0.5)
+
+ def test_compare_array(self, data, all_compare_operators):
+ op_name = all_compare_operators
+ s = pd.Series(data)
+
+ alter = np.random.choice([-1, 0, 1], len(data))
+ # Randomly double, halve or keep same value
+ other = pd.Series(data) * [decimal.Decimal(pow(2.0, i))
+ for i in alter]
+ self._compare_other(s, data, op_name, other)
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 10be7836cb8d7..d3043bf0852d2 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -47,7 +47,8 @@ class JSONArray(ExtensionArray):
def __init__(self, values):
for val in values:
if not isinstance(val, self.dtype.type):
- raise TypeError
+ raise TypeError("All values must be of type " +
+ str(self.dtype.type))
self.data = values
# Some aliases for common attribute names to ensure pandas supports
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 85a282ae4007f..268134dc8c333 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -238,3 +238,12 @@ def test_groupby_extension_agg(self, as_index, data_for_grouping):
super(TestGroupby, self).test_groupby_extension_agg(
as_index, data_for_grouping
)
+
+
+class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
+ def test_error(self, data, all_arithmetic_operators):
+ pass
+
+
+class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
+ pass
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index a5afcb6915034..11e9942079aad 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -29,7 +29,8 @@
is_categorical_dtype,
is_interval_dtype,
is_sequence,
- is_list_like)
+ is_list_like,
+ is_extension_array_dtype)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.core.common as com
@@ -1243,6 +1244,10 @@ def assert_series_equal(left, right, check_dtype=True,
right = pd.IntervalIndex(right)
assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj))
+ elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
+ is_extension_array_dtype(right) and not is_categorical_dtype(right)):
+ return assert_extension_array_equal(left.values, right.values)
+
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
| - [x] closes #19577
- [x] tests added / passed
- tests/extension/decimal/test_decimal.py:TestOperator
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- in v0.24.0
Based on a discussion in #20889, this provides a mixin (via a mixin factory) that provides a default implementation of operators for `ExtensionArray` using the operators defined on the underlying `ExtensionDtype` . Tested using the `DecimalArray` implementation.
NOTE: This requires #21183 ~~and #21260~~ to be accepted into master, and so the changes from those pull requests are included here.
Comments from @jorisvandenbossche @TomAugspurger @jreback @jbrockmendel are welcome.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21261 | 2018-05-30T18:32:47Z | 2018-06-29T02:01:01Z | 2018-06-29T02:01:01Z | 2018-07-02T14:39:19Z |
BUG: Series.get() with ExtensionArray and integer index | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 1ab67bd80a5e8..76d08e84a6efd 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -270,6 +270,7 @@ Reshaping
ExtensionArray
^^^^^^^^^^^^^^
+- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`)
- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 122f8662abb61..ba60d10099948 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2988,16 +2988,20 @@ def get_value(self, series, key):
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
- # GH 20825
+ # GH 20882, 21257
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
- # If that fails, see if key is an integer, and
+ # If that fails, raise a KeyError if an integer
+ # index, otherwise, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
- if is_integer(key):
+ if (len(self) > 0 and
+ self.inferred_type in ['integer', 'boolean']):
+ raise
+ elif is_integer(key):
return s[key]
s = com._values_from_object(series)
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index 883b3f5588aef..e9df49780f119 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -130,7 +130,7 @@ def test_get(self, data):
expected = s.iloc[[0, 1]]
self.assert_series_equal(result, expected)
- assert s.get(-1) == s.iloc[-1]
+ assert s.get(-1) is None
assert s.get(s.index.max() + 1) is None
s = pd.Series(data[:6], index=list('abcdef'))
@@ -147,6 +147,11 @@ def test_get(self, data):
assert s.get(-1) == s.iloc[-1]
assert s.get(len(s)) is None
+ # GH 21257
+ s = pd.Series(data)
+ s2 = s[::2]
+ assert s2.get(1) is None
+
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 8571fbc10e9bb..25bc394e312a0 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -187,6 +187,49 @@ def test_getitem_box_float64(test_data):
assert isinstance(value, np.float64)
+@pytest.mark.parametrize(
+ 'arr',
+ [
+ np.random.randn(10),
+ tm.makeDateIndex(10, name='a').tz_localize(
+ tz='US/Eastern'),
+ ])
+def test_get(arr):
+ # GH 21260
+ s = Series(arr, index=[2 * i for i in range(len(arr))])
+ assert s.get(4) == s.iloc[2]
+
+ result = s.get([4, 6])
+ expected = s.iloc[[2, 3]]
+ tm.assert_series_equal(result, expected)
+
+ result = s.get(slice(2))
+ expected = s.iloc[[0, 1]]
+ tm.assert_series_equal(result, expected)
+
+ assert s.get(-1) is None
+ assert s.get(s.index.max() + 1) is None
+
+ s = Series(arr[:6], index=list('abcdef'))
+ assert s.get('c') == s.iloc[2]
+
+ result = s.get(slice('b', 'd'))
+ expected = s.iloc[[1, 2, 3]]
+ tm.assert_series_equal(result, expected)
+
+ result = s.get('Z')
+ assert result is None
+
+ assert s.get(4) == s.iloc[4]
+ assert s.get(-1) == s.iloc[-1]
+ assert s.get(len(s)) is None
+
+ # GH 21257
+ s = pd.Series(arr)
+ s2 = s[::2]
+ assert s2.get(1) is None
+
+
def test_series_box_timestamp():
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
| - [x] closes #21257
- [x] tests added / passed
- added example in tests/extension/base/getitem.py:test_get()
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- Put this in 0.23.1 for now
| https://api.github.com/repos/pandas-dev/pandas/pulls/21260 | 2018-05-30T17:35:51Z | 2018-06-29T00:47:09Z | 2018-06-29T00:47:09Z | 2018-06-29T13:44:00Z |
ENH: Add support for tablewise application of style.background_gradient with axis=None | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 034a56b2ac0cb..c042275f00a17 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -425,6 +425,7 @@ Other
- :meth: `~pandas.io.formats.style.Styler.background_gradient` now takes a ``text_color_threshold`` parameter to automatically lighten the text color based on the luminance of the background color. This improves readability with dark background colors without the need to limit the background colormap range. (:issue:`21258`)
- Require at least 0.28.2 version of ``cython`` to support read-only memoryviews (:issue:`21688`)
+- :meth: `~pandas.io.formats.style.Styler.background_gradient` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` (:issue:`15204`)
-
-
-
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 62c2ea8ab9273..808b6979b235e 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -913,21 +913,22 @@ def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""Color background in a range according to the data."""
+ if (not isinstance(text_color_threshold, (float, int)) or
+ not 0 <= text_color_threshold <= 1):
+ msg = "`text_color_threshold` must be a value from 0 to 1."
+ raise ValueError(msg)
+
with _mpl(Styler.background_gradient) as (plt, colors):
- rng = s.max() - s.min()
+ smin = s.values.min()
+ smax = s.values.max()
+ rng = smax - smin
# extend lower / upper bounds, compresses color range
- norm = colors.Normalize(s.min() - (rng * low),
- s.max() + (rng * high))
- # matplotlib modifies inplace?
+ norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
+ # matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
- normed = norm(s.values)
- c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
- if (not isinstance(text_color_threshold, (float, int)) or
- not 0 <= text_color_threshold <= 1):
- msg = "`text_color_threshold` must be a value from 0 to 1."
- raise ValueError(msg)
+ rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
- def relative_luminance(color):
+ def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
@@ -936,25 +937,33 @@ def relative_luminance(color):
Parameters
----------
- color : matplotlib color
- Hex code, rgb-tuple, or HTML color name.
+ color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
- rgb = colors.colorConverter.to_rgba_array(color)[:, :3]
- rgb = np.where(rgb <= .03928, rgb / 12.92,
- ((rgb + .055) / 1.055) ** 2.4)
- lum = rgb.dot([.2126, .7152, .0722])
- return lum.item()
-
- text_colors = ['#f1f1f1' if relative_luminance(x) <
- text_color_threshold else '#000000' for x in c]
-
- return ['background-color: {color};color: {tc}'.format(
- color=color, tc=tc) for color, tc in zip(c, text_colors)]
+ r, g, b = (
+ x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)
+ for x in rgba[:3]
+ )
+ return 0.2126 * r + 0.7152 * g + 0.0722 * b
+
+ def css(rgba):
+ dark = relative_luminance(rgba) < text_color_threshold
+ text_color = '#f1f1f1' if dark else '#000000'
+ return 'background-color: {b};color: {c};'.format(
+ b=colors.rgb2hex(rgba), c=text_color
+ )
+
+ if s.ndim == 1:
+ return [css(rgba) for rgba in rgbas]
+ else:
+ return pd.DataFrame(
+ [[css(rgba) for rgba in row] for row in rgbas],
+ index=s.index, columns=s.columns
+ )
def set_properties(self, subset=None, **kwargs):
"""
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index b355cda8df1bd..293dadd19031d 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -1056,6 +1056,34 @@ def test_text_color_threshold_raises(self, text_color_threshold):
df.style.background_gradient(
text_color_threshold=text_color_threshold)._compute()
+ @td.skip_if_no_mpl
+ def test_background_gradient_axis(self):
+ df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
+
+ low = ['background-color: #f7fbff', 'color: #000000']
+ high = ['background-color: #08306b', 'color: #f1f1f1']
+ mid = ['background-color: #abd0e6', 'color: #000000']
+ result = df.style.background_gradient(cmap='Blues',
+ axis=0)._compute().ctx
+ assert result[(0, 0)] == low
+ assert result[(0, 1)] == low
+ assert result[(1, 0)] == high
+ assert result[(1, 1)] == high
+
+ result = df.style.background_gradient(cmap='Blues',
+ axis=1)._compute().ctx
+ assert result[(0, 0)] == low
+ assert result[(0, 1)] == high
+ assert result[(1, 0)] == low
+ assert result[(1, 1)] == high
+
+ result = df.style.background_gradient(cmap='Blues',
+ axis=None)._compute().ctx
+ assert result[(0, 0)] == low
+ assert result[(0, 1)] == mid
+ assert result[(1, 0)] == mid
+ assert result[(1, 1)] == high
+
def test_block_names():
# catch accidental removal of a block
|
- [x] closes #15204
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21259 | 2018-05-30T15:58:02Z | 2018-07-12T12:38:38Z | 2018-07-12T12:38:38Z | 2018-07-12T12:38:40Z |
BUG: Allow IntervalIndex to be constructed from categorical data with appropriate dtype | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 35484e34ee9eb..5dd2490dd5b39 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -81,6 +81,7 @@ Indexing
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
+- Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`)
-
I/O
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 8f8d8760583ce..eb9d7efc06c27 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -112,6 +112,10 @@ def maybe_convert_platform_interval(values):
-------
array
"""
+ if is_categorical_dtype(values):
+ # GH 21243/21253
+ values = np.array(values)
+
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is not
diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py
index 5fdf92dcb2044..b1711c3444586 100644
--- a/pandas/tests/indexes/interval/test_construction.py
+++ b/pandas/tests/indexes/interval/test_construction.py
@@ -6,8 +6,9 @@
from pandas import (
Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical,
- date_range, timedelta_range, period_range, notna)
+ CategoricalIndex, date_range, timedelta_range, period_range, notna)
from pandas.compat import lzip
+from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
import pandas.core.common as com
import pandas.util.testing as tm
@@ -111,6 +112,22 @@ def test_constructor_string(self, constructor, breaks):
with tm.assert_raises_regex(TypeError, msg):
constructor(**self.get_kwargs_from_breaks(breaks))
+ @pytest.mark.parametrize('cat_constructor', [
+ Categorical, CategoricalIndex])
+ def test_constructor_categorical_valid(self, constructor, cat_constructor):
+ # GH 21243/21253
+ if isinstance(constructor, partial) and constructor.func is Index:
+ # Index is defined to create CategoricalIndex from categorical data
+ pytest.skip()
+
+ breaks = np.arange(10, dtype='int64')
+ expected = IntervalIndex.from_breaks(breaks)
+
+ cat_breaks = cat_constructor(breaks)
+ result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
+ result = constructor(**result_kwargs)
+ tm.assert_index_equal(result, expected)
+
def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
@@ -238,6 +255,8 @@ def get_kwargs_from_breaks(self, breaks, closed='right'):
tuples = lzip(breaks[:-1], breaks[1:])
if isinstance(breaks, (list, tuple)):
return {'data': tuples}
+ elif is_categorical_dtype(breaks):
+ return {'data': breaks._constructor(tuples)}
return {'data': com._asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
@@ -286,6 +305,8 @@ def get_kwargs_from_breaks(self, breaks, closed='right'):
if isinstance(breaks, list):
return {'data': ivs}
+ elif is_categorical_dtype(breaks):
+ return {'data': breaks._constructor(ivs)}
return {'data': np.array(ivs, dtype=object)}
def test_generic_errors(self, constructor):
| - [X] closes #21243
- [X] closes #21253
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Added this to 0.23.1 since it's a regression and the fix is a minor change outside the `IntervalIndex` class. Not opposed to pushing to 0.24.0 if backporting this could be problematic. | https://api.github.com/repos/pandas-dev/pandas/pulls/21254 | 2018-05-30T06:49:31Z | 2018-06-04T21:28:52Z | 2018-06-04T21:28:51Z | 2018-06-22T17:14:31Z |
BUG: dropna incorrect with categoricals in pivot_table | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 2b64ef32c1eb6..97a5975dad9a6 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -29,6 +29,8 @@ Fixed Regressions
- Bug in :meth:`~DataFrame.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
+- Regression in :func:`pivot_table` where an ordered ``Categorical`` with missing
+ values for the pivot's ``index`` would give a mis-aligned result (:issue:`21133`)
.. _whatsnew_0231.performance:
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index e02420323704e..9a2ad5d13d77a 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -1,8 +1,10 @@
# pylint: disable=E1103
-from pandas.core.dtypes.common import is_list_like, is_scalar
+from pandas.core.dtypes.common import (
+ is_list_like, is_scalar, is_integer_dtype)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.reshape.concat import concat
from pandas.core.series import Series
@@ -79,8 +81,22 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
pass
values = list(values)
- grouped = data.groupby(keys, observed=dropna)
+ # group by the cartesian product of the grouper
+ # if we have a categorical
+ grouped = data.groupby(keys, observed=False)
agged = grouped.agg(aggfunc)
+ if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
+ agged = agged.dropna(how='all')
+
+ # gh-21133
+ # we want to down cast if
+ # the original values are ints
+ # as we grouped with a NaN value
+ # and then dropped, coercing to floats
+ for v in [v for v in values if v in data and v in agged]:
+ if (is_integer_dtype(data[v]) and
+ not is_integer_dtype(agged[v])):
+ agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
if table.index.nlevels > 1:
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index d2cf3fc11e165..3ec60d50f2792 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
@@ -16,6 +17,11 @@
from pandas.api.types import CategoricalDtype as CDT
+@pytest.fixture(params=[True, False])
+def dropna(request):
+ return request.param
+
+
class TestPivotTable(object):
def setup_method(self, method):
@@ -109,7 +115,6 @@ def test_pivot_table_categorical(self):
index=exp_index)
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize('dropna', [True, False])
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
@@ -137,6 +142,25 @@ def test_pivot_table_dropna_categoricals(self, dropna):
tm.assert_frame_equal(result, expected)
+ def test_pivot_with_non_observable_dropna(self, dropna):
+ # gh-21133
+ df = pd.DataFrame(
+ {'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
+ categories=['low', 'high'],
+ ordered=True),
+ 'B': range(5)})
+
+ result = df.pivot_table(index='A', values='B', dropna=dropna)
+ expected = pd.DataFrame(
+ {'B': [2, 3]},
+ index=pd.Index(
+ pd.Categorical.from_codes([0, 1],
+ categories=['low', 'high'],
+ ordered=True),
+ name='A'))
+
+ tm.assert_frame_equal(result, expected)
+
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
| closes #21133
| https://api.github.com/repos/pandas-dev/pandas/pulls/21252 | 2018-05-29T23:50:34Z | 2018-06-07T22:05:58Z | 2018-06-07T22:05:58Z | 2018-06-12T16:30:35Z |
Append Mode for ExcelWriter with openpyxl | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index e931450cb5c01..6997ea84e5b83 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -8,6 +8,8 @@ v0.24.0
New features
~~~~~~~~~~~~
+- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`)
+
.. _whatsnew_0240.enhancements.other:
Other Enhancements
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 5608c29637447..e86d33742b266 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -804,6 +804,10 @@ class ExcelWriter(object):
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
+ mode : {'w' or 'a'}, default 'w'
+ File mode to use (write or append).
+
+ .. versionadded:: 0.24.0
Notes
-----
@@ -897,7 +901,8 @@ def save(self):
pass
def __init__(self, path, engine=None,
- date_format=None, datetime_format=None, **engine_kwargs):
+ date_format=None, datetime_format=None, mode='w',
+ **engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
@@ -919,6 +924,8 @@ def __init__(self, path, engine=None,
else:
self.datetime_format = datetime_format
+ self.mode = mode
+
def __fspath__(self):
return _stringify_path(self.path)
@@ -993,23 +1000,27 @@ class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
- def __init__(self, path, engine=None, **engine_kwargs):
+ def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
- super(_OpenpyxlWriter, self).__init__(path, **engine_kwargs)
+ super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs)
- # Create workbook object with default optimized_write=True.
- self.book = Workbook()
+ if self.mode == 'a': # Load from existing workbook
+ from openpyxl import load_workbook
+ book = load_workbook(self.path)
+ self.book = book
+ else:
+ # Create workbook object with default optimized_write=True.
+ self.book = Workbook()
- # Openpyxl 1.6.1 adds a dummy sheet. We remove it.
- if self.book.worksheets:
- try:
- self.book.remove(self.book.worksheets[0])
- except AttributeError:
+ if self.book.worksheets:
+ try:
+ self.book.remove(self.book.worksheets[0])
+ except AttributeError:
- # compat
- self.book.remove_sheet(self.book.worksheets[0])
+ # compat - for openpyxl <= 2.4
+ self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
@@ -1443,11 +1454,16 @@ class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
- def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
+ def __init__(self, path, engine=None, encoding=None, mode='w',
+ **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
- super(_XlwtWriter, self).__init__(path, **engine_kwargs)
+
+ if mode == 'a':
+ raise ValueError('Append mode is not supported with xlwt!')
+
+ super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
@@ -1713,13 +1729,18 @@ class _XlsxWriter(ExcelWriter):
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
- date_format=None, datetime_format=None, **engine_kwargs):
+ date_format=None, datetime_format=None, mode='w',
+ **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
+ if mode == 'a':
+ raise ValueError('Append mode is not supported with xlsxwriter!')
+
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
+ mode=mode,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 05423474f330a..2a225e6fe6a45 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2006,6 +2006,31 @@ def test_write_cells_merge_styled(self, merge_cells, ext, engine):
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
+ @pytest.mark.parametrize("mode,expected", [
+ ('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
+ def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
+ import openpyxl
+ df = DataFrame([1], columns=['baz'])
+
+ with ensure_clean(ext) as f:
+ wb = openpyxl.Workbook()
+ wb.worksheets[0].title = 'foo'
+ wb.worksheets[0]['A1'].value = 'foo'
+ wb.create_sheet('bar')
+ wb.worksheets[1]['A1'].value = 'bar'
+ wb.save(f)
+
+ writer = ExcelWriter(f, engine=engine, mode=mode)
+ df.to_excel(writer, sheet_name='baz', index=False)
+ writer.save()
+
+ wb2 = openpyxl.load_workbook(f)
+ result = [sheet.title for sheet in wb2.worksheets]
+ assert result == expected
+
+ for index, cell_value in enumerate(expected):
+ assert wb2.worksheets[index]['A1'].value == cell_value
+
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
@@ -2060,6 +2085,13 @@ def test_to_excel_styleconverter(self, merge_cells, ext, engine):
assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
+ def test_write_append_mode_raises(self, merge_cells, ext, engine):
+ msg = "Append mode is not supported with xlwt!"
+
+ with ensure_clean(ext) as f:
+ with tm.assert_raises_regex(ValueError, msg):
+ ExcelWriter(f, engine=engine, mode='a')
+
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
@@ -2111,6 +2143,13 @@ def test_column_format(self, merge_cells, ext, engine):
assert read_num_format == num_format
+ def test_write_append_mode_raises(self, merge_cells, ext, engine):
+ msg = "Append mode is not supported with xlsxwriter!"
+
+ with ensure_clean(ext) as f:
+ with tm.assert_raises_regex(ValueError, msg):
+ ExcelWriter(f, engine=engine, mode='a')
+
class TestExcelWriterEngineTests(object):
| - [X] closes #3441
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21251 | 2018-05-29T23:40:00Z | 2018-06-19T01:03:15Z | 2018-06-19T01:03:15Z | 2019-05-02T21:13:37Z |
EHN: to_csv compression accepts file-like object | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index e931450cb5c01..55e76512b2440 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -13,7 +13,7 @@ New features
Other Enhancements
^^^^^^^^^^^^^^^^^^
- :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`)
--
+- :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`)
-
.. _whatsnew_0240.api_breaking:
@@ -184,4 +184,3 @@ Other
-
-
-
-
diff --git a/pandas/conftest.py b/pandas/conftest.py
index b09cb872a12fb..a463f573c82e0 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -105,6 +105,16 @@ def compression(request):
return request.param
+@pytest.fixture(params=['gzip', 'bz2', 'zip',
+ pytest.param('xz', marks=td.skip_if_no_lzma)])
+def compression_only(request):
+ """
+ Fixture for trying common compression types in compression tests excluding
+ uncompressed case
+ """
+ return request.param
+
+
@pytest.fixture(scope='module')
def datetime_tz_utc():
from datetime import timezone
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 22677b19192e1..0899e9cd87aba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1689,8 +1689,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
A string representing the compression to use in the output file.
- Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only
- used when the first argument is a filename.
+ Allowed values are 'gzip', 'bz2', 'zip', 'xz'.
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c9329e8b9e572..f25f73513df30 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3761,8 +3761,7 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='',
non-ascii, for python versions prior to 3
compression : string, optional
A string representing the compression to use in the output file.
- Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only
- used when the first argument is a filename.
+ Allowed values are 'gzip', 'bz2', 'zip', 'xz'.
date_format: string, default None
Format string for datetime objects.
decimal: string, default '.'
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 29b8d29af0808..0be2a180fbfa2 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -154,9 +154,9 @@ def save(self):
# GH 17778 handles compression for byte strings.
if not close and self.compression:
f.close()
- with open(self.path_or_buf, 'r') as f:
+ with open(f.name, 'r') as f:
data = f.read()
- f, handles = _get_handle(self.path_or_buf, self.mode,
+ f, handles = _get_handle(f.name, self.mode,
encoding=encoding,
compression=self.compression)
f.write(data)
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index bb7ee1b911fee..88e469731060d 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -231,13 +231,33 @@ def test_standardize_mapping():
columns=['X', 'Y', 'Z']),
Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
-def test_compression_size(obj, method, compression):
- if not compression:
- pytest.skip("only test compression case.")
+def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as filename:
- getattr(obj, method)(filename, compression=compression)
+ getattr(obj, method)(filename, compression=compression_only)
compressed = os.path.getsize(filename)
getattr(obj, method)(filename, compression=None)
uncompressed = os.path.getsize(filename)
assert uncompressed > compressed
+
+
+@pytest.mark.parametrize('obj', [
+ DataFrame(100 * [[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ columns=['X', 'Y', 'Z']),
+ Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
+@pytest.mark.parametrize('method', ['to_csv'])
+def test_compression_size_fh(obj, method, compression_only):
+
+ with tm.ensure_clean() as filename:
+ with open(filename, 'w') as fh:
+ getattr(obj, method)(fh, compression=compression_only)
+ # GH 17778
+ assert fh.closed
+ compressed = os.path.getsize(filename)
+ with tm.ensure_clean() as filename:
+ with open(filename, 'w') as fh:
+ getattr(obj, method)(fh, compression=None)
+ assert not fh.closed
+ uncompressed = os.path.getsize(filename)
+ assert uncompressed > compressed
| - [x] closes #21227
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Handle an unsupported case when a file-like object instead of path passed into to_csv with compression. According to documentation, compression keyword requires it to be a filename.
At the moment, when a handle is passed, it appears to be uncompressed.
Tentative enhancement. | https://api.github.com/repos/pandas-dev/pandas/pulls/21249 | 2018-05-29T17:03:13Z | 2018-05-30T23:27:47Z | 2018-05-30T23:27:47Z | 2018-05-31T20:06:22Z |
BUG: Fix handling of encoding for the StataReader #21244 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index e931450cb5c01..ee33f31aad01b 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -146,7 +146,8 @@ MultiIndex
I/O
^^^
--
+- :func:`pandas.read_stata` now honours the ``encoding`` parameter, and supports the 'utf-8'
+ encoding. #21244
-
-
@@ -184,4 +185,3 @@ Other
-
-
-
-
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 2797924985c70..f0e8b8d638d0d 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -37,7 +37,8 @@
from pandas.util._decorators import deprecate_kwarg
VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
- 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
+ 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1',
+ 'utf-8', 'utf8')
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
@@ -1335,7 +1336,7 @@ def _calcsize(self, fmt):
def _decode(self, s):
s = s.partition(b"\0")[0]
- return s.decode('utf-8')
+ return s.decode(self._encoding or self._default_encoding)
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index f3a465da4e87f..db38227155df4 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -99,9 +99,9 @@ def setup_method(self, method):
self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta')
- def read_dta(self, file):
+ def read_dta(self, file, encoding='latin-1'):
# Legacy default reader configuration
- return read_stata(file, convert_dates=True)
+ return read_stata(file, convert_dates=True, encoding=encoding)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
@@ -268,7 +268,7 @@ def test_read_dta12(self):
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_dta18(self):
- parsed_118 = self.read_dta(self.dta22_118)
+ parsed_118 = self.read_dta(self.dta22_118, encoding='utf-8')
parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
expected = DataFrame.from_records(
[['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
@@ -283,7 +283,7 @@ def test_read_dta18(self):
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
- with StataReader(self.dta22_118) as rdr:
+ with StataReader(self.dta22_118, encoding='utf-8') as rdr:
vl = rdr.variable_labels()
vl_expected = {u'Unicode_Cities_Strl':
u'Here are some strls with Ünicode chars',
@@ -1358,7 +1358,7 @@ def test_invalid_encoding(self):
original = self.read_csv(self.csv3)
with pytest.raises(ValueError):
with tm.ensure_clean() as path:
- original.to_stata(path, encoding='utf-8')
+ original.to_stata(path, encoding='pokemon')
def test_path_pathlib(self):
df = tm.makeDataFrame()
| - [x] closes #21244
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21246 | 2018-05-29T13:08:47Z | 2018-06-12T11:02:59Z | null | 2023-05-11T01:17:54Z |
BUG: Support to create DataFrame from list subclasses | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 60f6a66e07a7b..dfb6ab5b189b2 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -91,4 +91,17 @@ def time_frame_from_ndarray(self):
self.df = DataFrame(self.data)
+class FromLists(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ N = 1000
+ M = 100
+ self.data = [[j for j in range(M)] for i in range(N)]
+
+ def time_frame_from_lists(self):
+ self.df = DataFrame(self.data)
+
+
from .pandas_vb_common import setup # noqa: F401
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index cc40e6d42a70b..54faa9ba75d9f 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1606,6 +1606,7 @@ Other
- Bug in :meth:`DataFrame.combine_first` in which column types were unexpectedly converted to float (:issue:`20699`)
- Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before Pandas. (:issue:`24113`)
- Constructing a DataFrame with an index argument that wasn't already an instance of :class:`~pandas.core.Index` was broken in `4efb39f <https://github.com/pandas-dev/pandas/commit/4efb39f01f5880122fa38d91e12d217ef70fad9e>`_ (:issue:`22227`).
+- Bug in :func:`to_object_array` prevented list subclasses to be used to create :class:`DataFrame` (:issue:`21226`)
.. _whatsnew_0.24.0.contributors:
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 0c081986d83c5..2736133a79d8e 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -2208,7 +2208,7 @@ def map_infer(ndarray arr, object f, bint convert=1):
return result
-def to_object_array(rows: list, min_width: int=0):
+def to_object_array(rows: object, int min_width=0):
"""
Convert a list of lists into an object array.
@@ -2229,20 +2229,22 @@ def to_object_array(rows: list, min_width: int=0):
cdef:
Py_ssize_t i, j, n, k, tmp
ndarray[object, ndim=2] result
+ list input_rows
list row
- n = len(rows)
+ input_rows = <list>rows
+ n = len(input_rows)
k = min_width
for i in range(n):
- tmp = len(rows[i])
+ tmp = len(input_rows[i])
if tmp > k:
k = tmp
result = np.empty((n, k), dtype=object)
for i in range(n):
- row = rows[i]
+ row = <list>input_rows[i]
for j in range(len(row)):
result[i, j] = row[j]
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 76e92042cbe6a..fa1117a647850 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2165,6 +2165,15 @@ def test_constructor_range_dtype(self, dtype):
result = DataFrame({'A': range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
+ def test_frame_from_list_subclass(self):
+ # GH21226
+ class List(list):
+ pass
+
+ expected = DataFrame([[1, 2, 3], [4, 5, 6]])
+ result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameConstructorWithDatetimeTZ(TestData):
| - [x] closes #21226
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Alternative fix could be that instead of doing `isinstance` check in frame's `_to_arrays` method:
```
if isinstance(data[0], (list, tuple)):
```
We would do:
```
if type(data[0]) in (list, tuple):
```
This would then assure that `to_object_array` is called really just with exactly lists. But currently there is a mismatch, `if` checks with subtypes, while `to_object_array` does not support them. I think it is better to support them so this merge request adds support for subtypes/subclasses of list. | https://api.github.com/repos/pandas-dev/pandas/pulls/21238 | 2018-05-29T04:16:58Z | 2018-12-15T21:21:24Z | 2018-12-15T21:21:24Z | 2018-12-15T23:23:09Z |
pct change bug issue 21200 | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 4d0373e4571da..80317d6806346 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -382,6 +382,33 @@ Backwards incompatible API changes
- :meth:`read_csv` will now raise a ``ValueError`` if a column with missing values is declared as having dtype ``bool`` (:issue:`20591`)
- The column order of the resultant :class:`DataFrame` from :meth:`MultiIndex.to_frame` is now guaranteed to match the :attr:`MultiIndex.names` order. (:issue:`22420`)
+Percentage change on groupby changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixed a bug where calling :func:`SeriesGroupBy.pct_change` or :func:`DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`).
+
+.. ipython:: python
+
+ df = pd.DataFrame({'grp': ['a', 'a', 'b'], 'foo': [1.0, 1.1, 2.2]})
+ df
+
+Previous behavior:
+
+.. code-block:: ipython
+
+ In [1]: df.groupby('grp').pct_change()
+ Out[1]:
+ foo
+ 0 NaN
+ 1 0.1
+ 2 1.0
+
+New behavior:
+
+.. ipython:: python
+
+ df.groupby('grp').pct_change()
+
.. _whatsnew_0240.api_breaking.deps:
Dependencies have increased minimum versions
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2f54f61818aa6..47ac1260d5179 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1221,9 +1221,15 @@ def _apply_to_column_groupbys(self, func):
return func(self)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):
- """Calculate percent change of each value to previous entry in group"""
+ """Calcuate pct_change of each value to previous entry in group"""
+ # TODO: Remove this conditional when #23918 is fixed
+ if freq:
+ return self.apply(lambda x: x.pct_change(periods=periods,
+ fill_method=fill_method,
+ limit=limit, freq=freq))
filled = getattr(self, fill_method)(limit=limit)
- shifted = filled.shift(periods=periods, freq=freq)
+ fill_grp = filled.groupby(self.grouper.labels)
+ shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 45eaa3efa948a..4b915922cef93 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2025,11 +2025,10 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
fill_method=fill_method,
limit=limit, freq=freq,
axis=axis))
-
- filled = getattr(self, fill_method)(limit=limit).drop(
- self.grouper.names, axis=1)
- shifted = filled.shift(periods=periods, freq=freq)
-
+ filled = getattr(self, fill_method)(limit=limit)
+ filled = filled.drop(self.grouper.names, axis=1)
+ fill_grp = filled.groupby(self.grouper.labels)
+ shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
@Substitution(name='groupby')
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index dbbf6e583796f..b6361b4ad76a0 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -765,36 +765,36 @@ def test_pad_stable_sorting(fill_method):
@pytest.mark.parametrize("test_series", [True, False])
+@pytest.mark.parametrize("freq", [
+ None,
+ pytest.param('D', marks=pytest.mark.xfail(
+ reason='GH#23918 before method uses freq in vectorized approach'))])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
(1, 'bfill', None), (1, 'bfill', 1),
(-1, 'ffill', None), (-1, 'ffill', 1),
- (-1, 'bfill', None), (-1, 'bfill', 1)])
-def test_pct_change(test_series, periods, fill_method, limit):
- vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
- exp_vals = Series(vals).pct_change(periods=periods,
- fill_method=fill_method,
- limit=limit).tolist()
-
- df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
- 'vals': vals * 2})
- grp = df.groupby('key')
-
- def get_result(grp_obj):
- return grp_obj.pct_change(periods=periods,
- fill_method=fill_method,
- limit=limit)
+ (-1, 'bfill', None), (-1, 'bfill', 1),
+])
+def test_pct_change(test_series, freq, periods, fill_method, limit):
+ # GH 21200, 21621
+ vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
+ keys = ['a', 'b']
+ key_v = np.repeat(keys, len(vals))
+ df = DataFrame({'key': key_v, 'vals': vals * 2})
+
+ df_g = getattr(df.groupby('key'), fill_method)(limit=limit)
+ grp = df_g.groupby('key')
+
+ expected = grp['vals'].obj / grp['vals'].shift(periods) - 1
if test_series:
- exp = pd.Series(exp_vals * 2)
- exp.name = 'vals'
- grp = grp['vals']
- result = get_result(grp)
- tm.assert_series_equal(result, exp)
+ result = df.groupby('key')['vals'].pct_change(
+ periods=periods, fill_method=fill_method, limit=limit, freq=freq)
+ tm.assert_series_equal(result, expected)
else:
- exp = DataFrame({'vals': exp_vals * 2})
- result = get_result(grp)
- tm.assert_frame_equal(result, exp)
+ result = df.groupby('key').pct_change(
+ periods=periods, fill_method=fill_method, limit=limit, freq=freq)
+ tm.assert_frame_equal(result, expected.to_frame('vals'))
@pytest.mark.parametrize("func", [np.any, np.all])
| closes #21200
closes #21621
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This addresses [#21200](https://github.com/pandas-dev/pandas/issues/21200?_pjax=%23js-repo-pjax-container) . When there are different groups in a dataframe, by using groupby it is expected that the pct_change function be applied on each group. However, combining groupby with pct_change does not produce the correct result.
**Explanation:**
Currently the groupby method in the pandas series and pandas dataframe pct change method can implement a vectorized solution, rather than calling apply, if certain conditions are met. For the pandas series method, the vectorized solution is the only option.
This is certainly inappropriate in cases where the groupby object is non-monotonic in its group order. To solve this I've added a check for monotonicity in both the series and dataframe implementation, as well as adding the opportunity to call apply for the series method.
In addition, I have augmented the UT to accept a parameter that can shuffle the dataframe, in order to ensure that the correct calculation occurs.
**Concern**
One concern I have is that depending on whether the apply or vectorized solution is used within the pct change method (e.g. depending on whether the groupby object is monotonic or not), the result returned to the user may have a different index structure. While this was the case prior to the PR, It's not clear to me if (1) this is an acceptable design within the pandas infrastructure, and (2) whether or not this is within the scope of a single PR that was originally opened to address a very specific bug.
As this is my first pandas PR, I would certainly appreciate feedback, and will incorporate any constructive feedback into future issues. | https://api.github.com/repos/pandas-dev/pandas/pulls/21235 | 2018-05-28T23:10:49Z | 2018-12-12T12:41:38Z | 2018-12-12T12:41:37Z | 2023-02-12T20:13:28Z |
CLN: move common printing utilties to pandas.io.formats.printing | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f79288c167356..145d116261a82 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -57,17 +57,11 @@
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
-from pandas.io.formats.printing import pprint_thing
+from pandas.io.formats.printing import (
+ pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
-from pandas.core.config import get_option
from pandas.core.strings import StringMethods
-
-# simplify
-default_pprint = lambda x, max_seq_items=None: \
- pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
- max_seq_items=max_seq_items)
-
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
@@ -1034,7 +1028,7 @@ def _format_space(self):
@property
def _formatter_func(self):
"""
- Return the formatted data as a unicode string
+ Return the formatter function
"""
return default_pprint
@@ -1042,125 +1036,20 @@ def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
- from pandas.io.formats.console import get_console_size
- from pandas.io.formats.format import _get_adjustment
- display_width, _ = get_console_size()
- if display_width is None:
- display_width = get_option('display.width') or 80
- if name is None:
- name = self.__class__.__name__
-
- space1 = "\n%s" % (' ' * (len(name) + 1))
- space2 = "\n%s" % (' ' * (len(name) + 2))
-
- n = len(self)
- sep = ','
- max_seq_items = get_option('display.max_seq_items') or n
- formatter = self._formatter_func
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
- # are we a truncated display
- is_truncated = n > max_seq_items
-
- # adj can optionally handle unicode eastern asian width
- adj = _get_adjustment()
-
- def _extend_line(s, line, value, display_width, next_line_prefix):
-
- if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
- display_width):
- s += line.rstrip()
- line = next_line_prefix
- line += value
- return s, line
-
- def best_len(values):
- if values:
- return max(adj.len(x) for x in values)
- else:
- return 0
-
- if n == 0:
- summary = '[], '
- elif n == 1:
- first = formatter(self[0])
- summary = '[%s], ' % first
- elif n == 2:
- first = formatter(self[0])
- last = formatter(self[-1])
- summary = '[%s, %s], ' % (first, last)
- else:
-
- if n > max_seq_items:
- n = min(max_seq_items // 2, 10)
- head = [formatter(x) for x in self[:n]]
- tail = [formatter(x) for x in self[-n:]]
- else:
- head = []
- tail = [formatter(x) for x in self]
-
- # adjust all values to max length if needed
- if is_justify:
-
- # however, if we are not truncated and we are only a single
- # line, then don't justify
- if (is_truncated or
- not (len(', '.join(head)) < display_width and
- len(', '.join(tail)) < display_width)):
- max_len = max(best_len(head), best_len(tail))
- head = [x.rjust(max_len) for x in head]
- tail = [x.rjust(max_len) for x in tail]
-
- summary = ""
- line = space2
-
- for i in range(len(head)):
- word = head[i] + sep + ' '
- summary, line = _extend_line(summary, line, word,
- display_width, space2)
-
- if is_truncated:
- # remove trailing space of last line
- summary += line.rstrip() + space2 + '...'
- line = space2
-
- for i in range(len(tail) - 1):
- word = tail[i] + sep + ' '
- summary, line = _extend_line(summary, line, word,
- display_width, space2)
-
- # last value: no sep added + 1 space of width used for trailing ','
- summary, line = _extend_line(summary, line, tail[-1],
- display_width - 2, space2)
- summary += line
- summary += '],'
-
- if len(summary) > (display_width):
- summary += space1
- else: # one row
- summary += ' '
-
- # remove initial space
- summary = '[' + summary[len(space2):]
-
- return summary
+ return format_object_summary(self, self._formatter_func,
+ is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
- attrs = []
- attrs.append(('dtype', "'%s'" % self.dtype))
- if self.name is not None:
- attrs.append(('name', default_pprint(self.name)))
- max_seq_items = get_option('display.max_seq_items') or len(self)
- if len(self) > max_seq_items:
- attrs.append(('length', len(self)))
- return attrs
+ return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index a101113da23ba..e22d7bce42841 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -261,3 +261,157 @@ class TableSchemaFormatter(BaseFormatter):
# unregister tableschema mime-type
if mimetype in formatters:
formatters[mimetype].enabled = False
+
+
+default_pprint = lambda x, max_seq_items=None: \
+ pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
+ max_seq_items=max_seq_items)
+
+
+def format_object_summary(obj, formatter, is_justify=True, name=None):
+ """
+ Return the formatted obj as a unicode string
+
+ Parameters
+ ----------
+ obj : object
+ must be iterable and support __getitem__
+ formatter : callable
+ string formatter for an element
+ is_justify : boolean
+ should justify the display
+ name : name, optiona
+ defaults to the class name of the obj
+
+ Returns
+ -------
+ summary string
+
+ """
+ from pandas.io.formats.console import get_console_size
+ from pandas.io.formats.format import _get_adjustment
+
+ display_width, _ = get_console_size()
+ if display_width is None:
+ display_width = get_option('display.width') or 80
+ if name is None:
+ name = obj.__class__.__name__
+
+ space1 = "\n%s" % (' ' * (len(name) + 1))
+ space2 = "\n%s" % (' ' * (len(name) + 2))
+
+ n = len(obj)
+ sep = ','
+ max_seq_items = get_option('display.max_seq_items') or n
+
+ # are we a truncated display
+ is_truncated = n > max_seq_items
+
+ # adj can optionally handle unicode eastern asian width
+ adj = _get_adjustment()
+
+ def _extend_line(s, line, value, display_width, next_line_prefix):
+
+ if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
+ display_width):
+ s += line.rstrip()
+ line = next_line_prefix
+ line += value
+ return s, line
+
+ def best_len(values):
+ if values:
+ return max(adj.len(x) for x in values)
+ else:
+ return 0
+
+ if n == 0:
+ summary = '[], '
+ elif n == 1:
+ first = formatter(obj[0])
+ summary = '[%s], ' % first
+ elif n == 2:
+ first = formatter(obj[0])
+ last = formatter(obj[-1])
+ summary = '[%s, %s], ' % (first, last)
+ else:
+
+ if n > max_seq_items:
+ n = min(max_seq_items // 2, 10)
+ head = [formatter(x) for x in obj[:n]]
+ tail = [formatter(x) for x in obj[-n:]]
+ else:
+ head = []
+ tail = [formatter(x) for x in obj]
+
+ # adjust all values to max length if needed
+ if is_justify:
+
+ # however, if we are not truncated and we are only a single
+ # line, then don't justify
+ if (is_truncated or
+ not (len(', '.join(head)) < display_width and
+ len(', '.join(tail)) < display_width)):
+ max_len = max(best_len(head), best_len(tail))
+ head = [x.rjust(max_len) for x in head]
+ tail = [x.rjust(max_len) for x in tail]
+
+ summary = ""
+ line = space2
+
+ for i in range(len(head)):
+ word = head[i] + sep + ' '
+ summary, line = _extend_line(summary, line, word,
+ display_width, space2)
+
+ if is_truncated:
+ # remove trailing space of last line
+ summary += line.rstrip() + space2 + '...'
+ line = space2
+
+ for i in range(len(tail) - 1):
+ word = tail[i] + sep + ' '
+ summary, line = _extend_line(summary, line, word,
+ display_width, space2)
+
+ # last value: no sep added + 1 space of width used for trailing ','
+ summary, line = _extend_line(summary, line, tail[-1],
+ display_width - 2, space2)
+ summary += line
+ summary += '],'
+
+ if len(summary) > (display_width):
+ summary += space1
+ else: # one row
+ summary += ' '
+
+ # remove initial space
+ summary = '[' + summary[len(space2):]
+
+ return summary
+
+
+def format_object_attrs(obj):
+ """
+ Return a list of tuples of the (attr, formatted_value)
+ for common attrs, including dtype, name, length
+
+ Parameters
+ ----------
+ obj : object
+ must be iterable
+
+ Returns
+ -------
+ list
+
+ """
+ attrs = []
+ if hasattr(obj, 'dtype'):
+ attrs.append(('dtype', "'{}'".format(obj.dtype)))
+ if getattr(obj, 'name', None) is not None:
+ attrs.append(('name', default_pprint(obj.name)))
+ max_seq_items = get_option('display.max_seq_items') or len(obj)
+ if len(obj) > max_seq_items:
+ attrs.append(('length', len(obj)))
+ return attrs
| https://api.github.com/repos/pandas-dev/pandas/pulls/21234 | 2018-05-28T23:00:27Z | 2018-05-29T01:46:20Z | 2018-05-29T01:46:20Z | 2018-05-29T01:46:20Z | |
BUG: df.agg, df.transform and df.apply use different methods when axis=1 than when axis=0 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 3b04d9937d7f2..04c2e253cfa5d 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -475,7 +475,9 @@ Numeric
- Bug in :class:`Series` ``__rmatmul__`` doesn't support matrix vector multiplication (:issue:`21530`)
- Bug in :func:`factorize` fails with read-only array (:issue:`12813`)
- Fixed bug in :func:`unique` handled signed zeros inconsistently: for some inputs 0.0 and -0.0 were treated as equal and for some inputs as different. Now they are treated as equal for all inputs (:issue:`21866`)
--
+- Bug in :meth:`DataFrame.agg`, :meth:`DataFrame.transform` and :meth:`DataFrame.apply` where,
+ when supplied with a list of functions and ``axis=1`` (e.g. ``df.apply(['sum', 'mean'], axis=1)``),
+ a ``TypeError`` was wrongly raised. For all three methods such calculation are now done correctly. (:issue:`16679`).
-
Strings
diff --git a/pandas/conftest.py b/pandas/conftest.py
index a979c3fc3bfac..e878b32fcad7b 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -60,6 +60,26 @@ def spmatrix(request):
return getattr(sparse, request.param + '_matrix')
+@pytest.fixture(params=[0, 1, 'index', 'columns'],
+ ids=lambda x: "axis {!r}".format(x))
+def axis(request):
+ """
+ Fixture for returning the axis numbers of a DataFrame.
+ """
+ return request.param
+
+
+axis_frame = axis
+
+
+@pytest.fixture(params=[0, 'index'], ids=lambda x: "axis {!r}".format(x))
+def axis_series(request):
+ """
+ Fixture for returning the axis numbers of a Series.
+ """
+ return request.param
+
+
@pytest.fixture
def ip():
"""
@@ -103,6 +123,41 @@ def all_arithmetic_operators(request):
return request.param
+# use sorted as dicts in py<3.6 have random order, which xdist doesn't like
+_cython_table = sorted(((key, value) for key, value in
+ pd.core.base.SelectionMixin._cython_table.items()),
+ key=lambda x: x[0].__class__.__name__)
+
+
+@pytest.fixture(params=_cython_table)
+def cython_table_items(request):
+ return request.param
+
+
+def _get_cython_table_params(ndframe, func_names_and_expected):
+ """combine frame, functions from SelectionMixin._cython_table
+ keys and expected result.
+
+ Parameters
+ ----------
+ ndframe : DataFrame or Series
+ func_names_and_expected : Sequence of two items
+ The first item is a name of a NDFrame method ('sum', 'prod') etc.
+ The second item is the expected return value
+
+ Returns
+ -------
+ results : list
+ List of three items (DataFrame, function, expected result)
+ """
+ results = []
+ for func_name, expected in func_names_and_expected:
+ results.append((ndframe, func_name, expected))
+ results += [(ndframe, func, expected) for func, name in _cython_table
+ if name == func_name]
+ return results
+
+
@pytest.fixture(params=['__eq__', '__ne__', '__le__',
'__lt__', '__ge__', '__gt__'])
def all_compare_operators(request):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 27ac5038276d6..989becbf133ca 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -5,6 +5,8 @@
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
+ is_dict_like,
+ is_list_like,
is_sequence)
from pandas.util._decorators import cache_readonly
@@ -105,6 +107,11 @@ def agg_axis(self):
def get_result(self):
""" compute the results """
+ # dispatch to agg
+ if is_list_like(self.f) or is_dict_like(self.f):
+ return self.obj.aggregate(self.f, axis=self.axis,
+ *self.args, **self.kwds)
+
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
@@ -308,15 +315,6 @@ def wrap_results(self):
class FrameRowApply(FrameApply):
axis = 0
- def get_result(self):
-
- # dispatch to agg
- if isinstance(self.f, (list, dict)):
- return self.obj.aggregate(self.f, axis=self.axis,
- *self.args, **self.kwds)
-
- return super(FrameRowApply, self).get_result()
-
def apply_broadcast(self):
return super(FrameRowApply, self).apply_broadcast(self.obj)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 16332738ce610..a66b9a7e92e85 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6070,19 +6070,34 @@ def _gotitem(self,
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
- # TODO: flipped axis
result = None
- if axis == 0:
- try:
- result, how = self._aggregate(func, axis=0, *args, **kwargs)
- except TypeError:
- pass
+ try:
+ result, how = self._aggregate(func, axis=axis, *args, **kwargs)
+ except TypeError:
+ pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
+ def _aggregate(self, arg, axis=0, *args, **kwargs):
+ if axis == 1:
+ # NDFrame.aggregate returns a tuple, and we need to transpose
+ # only result
+ result, how = (super(DataFrame, self.T)
+ ._aggregate(arg, *args, **kwargs))
+ result = result.T if result is not None else result
+ return result, how
+ return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
+
agg = aggregate
+ @Appender(_shared_docs['transform'] % _shared_doc_kwargs)
+ def transform(self, func, axis=0, *args, **kwargs):
+ axis = self._get_axis_number(axis)
+ if axis == 1:
+ return super(DataFrame, self.T).transform(func, *args, **kwargs).T
+ return super(DataFrame, self).transform(func, *args, **kwargs)
+
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 16105014bf74e..1126500fa55b2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9193,16 +9193,14 @@ def ewm(self, com=None, span=None, halflife=None, alpha=None,
cls.ewm = ewm
- @Appender(_shared_docs['transform'] % _shared_doc_kwargs)
- def transform(self, func, *args, **kwargs):
- result = self.agg(func, *args, **kwargs)
- if is_scalar(result) or len(result) != len(self):
- raise ValueError("transforms cannot produce "
- "aggregated results")
+ @Appender(_shared_docs['transform'] % _shared_doc_kwargs)
+ def transform(self, func, *args, **kwargs):
+ result = self.agg(func, *args, **kwargs)
+ if is_scalar(result) or len(result) != len(self):
+ raise ValueError("transforms cannot produce "
+ "aggregated results")
- return result
-
- cls.transform = transform
+ return result
# ----------------------------------------------------------------------
# Misc methods
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index dfb2961befe35..e038588b76ffd 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -5,7 +5,9 @@
import pytest
import operator
+from collections import OrderedDict
from datetime import datetime
+from itertools import chain
import warnings
import numpy as np
@@ -18,6 +20,7 @@
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
+from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
@@ -813,81 +816,97 @@ def test_consistency_for_boxed(self, box):
assert_frame_equal(result, expected)
-def zip_frames(*frames):
+def zip_frames(frames, axis=1):
"""
- take a list of frames, zip the columns together for each
- assume that these all have the first frame columns
+ take a list of frames, zip them together under the
+ assumption that these all have the first frames' index/columns.
- return a new frame
+ Returns
+ -------
+ new_frame : DataFrame
"""
- columns = frames[0].columns
- zipped = [f[c] for c in columns for f in frames]
- return pd.concat(zipped, axis=1)
+ if axis == 1:
+ columns = frames[0].columns
+ zipped = [f.loc[:, c] for c in columns for f in frames]
+ return pd.concat(zipped, axis=1)
+ else:
+ index = frames[0].index
+ zipped = [f.loc[i, :] for i in index for f in frames]
+ return pd.DataFrame(zipped)
class TestDataFrameAggregate(TestData):
- def test_agg_transform(self):
+ def test_agg_transform(self, axis):
+ other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
- f_sqrt = np.sqrt(self.frame)
f_abs = np.abs(self.frame)
+ f_sqrt = np.sqrt(self.frame)
# ufunc
- result = self.frame.transform(np.sqrt)
+ result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
- result = self.frame.apply(np.sqrt)
+ result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
- result = self.frame.transform(np.sqrt)
+ result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
- result = self.frame.apply([np.sqrt])
+ result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
- expected.columns = pd.MultiIndex.from_product(
- [self.frame.columns, ['sqrt']])
+ if axis in {0, 'index'}:
+ expected.columns = pd.MultiIndex.from_product(
+ [self.frame.columns, ['sqrt']])
+ else:
+ expected.index = pd.MultiIndex.from_product(
+ [self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
- result = self.frame.transform([np.sqrt])
+ result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
- expected = zip_frames(f_sqrt, f_abs)
- expected.columns = pd.MultiIndex.from_product(
- [self.frame.columns, ['sqrt', 'absolute']])
- result = self.frame.apply([np.sqrt, np.abs])
+ result = self.frame.apply([np.abs, np.sqrt], axis=axis)
+ expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
+ if axis in {0, 'index'}:
+ expected.columns = pd.MultiIndex.from_product(
+ [self.frame.columns, ['absolute', 'sqrt']])
+ else:
+ expected.index = pd.MultiIndex.from_product(
+ [self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
- result = self.frame.transform(['sqrt', np.abs])
+ result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
- def test_transform_and_agg_err(self):
+ def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
- self.frame.transform(['max', 'min'])
+ self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
- self.frame.agg(['max', 'sqrt'])
+ self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
- self.frame.transform(['max', 'sqrt'])
+ self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
- df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']})
+ df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
@@ -946,43 +965,57 @@ def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
- def test_agg_reduce(self):
+ def test_agg_reduce(self, axis):
+ other_axis = 1 if axis in {0, 'index'} else 0
+ name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
+
# all reducers
- expected = zip_frames(self.frame.mean().to_frame(),
- self.frame.max().to_frame(),
- self.frame.sum().to_frame()).T
- expected.index = ['mean', 'max', 'sum']
- result = self.frame.agg(['mean', 'max', 'sum'])
+ expected = pd.concat([self.frame.mean(axis=axis),
+ self.frame.max(axis=axis),
+ self.frame.sum(axis=axis),
+ ], axis=1)
+ expected.columns = ['mean', 'max', 'sum']
+ expected = expected.T if axis in {0, 'index'} else expected
+
+ result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
- result = self.frame.agg({'A': 'mean', 'B': 'sum'})
- expected = Series([self.frame.A.mean(), self.frame.B.sum()],
- index=['A', 'B'])
- assert_series_equal(result.reindex_like(expected), expected)
+ func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
+ result = self.frame.agg(func, axis=axis)
+ expected = Series([self.frame.loc(other_axis)[name1].mean(),
+ self.frame.loc(other_axis)[name2].sum()],
+ index=[name1, name2])
+ assert_series_equal(result, expected)
# dict input with lists
- result = self.frame.agg({'A': ['mean'], 'B': ['sum']})
- expected = DataFrame({'A': Series([self.frame.A.mean()],
- index=['mean']),
- 'B': Series([self.frame.B.sum()],
- index=['sum'])})
- assert_frame_equal(result.reindex_like(expected), expected)
+ func = OrderedDict([(name1, ['mean']), (name2, ['sum'])])
+ result = self.frame.agg(func, axis=axis)
+ expected = DataFrame({
+ name1: Series([self.frame.loc(other_axis)[name1].mean()],
+ index=['mean']),
+ name2: Series([self.frame.loc(other_axis)[name2].sum()],
+ index=['sum'])})
+ expected = expected.T if axis in {1, 'columns'} else expected
+ assert_frame_equal(result, expected)
# dict input with lists with multiple
- result = self.frame.agg({'A': ['mean', 'sum'],
- 'B': ['sum', 'max']})
- expected = DataFrame({'A': Series([self.frame.A.mean(),
- self.frame.A.sum()],
- index=['mean', 'sum']),
- 'B': Series([self.frame.B.sum(),
- self.frame.B.max()],
- index=['sum', 'max'])})
- assert_frame_equal(result.reindex_like(expected), expected)
+ func = OrderedDict([(name1, ['mean', 'sum']), (name2, ['sum', 'max'])])
+ result = self.frame.agg(func, axis=axis)
+ expected = DataFrame(OrderedDict([
+ (name1, Series([self.frame.loc(other_axis)[name1].mean(),
+ self.frame.loc(other_axis)[name1].sum()],
+ index=['mean', 'sum'])),
+ (name2, Series([self.frame.loc(other_axis)[name2].sum(),
+ self.frame.loc(other_axis)[name2].max()],
+ index=['sum', 'max'])),
+ ]))
+ expected = expected.T if axis in {1, 'columns'} else expected
+ assert_frame_equal(result, expected)
def test_nuiscance_columns(self):
@@ -1056,3 +1089,67 @@ def test_non_callable_aggregates(self):
expected = df.size
assert result == expected
+
+ @pytest.mark.parametrize("df, func, expected", chain(
+ _get_cython_table_params(
+ DataFrame(), [
+ ('sum', Series()),
+ ('max', Series()),
+ ('min', Series()),
+ ('all', Series(dtype=bool)),
+ ('any', Series(dtype=bool)),
+ ('mean', Series()),
+ ('prod', Series()),
+ ('std', Series()),
+ ('var', Series()),
+ ('median', Series()),
+ ]),
+ _get_cython_table_params(
+ DataFrame([[np.nan, 1], [1, 2]]), [
+ ('sum', Series([1., 3])),
+ ('max', Series([1., 2])),
+ ('min', Series([1., 1])),
+ ('all', Series([True, True])),
+ ('any', Series([True, True])),
+ ('mean', Series([1, 1.5])),
+ ('prod', Series([1., 2])),
+ ('std', Series([np.nan, 0.707107])),
+ ('var', Series([np.nan, 0.5])),
+ ('median', Series([1, 1.5])),
+ ]),
+ ))
+ def test_agg_cython_table(self, df, func, expected, axis):
+ # GH21224
+ # test reducing functions in
+ # pandas.core.base.SelectionMixin._cython_table
+ result = df.agg(func, axis=axis)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("df, func, expected", chain(
+ _get_cython_table_params(
+ DataFrame(), [
+ ('cumprod', DataFrame()),
+ ('cumsum', DataFrame()),
+ ]),
+ _get_cython_table_params(
+ DataFrame([[np.nan, 1], [1, 2]]), [
+ ('cumprod', DataFrame([[np.nan, 1], [1., 2.]])),
+ ('cumsum', DataFrame([[np.nan, 1], [1., 3.]])),
+ ]),
+ ))
+ def test_agg_cython_table_transform(self, df, func, expected, axis):
+ # GH21224
+ # test transforming functions in
+ # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
+ result = df.agg(func, axis=axis)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("df, func, expected", _get_cython_table_params(
+ DataFrame([['a', 'b'], ['b', 'a']]), [
+ ['cumprod', TypeError],
+ ]),
+ )
+ def test_agg_cython_table_raises(self, df, func, expected, axis):
+ # GH21224
+ with pytest.raises(expected):
+ df.agg(func, axis=axis)
diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py
index 8b133e654a869..8e4d28fc796df 100644
--- a/pandas/tests/generic/test_label_or_level_utils.py
+++ b/pandas/tests/generic/test_label_or_level_utils.py
@@ -76,14 +76,13 @@ def assert_level_reference(frame, levels, axis):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_is_level_or_label_reference_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_levels = df_levels.T
# Perform checks
@@ -91,11 +90,10 @@ def test_is_level_or_label_reference_df_simple(df_levels, axis):
assert_label_reference(df_levels, expected_labels, axis=axis)
-@pytest.mark.parametrize('axis', [0, 1])
def test_is_level_reference_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
@@ -165,11 +163,10 @@ def test_is_label_or_level_reference_panel_error(panel):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_ambig = df_ambig.T
# df_ambig has both an on-axis level and off-axis label named L1
@@ -179,7 +176,7 @@ def test_check_label_or_level_ambiguity_df(df_ambig, axis):
assert df_ambig._check_label_or_level_ambiguity('L1', axis=axis)
warning_msg = w[0].message.args[0]
- if axis == 0:
+ if axis in {0, 'index'}:
assert warning_msg.startswith("'L1' is both an index level "
"and a column label")
else:
@@ -239,7 +236,7 @@ def test_check_label_or_level_ambiguity_panel_error(panel):
# ===============================
def assert_label_values(frame, labels, axis):
for label in labels:
- if axis == 0:
+ if axis in {0, 'index'}:
expected = frame[label]._values
else:
expected = frame.loc[label]._values
@@ -251,7 +248,7 @@ def assert_label_values(frame, labels, axis):
def assert_level_values(frame, levels, axis):
for level in levels:
- if axis == 0:
+ if axis in {0, 'index'}:
expected = frame.index.get_level_values(level=level)._values
else:
expected = (frame.columns
@@ -264,14 +261,13 @@ def assert_level_values(frame, levels, axis):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_levels = df_levels.T
# Perform checks
@@ -279,11 +275,10 @@ def test_get_label_or_level_values_df_simple(df_levels, axis):
assert_level_values(df_levels, expected_levels, axis=axis)
-@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
@@ -300,11 +295,10 @@ def test_get_label_or_level_values_df_ambig(df_ambig, axis):
assert_label_values(df_ambig, ['L3'], axis=axis)
-@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_duplabels = df_duplabels.T
# df has unambiguous level 'L1'
@@ -314,7 +308,7 @@ def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
assert_label_values(df_duplabels, ['L3'], axis=axis)
# df has duplicate labels 'L2'
- if axis == 0:
+ if axis in {0, 'index'}:
expected_msg = "The column label 'L2' is not unique"
else:
expected_msg = "The index label 'L2' is not unique"
@@ -361,7 +355,7 @@ def assert_labels_dropped(frame, labels, axis):
for label in labels:
df_dropped = frame._drop_labels_or_levels(label, axis=axis)
- if axis == 0:
+ if axis in {0, 'index'}:
assert label in frame.columns
assert label not in df_dropped.columns
else:
@@ -373,7 +367,7 @@ def assert_levels_dropped(frame, levels, axis):
for level in levels:
df_dropped = frame._drop_labels_or_levels(level, axis=axis)
- if axis == 0:
+ if axis in {0, 'index'}:
assert level in frame.index.names
assert level not in df_dropped.index.names
else:
@@ -383,14 +377,13 @@ def assert_levels_dropped(frame, levels, axis):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_drop_labels_or_levels_df(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_levels = df_levels.T
# Perform checks
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index b28b9f342695f..b717d75d835d0 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -4,6 +4,7 @@
import pytest
from collections import Counter, defaultdict, OrderedDict
+from itertools import chain
import numpy as np
import pandas as pd
@@ -11,8 +12,10 @@
from pandas import (Index, Series, DataFrame, isna)
from pandas.compat import lrange
from pandas import compat
-from pandas.util.testing import assert_series_equal, assert_frame_equal
+from pandas.util.testing import (assert_series_equal,
+ assert_frame_equal)
import pandas.util.testing as tm
+from pandas.conftest import _get_cython_table_params
from .common import TestData
@@ -331,6 +334,85 @@ def test_non_callable_aggregates(self):
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
+ @pytest.mark.parametrize("series, func, expected", chain(
+ _get_cython_table_params(Series(), [
+ ('sum', 0),
+ ('max', np.nan),
+ ('min', np.nan),
+ ('all', True),
+ ('any', False),
+ ('mean', np.nan),
+ ('prod', 1),
+ ('std', np.nan),
+ ('var', np.nan),
+ ('median', np.nan),
+ ]),
+ _get_cython_table_params(Series([np.nan, 1, 2, 3]), [
+ ('sum', 6),
+ ('max', 3),
+ ('min', 1),
+ ('all', True),
+ ('any', True),
+ ('mean', 2),
+ ('prod', 6),
+ ('std', 1),
+ ('var', 1),
+ ('median', 2),
+ ]),
+ _get_cython_table_params(Series('a b c'.split()), [
+ ('sum', 'abc'),
+ ('max', 'c'),
+ ('min', 'a'),
+ ('all', 'c'), # see GH12863
+ ('any', 'a'),
+ ]),
+ ))
+ def test_agg_cython_table(self, series, func, expected):
+ # GH21224
+ # test reducing functions in
+ # pandas.core.base.SelectionMixin._cython_table
+ result = series.agg(func)
+ if tm.is_number(expected):
+ assert np.isclose(result, expected, equal_nan=True)
+ else:
+ assert result == expected
+
+ @pytest.mark.parametrize("series, func, expected", chain(
+ _get_cython_table_params(Series(), [
+ ('cumprod', Series([], Index([]))),
+ ('cumsum', Series([], Index([]))),
+ ]),
+ _get_cython_table_params(Series([np.nan, 1, 2, 3]), [
+ ('cumprod', Series([np.nan, 1, 2, 6])),
+ ('cumsum', Series([np.nan, 1, 3, 6])),
+ ]),
+ _get_cython_table_params(Series('a b c'.split()), [
+ ('cumsum', Series(['a', 'ab', 'abc'])),
+ ]),
+ ))
+ def test_agg_cython_table_transform(self, series, func, expected):
+ # GH21224
+ # test transforming functions in
+ # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
+ result = series.agg(func)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("series, func, expected", chain(
+ _get_cython_table_params(Series('a b c'.split()), [
+ ('mean', TypeError), # mean raises TypeError
+ ('prod', TypeError),
+ ('std', TypeError),
+ ('var', TypeError),
+ ('median', TypeError),
+ ('cumprod', TypeError),
+ ])
+ ))
+ def test_agg_cython_table_raises(self, series, func, expected):
+ # GH21224
+ with pytest.raises(expected):
+ # e.g. Series('a b'.split()).cumprod() will raise
+ series.agg(func)
+
class TestSeriesMap(TestData):
| - [x] closes #16679
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is a splitoff from #21123, to only fix #16679. #19629 will be fixed in a separate PR afterwards.
Passing functions to ``df.agg``, ``df.transform`` and ``df.apply`` may use different methods when ``axis=1``, than when,``axis=0``, and give different results when NaNs are supplied.
Explanation
-------------
Passing the functions in ``SelectionMixin._cython_table`` to ``df.agg`` should defer to use the relevant cython functions. This currently works as expected when ``axis=0``, but not when ``axis=1``.
The reason for this difference is that ``df.aggregate`` currently defers to ``df._aggregate`` when ``axis=0``, but defers to ``df.apply``, when ``axis=1``, and these may give different result when passed functions and the series/frame contains Nan values. I've solved this by transposing df in ``DataFrame._aggragate`` when ``axis=1``, and passing the possibly transposed on to the super method.
Also, ``df.apply`` delegates back to ``df.agg``, when given lists or dicts as inputs, but only works when axis=0. This PR fixes this, so axis=1 works the as axis=0.
The tests have been heavily parametrized, helping ensure that various ways to call the methods now give correct results for both axes.
@WillAyd @jreback (reviewers of #21123) | https://api.github.com/repos/pandas-dev/pandas/pulls/21224 | 2018-05-27T21:40:04Z | 2018-07-28T14:24:49Z | 2018-07-28T14:24:49Z | 2018-09-16T23:20:10Z |
ENH: Merge DataFrame and Series using `on` (GH21220) | diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index d78e476dd7837..98914c13d4d31 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -506,8 +506,8 @@ You can also pass a list of dicts or Series:
.. _merging.join:
-Database-style DataFrame joining/merging
-----------------------------------------
+Database-style DataFrame or named Series joining/merging
+--------------------------------------------------------
pandas has full-featured, **high performance** in-memory join operations
idiomatically very similar to relational databases like SQL. These methods
@@ -522,7 +522,7 @@ Users who are familiar with SQL but new to pandas might be interested in a
:ref:`comparison with SQL<compare_with_sql.join>`.
pandas provides a single function, :func:`~pandas.merge`, as the entry point for
-all standard database join operations between ``DataFrame`` objects:
+all standard database join operations between ``DataFrame`` or named ``Series`` objects:
::
@@ -531,23 +531,23 @@ all standard database join operations between ``DataFrame`` objects:
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None)
-* ``left``: A DataFrame object.
-* ``right``: Another DataFrame object.
+* ``left``: A DataFrame or named Series object.
+* ``right``: Another DataFrame or named Series object.
* ``on``: Column or index level names to join on. Must be found in both the left
- and right DataFrame objects. If not passed and ``left_index`` and
+ and right DataFrame and/or Series objects. If not passed and ``left_index`` and
``right_index`` are ``False``, the intersection of the columns in the
- DataFrames will be inferred to be the join keys.
-* ``left_on``: Columns or index levels from the left DataFrame to use as
+ DataFrames and/or Series will be inferred to be the join keys.
+* ``left_on``: Columns or index levels from the left DataFrame or Series to use as
keys. Can either be column names, index level names, or arrays with length
- equal to the length of the DataFrame.
-* ``right_on``: Columns or index levels from the right DataFrame to use as
+ equal to the length of the DataFrame or Series.
+* ``right_on``: Columns or index levels from the right DataFrame or Series to use as
keys. Can either be column names, index level names, or arrays with length
- equal to the length of the DataFrame.
+ equal to the length of the DataFrame or Series.
* ``left_index``: If ``True``, use the index (row labels) from the left
- DataFrame as its join key(s). In the case of a DataFrame with a MultiIndex
+ DataFrame or Series as its join key(s). In the case of a DataFrame or Series with a MultiIndex
(hierarchical), the number of levels must match the number of join keys
- from the right DataFrame.
-* ``right_index``: Same usage as ``left_index`` for the right DataFrame
+ from the right DataFrame or Series.
+* ``right_index``: Same usage as ``left_index`` for the right DataFrame or Series
* ``how``: One of ``'left'``, ``'right'``, ``'outer'``, ``'inner'``. Defaults
to ``inner``. See below for more detailed description of each method.
* ``sort``: Sort the result DataFrame by the join keys in lexicographical
@@ -555,7 +555,7 @@ all standard database join operations between ``DataFrame`` objects:
substantially in many cases.
* ``suffixes``: A tuple of string suffixes to apply to overlapping
columns. Defaults to ``('_x', '_y')``.
-* ``copy``: Always copy data (default ``True``) from the passed DataFrame
+* ``copy``: Always copy data (default ``True``) from the passed DataFrame or named Series
objects, even when reindexing is not necessary. Cannot be avoided in many
cases but may improve performance / memory usage. The cases where copying
can be avoided are somewhat pathological but this option is provided
@@ -563,8 +563,8 @@ all standard database join operations between ``DataFrame`` objects:
* ``indicator``: Add a column to the output DataFrame called ``_merge``
with information on the source of each row. ``_merge`` is Categorical-type
and takes on a value of ``left_only`` for observations whose merge key
- only appears in ``'left'`` DataFrame, ``right_only`` for observations whose
- merge key only appears in ``'right'`` DataFrame, and ``both`` if the
+ only appears in ``'left'`` DataFrame or Series, ``right_only`` for observations whose
+ merge key only appears in ``'right'`` DataFrame or Series, and ``both`` if the
observation's merge key is found in both.
* ``validate`` : string, default None.
@@ -584,10 +584,10 @@ all standard database join operations between ``DataFrame`` objects:
Support for specifying index levels as the ``on``, ``left_on``, and
``right_on`` parameters was added in version 0.23.0.
+ Support for merging named ``Series`` objects was added in version 0.24.0.
-The return type will be the same as ``left``. If ``left`` is a ``DataFrame``
-and ``right`` is a subclass of DataFrame, the return type will still be
-``DataFrame``.
+The return type will be the same as ``left``. If ``left`` is a ``DataFrame`` or named ``Series``
+and ``right`` is a subclass of ``DataFrame``, the return type will still be ``DataFrame``.
``merge`` is a function in the pandas namespace, and it is also available as a
``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b015495b095b6..769bda992956b 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -12,6 +12,7 @@ v0.24.0 (Month XX, 2018)
New features
~~~~~~~~~~~~
+- :func:`merge` now directly allows merge between objects of type ``DataFrame`` and named ``Series``, without the need to convert the ``Series`` object into a ``DataFrame`` beforehand (:issue:`21220`)
- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4578d2ac08199..873170eb9813b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -137,8 +137,8 @@
"""
_merge_doc = """
-Merge DataFrame objects by performing a database-style join operation by
-columns or indexes.
+Merge DataFrame or named Series objects by performing a database-style join
+operation by columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
@@ -146,7 +146,7 @@
Parameters
----------%s
-right : DataFrame, Series or dict
+right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
@@ -217,6 +217,7 @@
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
+Support for merging named Series objects was added in version 0.24.0
See Also
--------
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 25d8cb4e804a2..caaeb1bad2358 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -11,7 +11,7 @@
import pandas.compat as compat
from pandas import (Categorical, DataFrame,
- Index, MultiIndex, Timedelta)
+ Index, MultiIndex, Timedelta, Series)
from pandas.core.arrays.categorical import _recode_for_categories
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
@@ -493,6 +493,8 @@ def __init__(self, left, right, how='inner', on=None,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
+ left = validate_operand(left)
+ right = validate_operand(right)
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
@@ -519,13 +521,6 @@ def __init__(self, left, right, how='inner', on=None,
raise ValueError(
'indicator option can only accept boolean or string arguments')
- if not isinstance(left, DataFrame):
- raise ValueError('can not merge DataFrame with instance of '
- 'type {left}'.format(left=type(left)))
- if not isinstance(right, DataFrame):
- raise ValueError('can not merge DataFrame with instance of '
- 'type {right}'.format(right=type(right)))
-
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
@@ -1645,3 +1640,16 @@ def _should_fill(lname, rname):
def _any(x):
return x is not None and com._any_not_none(*x)
+
+
+def validate_operand(obj):
+ if isinstance(obj, DataFrame):
+ return obj
+ elif isinstance(obj, Series):
+ if obj.name is None:
+ raise ValueError('Cannot merge a Series without a name')
+ else:
+ return obj.to_frame()
+ else:
+ raise TypeError('Can only merge Series or DataFrame objects, '
+ 'a {obj} was passed'.format(obj=type(obj)))
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 1b8f3632d381c..09f511886583c 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -228,16 +228,18 @@ def test_join_on_fails_with_different_column_counts(self):
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
- def test_join_on_fails_with_wrong_object_type(self):
- # GH12081
- wrongly_typed = [Series([0, 1]), 2, 'str', None, np.array([0, 1])]
- df = DataFrame({'a': [1, 1]})
+ @pytest.mark.parametrize("wrong_type", [2, 'str', None, np.array([0, 1])])
+ def test_join_on_fails_with_wrong_object_type(self, wrong_type):
+ # GH12081 - original issue
+
+ # GH21220 - merging of Series and DataFrame is now allowed
+ # Edited test to remove the Series object from test parameters
- for obj in wrongly_typed:
- with tm.assert_raises_regex(ValueError, str(type(obj))):
- merge(obj, df, left_on='a', right_on='a')
- with tm.assert_raises_regex(ValueError, str(type(obj))):
- merge(df, obj, left_on='a', right_on='a')
+ df = DataFrame({'a': [1, 1]})
+ with tm.assert_raises_regex(TypeError, str(type(wrong_type))):
+ merge(wrong_type, df, left_on='a', right_on='a')
+ with tm.assert_raises_regex(TypeError, str(type(wrong_type))):
+ merge(df, wrong_type, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 037bd9cc7cd18..42df4511578f1 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1887,3 +1887,33 @@ def test_merge_index_types(index):
OrderedDict([('left_data', [1, 2]), ('right_data', [1.0, 2.0])]),
index=index)
assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("on,left_on,right_on,left_index,right_index,nms,nm", [
+ (['outer', 'inner'], None, None, False, False, ['outer', 'inner'], 'B'),
+ (None, None, None, True, True, ['outer', 'inner'], 'B'),
+ (None, ['outer', 'inner'], None, False, True, None, 'B'),
+ (None, None, ['outer', 'inner'], True, False, None, 'B'),
+ (['outer', 'inner'], None, None, False, False, ['outer', 'inner'], None),
+ (None, None, None, True, True, ['outer', 'inner'], None),
+ (None, ['outer', 'inner'], None, False, True, None, None),
+ (None, None, ['outer', 'inner'], True, False, None, None)])
+def test_merge_series(on, left_on, right_on, left_index, right_index, nms, nm):
+ # GH 21220
+ a = pd.DataFrame({"A": [1, 2, 3, 4]},
+ index=pd.MultiIndex.from_product([['a', 'b'], [0, 1]],
+ names=['outer', 'inner']))
+ b = pd.Series([1, 2, 3, 4],
+ index=pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
+ names=['outer', 'inner']), name=nm)
+ expected = pd.DataFrame({"A": [2, 4], "B": [1, 3]},
+ index=pd.MultiIndex.from_product([['a', 'b'], [1]],
+ names=nms))
+ if nm is not None:
+ result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on,
+ left_index=left_index, right_index=right_index)
+ tm.assert_frame_equal(result, expected)
+ else:
+ with tm.assert_raises_regex(ValueError, 'a Series without a name'):
+ result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on,
+ left_index=left_index, right_index=right_index)
| - [x] closes #21220
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21223 | 2018-05-27T15:08:47Z | 2018-07-23T17:02:14Z | 2018-07-23T17:02:14Z | 2018-07-23T17:03:41Z |
BUG: df.agg(sum, axis=1) uses different method than when axis=0 | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 5a553264e828b..6bad62e8916e9 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -94,3 +94,8 @@ Categorical
^^^^^^^^^^^
-
+
+Numeric
+^^^^^^^
+
+- :meth:`~DataFrame.agg` now correctly handles built-in methods like ``sum`` when axis=1 (:issue:`21134`)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index b09cb872a12fb..3eda078a802f4 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -149,3 +149,20 @@ def tz_aware_fixture(request):
Fixture for trying explicit timezones: {0}
"""
return request.param
+
+
+@pytest.fixture(
+ # params: Python 3.5 randomizes dict access and xdist doesn't like that
+ # in fixtures. In order to get predetermined values we need to sort
+ # the list deterministically
+ # GH 21123
+ params=list(sorted(pd.core.base.SelectionMixin._cython_table.items(),
+ key=lambda x: x[0].__name__)),
+ ids=lambda x: "({}-{!r})_fixture".format(x[0].__name__, x[1]),
+)
+def cython_table_items(request):
+ """
+ Fixture for returning the items in
+ pandas.core.base.SelectionMixin._cython_table
+ """
+ return request.param
diff --git a/pandas/core/base.py b/pandas/core/base.py
index c331ead8d2fef..57c0a750ab4f5 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -316,13 +316,14 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs):
raise ValueError("{arg} is an unknown string function".format(arg=arg))
- def _aggregate(self, arg, *args, **kwargs):
+ def _aggregate(self, arg, axis=0, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
+ axis : int
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
@@ -335,17 +336,18 @@ def _aggregate(self, arg, *args, **kwargs):
how can be a string describe the required post-processing, or
None if not required
"""
+ obj = self if axis == 0 else self.T
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
- _axis = getattr(self, 'axis', 0)
+ _axis = getattr(obj, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
- return self._try_aggregate_string_function(arg, *args,
- **kwargs), None
+ return obj._try_aggregate_string_function(arg, *args,
+ **kwargs), None
if isinstance(arg, dict):
@@ -353,7 +355,7 @@ def _aggregate(self, arg, *args, **kwargs):
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
- obj = self._selected_obj
+ selected_obj = obj._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
@@ -388,16 +390,16 @@ def nested_renaming_depr(level=4):
if isinstance(v, dict):
is_nested_renamer = True
- if k not in obj.columns:
+ if k not in selected_obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
- elif isinstance(obj, ABCSeries):
+ elif isinstance(selected_obj, ABCSeries):
nested_renaming_depr()
- elif isinstance(obj, ABCDataFrame) and \
- k not in obj.columns:
+ elif isinstance(selected_obj, ABCDataFrame) and \
+ k not in selected_obj.columns:
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
@@ -407,8 +409,8 @@ def nested_renaming_depr(level=4):
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
- if (isinstance(obj, ABCDataFrame) and
- len(obj.columns.intersection(keys)) != len(keys)):
+ if (isinstance(selected_obj, ABCDataFrame) and len(
+ selected_obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
@@ -417,7 +419,7 @@ def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
- colg = self._gotitem(name, ndim=1, subset=subset)
+ colg = obj._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
@@ -427,8 +429,8 @@ def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
- colg = self._gotitem(self._selection, ndim=2,
- subset=obj)
+ colg = obj._gotitem(obj._selection, ndim=2,
+ subset=selected_obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
@@ -458,20 +460,22 @@ def _agg(arg, func):
else:
- if self._selection is not None:
+ if obj._selection is not None:
keys = None
# some selection on the object
- elif self._selection is not None:
+ elif obj._selection is not None:
- sl = set(self._selection_list)
+ sl = set(obj._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
- result = _agg(arg, lambda fname,
- agg_how: _agg_1dim(self._selection, agg_how))
+ result = _agg(
+ arg,
+ lambda fname, agg_how: _agg_1dim(
+ obj._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
@@ -516,7 +520,7 @@ def is_any_frame():
return concat([result[k] for k in keys],
keys=keys, axis=1), True
- elif isinstance(self, ABCSeries) and is_any_series():
+ elif isinstance(obj, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
@@ -541,20 +545,20 @@ def is_any_frame():
# we have a dict of scalars
result = Series(result,
- name=getattr(self, 'name', None))
+ name=getattr(obj, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
- return self._aggregate_multiple_funcs(arg,
- _level=_level,
- _axis=_axis), None
+ return obj._aggregate_multiple_funcs(arg,
+ _level=_level,
+ _axis=_axis), None
else:
result = None
- f = self._is_cython_func(arg)
- if f and not args and not kwargs:
- return getattr(self, f)(), None
+ f = obj._is_cython_func(arg)
+ if f is not None:
+ return getattr(obj, f)(*args, **kwargs), None
# caller can react
return result, True
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b6c33b4f79478..c515b13aaac82 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5818,13 +5818,11 @@ def _gotitem(self,
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
- # TODO: flipped axis
result = None
- if axis == 0:
- try:
- result, how = self._aggregate(func, axis=0, *args, **kwargs)
- except TypeError:
- pass
+ try:
+ result, how = self._aggregate(func, axis=axis, *args, **kwargs)
+ except TypeError:
+ pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index df7a5dc9dc173..616345dde2d2f 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -4086,7 +4086,10 @@ def _post_process_cython_aggregate(self, obj):
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
- result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
+ _agg_kwargs = kwargs.copy()
+ axis = _agg_kwargs.pop('axis', 0)
+ result, how = self._aggregate(arg, axis, _level=_level,
+ *args, **_agg_kwargs)
if how is None:
return result
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index dfb2961befe35..2c05239b76675 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -1056,3 +1056,72 @@ def test_non_callable_aggregates(self):
expected = df.size
assert result == expected
+
+ @pytest.mark.parametrize("frame, expected_dict", [
+ [DataFrame(), {
+ 'sum': Series(),
+ 'max': Series(),
+ 'min': Series(),
+ 'all': Series(dtype=bool),
+ 'any': Series(dtype=bool),
+ 'mean': Series(),
+ 'prod': Series(),
+ 'std': Series(),
+ 'var': Series(),
+ 'median': Series(),
+ 'cumprod': DataFrame(),
+ 'cumsum': DataFrame(),
+ }],
+ [DataFrame([[np.nan, 1], [1, 2]]), {
+ 'sum': Series([1., 3]),
+ 'max': Series([1., 2]),
+ 'min': Series([1., 1]),
+ 'all': Series([True, True]),
+ 'any': Series([True, True]),
+ 'mean': Series([1, 1.5]),
+ 'prod': Series([1., 2]),
+ 'std': Series([np.nan, 0.707107]),
+ 'var': Series([np.nan, 0.5]),
+ 'median': Series([1, 1.5]),
+ 'cumprod': DataFrame([[np.nan, 1], [1., 2.]]),
+ 'cumsum': DataFrame([[np.nan, 1], [1., 3.]]),
+ }],
+ [DataFrame([['a', 'b'], ['b', 'a']]), {
+ 'sum': Series(['ab', 'ba']),
+ 'max': Series(['b', 'b']),
+ 'min': Series(['a', 'a']),
+ 'all': Series([True, True]),
+ 'any': Series([True, True]),
+ 'mean': Series([], index=pd.Index([], dtype='int64')),
+ 'prod': Series([], index=pd.Index([], dtype='int64')),
+ 'std': Series([], index=pd.Index([], dtype='int64')),
+ 'var': Series([], index=pd.Index([], dtype='int64')),
+ 'median': Series([], index=pd.Index([], dtype='int64')),
+ 'cumprod': TypeError,
+ 'cumsum': DataFrame([['a', 'b'], ['ab', 'ba']]),
+ }],
+ ])
+ @pytest.mark.parametrize("axis", [0, 1], ids=lambda x: "axis {}".format(x))
+ def test_agg_function_input(self, cython_table_items,
+ frame, expected_dict, axis):
+ # GH21123
+ # test if using items in _cython_table gives correct results
+ np_func, str_func = cython_table_items
+ expected = expected_dict[str_func]
+
+ if isinstance(expected, type) and issubclass(expected, Exception):
+ with pytest.raises(expected):
+ # e.g. DataFrame(['a b'.split()]).cumprod() will raise
+ frame.agg(np_func, axis=axis)
+ with pytest.raises(expected):
+ frame.agg(str_func, axis=axis)
+ return
+
+ result = frame.agg(np_func, axis=axis)
+ result_str_func = frame.agg(str_func, axis=axis)
+ if str_func in ('cumprod', 'cumsum'):
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result_str_func, expected)
+ else:
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result_str_func, expected)
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index b28b9f342695f..27a7f08116f37 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -331,6 +331,76 @@ def test_non_callable_aggregates(self):
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
+ @pytest.mark.parametrize("series, expected_dict", [
+ [Series(), {
+ 'sum': 0,
+ 'max': np.nan,
+ 'min': np.nan,
+ 'all': True,
+ 'any': False,
+ 'mean': np.nan,
+ 'prod': 1,
+ 'std': np.nan,
+ 'var': np.nan,
+ 'median': np.nan,
+ 'cumprod': Series([], Index([])),
+ 'cumsum': Series([], Index([])),
+ }],
+ [Series([np.nan, 1, 2, 3]), {
+ 'sum': 6,
+ 'max': 3,
+ 'min': 1,
+ 'all': True,
+ 'any': True,
+ 'mean': 2,
+ 'prod': 6,
+ 'std': 1,
+ 'var': 1,
+ 'median': 2,
+ 'cumprod': Series([np.nan, 1, 2, 6]),
+ 'cumsum': Series([np.nan, 1, 3, 6]),
+ }],
+ [Series('a b c'.split()), {
+ 'sum': 'abc',
+ 'max': 'c',
+ 'min': 'a',
+ 'all': 'c', # see GH12863
+ 'any': 'a',
+ 'mean': TypeError, # mean raises TypeError
+ 'prod': TypeError,
+ 'std': TypeError,
+ 'var': TypeError,
+ 'median': TypeError,
+ 'cumprod': TypeError,
+ 'cumsum': Series(['a', 'ab', 'abc']),
+ }],
+ ])
+ def test_agg_cython_table_input(self, cython_table_items,
+ series, expected_dict):
+ # GH21123
+ # test if using items in _cython_table gives correct results
+ np_func, str_func = cython_table_items
+ expected = expected_dict[str_func]
+
+ if isinstance(expected, type) and issubclass(expected, Exception):
+ with pytest.raises(expected):
+ series.agg(np_func)
+ with pytest.raises(expected):
+ series.agg(str_func)
+ return
+
+ result = series.agg(np_func)
+ result_str_func = series.agg(str_func)
+ if str_func in ('cumprod', 'cumsum'):
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result_str_func, expected)
+ elif tm.is_number(expected):
+ assert np.isclose(result, expected, equal_nan=True)
+ assert np.isclose(result_str_func, expected, equal_nan=True)
+ else:
+ assert result == expected
+ assert result_str_func == expected
+
class TestSeriesMap(TestData):
| - [x] closes #21134
- [x] xref #21123
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is a splitoff from #21123, to only fix #21134. #19629 will be fixed in a separate PR afterwards.
Passing builtins to ``df.agg`` is ok when ``axis=0``, but can give wrong result, when ``axis=1`` when NaNs are supplied.
Explanation
-------------
Passing the functions in ``SelectionMixin._cython_table`` to ``df.agg`` should defer to use the relevant cython functions. This currently works as expected when ``axis=0``, but not always when ``axis=1``.
The reason for this difference is that ``df.aggregate`` currently defers to ``df._aggregate`` when ``axis=0``, but defers to ``df.apply``, when ``axis=1``, and these give different result when passed funcions and the series/frame contains Nan values. I've solved this by transposing df in ``_aggragate`` when ``axis=1``.
The tests have been heavily parametrized, helping ensure that the various ways to call ``df.agg`` now give correct result. | https://api.github.com/repos/pandas-dev/pandas/pulls/21222 | 2018-05-27T09:57:24Z | 2018-05-27T10:51:20Z | null | 2018-05-27T21:32:26Z |
DOC: correct header line in v0.24.0 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b94377af770f4..43e513c9d03f5 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1,7 +1,7 @@
.. _whatsnew_0240:
v0.24.0
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------
.. _whatsnew_0240.enhancements:
| Small change, but I *think* that this was the cause of a bunch of warnings in the doc build | https://api.github.com/repos/pandas-dev/pandas/pulls/21218 | 2018-05-26T10:43:24Z | 2018-05-26T13:03:14Z | 2018-05-26T13:03:14Z | 2018-05-26T13:11:17Z |
CI: revert skip of geopandas downstream test | diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index c28e2052bd93e..9e46084898b57 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -96,7 +96,6 @@ def test_pandas_datareader():
'F', 'quandl', '2017-01-01', '2017-02-01')
-@pytest.mark.xfail(reaason="downstream install issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
| xref https://github.com/pandas-dev/pandas/pull/21204 | https://api.github.com/repos/pandas-dev/pandas/pulls/21217 | 2018-05-26T09:47:46Z | 2018-06-04T09:53:07Z | 2018-06-04T09:53:07Z | 2018-06-12T16:30:34Z |
API/BUG: DatetimeIndex correctly localizes integer data | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 6d5e40d37c8df..c29197725a2b6 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -36,7 +36,7 @@ Datetimelike API Changes
Other API Changes
^^^^^^^^^^^^^^^^^
--
+- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`)
-
-
@@ -92,7 +92,7 @@ Datetimelike
^^^^^^^^^^^^
- Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`)
--
+- Bug in :class:`Index` with ``datetime64[ns, tz]`` dtype that did not localize integer data correctly (:issue:`20964`)
-
Timedelta
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index d9e4ef7db1158..36345a32a3bf7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1175,6 +1175,10 @@ def astype(self, dtype, copy=True):
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
+ if is_datetime64tz_dtype(dtype):
+ from pandas.core.indexes.datetimes import DatetimeIndex
+ return DatetimeIndex(self.values, name=self.name, dtype=dtype,
+ copy=copy)
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 66622814f172d..e944df7aa83c6 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -395,57 +395,43 @@ def __new__(cls, data=None,
# data must be Index or np.ndarray here
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
- is_integer_dtype(data)):
+ is_integer_dtype(data) or lib.infer_dtype(data) == 'integer'):
data = tools.to_datetime(data, dayfirst=dayfirst,
yearfirst=yearfirst)
- if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
-
- if isinstance(data, DatetimeIndex):
- if tz is None:
- tz = data.tz
- elif data.tz is None:
- data = data.tz_localize(tz, ambiguous=ambiguous)
- else:
- # the tz's must match
- if str(tz) != str(data.tz):
- msg = ('data is already tz-aware {0}, unable to '
- 'set specified tz: {1}')
- raise TypeError(msg.format(data.tz, tz))
+ if isinstance(data, DatetimeIndex):
+ if tz is None:
+ tz = data.tz
+ elif data.tz is None:
+ data = data.tz_localize(tz, ambiguous=ambiguous)
+ else:
+ # the tz's must match
+ if str(tz) != str(data.tz):
+ msg = ('data is already tz-aware {0}, unable to '
+ 'set specified tz: {1}')
+ raise TypeError(msg.format(data.tz, tz))
- subarr = data.values
+ subarr = data.values
- if freq is None:
- freq = data.freq
- verify_integrity = False
- else:
- if data.dtype != _NS_DTYPE:
- subarr = conversion.ensure_datetime64ns(data)
- else:
- subarr = data
+ if freq is None:
+ freq = data.freq
+ verify_integrity = False
+ elif issubclass(data.dtype.type, np.datetime64):
+ if data.dtype != _NS_DTYPE:
+ data = conversion.ensure_datetime64ns(data)
+ if tz is not None:
+ # Convert tz-naive to UTC
+ tz = timezones.maybe_get_tz(tz)
+ data = conversion.tz_localize_to_utc(data.view('i8'), tz,
+ ambiguous=ambiguous)
+ subarr = data.view(_NS_DTYPE)
else:
# must be integer dtype otherwise
- if isinstance(data, Int64Index):
- raise TypeError('cannot convert Int64Index->DatetimeIndex')
+ # assume this data are epoch timestamps
if data.dtype != _INT64_DTYPE:
- data = data.astype(np.int64)
+ data = data.astype(np.int64, copy=False)
subarr = data.view(_NS_DTYPE)
- if isinstance(subarr, DatetimeIndex):
- if tz is None:
- tz = subarr.tz
- else:
- if tz is not None:
- tz = timezones.maybe_get_tz(tz)
-
- if (not isinstance(data, DatetimeIndex) or
- getattr(data, 'tz', None) is None):
- # Convert tz-naive to UTC
- ints = subarr.view('i8')
- subarr = conversion.tz_localize_to_utc(ints, tz,
- ambiguous=ambiguous)
- subarr = subarr.view(_NS_DTYPE)
-
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
@@ -807,8 +793,9 @@ def _mpl_repr(self):
@cache_readonly
def _is_dates_only(self):
+ """Return a boolean if we are only dates (and don't have a timezone)"""
from pandas.io.formats.format import _is_dates_only
- return _is_dates_only(self.values)
+ return _is_dates_only(self.values) and self.tz is None
@property
def _formatter_func(self):
@@ -1244,7 +1231,7 @@ def join(self, other, how='left', level=None, return_indexers=False,
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
- other.inferred_type not in ('floating', 'mixed-integer',
+ other.inferred_type not in ('floating', 'integer', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
@@ -2100,8 +2087,9 @@ def normalize(self):
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = conversion.date_normalize(self.asi8, self.tz)
- return DatetimeIndex(new_values, freq='infer', name=self.name,
- tz=self.tz)
+ return DatetimeIndex(new_values,
+ freq='infer',
+ name=self.name).tz_localize(self.tz)
@Substitution(klass='DatetimeIndex')
@Appender(_shared_docs['searchsorted'])
@@ -2182,8 +2170,6 @@ def insert(self, loc, item):
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
- if self.tz is not None:
- new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
except (AttributeError, TypeError):
@@ -2221,8 +2207,6 @@ def delete(self, loc):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
- if self.tz is not None:
- new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index 8acdd301f241a..64b8f48f6a4e1 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -225,6 +225,16 @@ def _check_rng(rng):
_check_rng(rng_eastern)
_check_rng(rng_utc)
+ @pytest.mark.parametrize('tz, dtype', [
+ ['US/Pacific', 'datetime64[ns, US/Pacific]'],
+ [None, 'datetime64[ns]']])
+ def test_integer_index_astype_datetime(self, tz, dtype):
+ # GH 20997, 20964
+ val = [pd.Timestamp('2018-01-01', tz=tz).value]
+ result = pd.Index(val).astype(dtype)
+ expected = pd.DatetimeIndex(['2018-01-01'], tz=tz)
+ tm.assert_index_equal(result, expected)
+
class TestToPeriod(object):
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index b138b79caac76..f7682a965c038 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -1,8 +1,10 @@
-import pytest
+from datetime import timedelta
+from operator import attrgetter
+from functools import partial
+import pytest
import pytz
import numpy as np
-from datetime import timedelta
import pandas as pd
from pandas import offsets
@@ -26,25 +28,28 @@ def test_construction_caching(self):
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
- def test_construction_with_alt(self):
-
- i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
- i2 = DatetimeIndex(i, dtype=i.dtype)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
-
- i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
-
- i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
-
- i2 = DatetimeIndex(
- i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
+ @pytest.mark.parametrize('kwargs', [
+ {'tz': 'dtype.tz'},
+ {'dtype': 'dtype'},
+ {'dtype': 'dtype', 'tz': 'dtype.tz'}])
+ def test_construction_with_alt(self, kwargs, tz_aware_fixture):
+ tz = tz_aware_fixture
+ i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
+ kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
+ result = DatetimeIndex(i, **kwargs)
+ tm.assert_index_equal(i, result)
+
+ @pytest.mark.parametrize('kwargs', [
+ {'tz': 'dtype.tz'},
+ {'dtype': 'dtype'},
+ {'dtype': 'dtype', 'tz': 'dtype.tz'}])
+ def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
+ tz = tz_aware_fixture
+ i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
+ kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
+ result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
+ expected = i.tz_localize(None).tz_localize('UTC').tz_convert(tz)
+ tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
@@ -478,6 +483,19 @@ def test_constructor_timestamp_near_dst(self):
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize('klass', [Index, DatetimeIndex])
+ @pytest.mark.parametrize('box', [
+ np.array, partial(np.array, dtype=object), list])
+ @pytest.mark.parametrize('tz, dtype', [
+ ['US/Pacific', 'datetime64[ns, US/Pacific]'],
+ [None, 'datetime64[ns]']])
+ def test_constructor_with_int_tz(self, klass, box, tz, dtype):
+ # GH 20997, 20964
+ ts = Timestamp('2018-01-01', tz=tz)
+ result = klass(box([ts.value]), dtype=dtype)
+ expected = klass([ts])
+ assert result == expected
+
class TestTimeSeries(object):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index c264f5f79e47e..b8bd218ec25ab 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -402,26 +402,33 @@ def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
- @pytest.mark.parametrize("values", [
- # pass values without timezone, as DatetimeIndex localizes it
- pd.date_range('2011-01-01', periods=5).values,
- pd.date_range('2011-01-01', periods=5).asi8])
+ @pytest.mark.parametrize("attr, utc", [
+ ['values', False],
+ ['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
- def test_constructor_dtypes_datetime(self, tz_naive_fixture, values,
+ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
- index = pd.date_range('2011-01-01', periods=5, tz=tz_naive_fixture)
+ # Test constructing with a datetimetz dtype
+ # .values produces numpy datetimes, so these are considered naive
+ # .asi8 produces integers, so these are considered epoch timestamps
+ index = pd.date_range('2011-01-01', periods=5)
+ arg = getattr(index, attr)
+ if utc:
+ index = index.tz_localize('UTC').tz_convert(tz_naive_fixture)
+ else:
+ index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
- result = klass(values, tz=tz_naive_fixture)
+ result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
- result = klass(values, dtype=dtype)
+ result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
- result = klass(list(values), tz=tz_naive_fixture)
+ result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
- result = klass(list(values), dtype=dtype)
+ result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
| closes #20997
closes #20964
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I did a little refactoring of `Datetime.__new__` that converts passed data but made it easier to fix this issue. I had to modify some methods (join, normalize, delete, insert) and tests that weren't assuming integer data weren't epoch timestamps. | https://api.github.com/repos/pandas-dev/pandas/pulls/21216 | 2018-05-26T05:39:36Z | 2018-06-14T10:05:29Z | 2018-06-14T10:05:28Z | 2018-06-14T17:28:21Z |
BUG: Categorical.fillna with iterables | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 4876678baaa6e..f33f03a3eb500 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -66,6 +66,7 @@ Categorical
^^^^^^^^^^^
- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
+- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index abcb9ae3494b5..a1a8f098b582e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -12,6 +12,7 @@
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
@@ -1751,7 +1752,7 @@ def fillna(self, value=None, method=None, limit=None):
values[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
- elif is_scalar(value):
+ elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py
index 5133c97d8b590..c78f02245a5b4 100644
--- a/pandas/tests/categorical/test_missing.py
+++ b/pandas/tests/categorical/test_missing.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+import collections
+
import numpy as np
import pytest
@@ -68,3 +70,16 @@ def test_fillna_raises(self, fillna_kwargs, msg):
with tm.assert_raises_regex(ValueError, msg):
cat.fillna(**fillna_kwargs)
+
+ @pytest.mark.parametrize("named", [True, False])
+ def test_fillna_iterable_category(self, named):
+ # https://github.com/pandas-dev/pandas/issues/21097
+ if named:
+ Point = collections.namedtuple("Point", "x y")
+ else:
+ Point = lambda *args: args # tuple
+ cat = Categorical([Point(0, 0), Point(0, 1), None])
+ result = cat.fillna(Point(0, 0))
+ expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
+
+ tm.assert_categorical_equal(result, expected)
| Closes https://github.com/pandas-dev/pandas/issues/19788
Closes https://github.com/pandas-dev/pandas/issues/21097
Note that `Series.fillna` still doesn't allow iterables. I'm not sure that we should allow that, as it's potentially confusing what that would do. | https://api.github.com/repos/pandas-dev/pandas/pulls/21215 | 2018-05-26T03:06:27Z | 2018-05-28T21:57:57Z | 2018-05-28T21:57:57Z | 2018-06-12T14:27:45Z |
DOC: Add numeric_only to DataFrame.quantile | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d4ce8dc166b09..22677b19192e1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7089,6 +7089,9 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ numeric_only : boolean, default True
+ If False, the quantile of datetime and timedelta data will be
+ computed as well
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
@@ -7116,7 +7119,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
- columns=['a', 'b'])
+ columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
@@ -7126,6 +7129,20 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
0.1 1.3 3.7
0.5 2.5 55.0
+ Specifying `numeric_only=False` will also compute the quantile of
+ datetime and timedelta data.
+
+ >>> df = pd.DataFrame({'A': [1, 2],
+ 'B': [pd.Timestamp('2010'),
+ pd.Timestamp('2011')],
+ 'C': [pd.Timedelta('1 days'),
+ pd.Timedelta('2 days')]})
+ >>> df.quantile(0.5, numeric_only=False)
+ A 1.5
+ B 2010-07-02 12:00:00
+ C 1 days 12:00:00
+ Name: 0.5, dtype: object
+
See Also
--------
pandas.core.window.Rolling.quantile
| - [x] closes #18608
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
My confusion with #18608 was primarily due to the `numeric_only` not being specified in the docs. Additional we do have tests covering this case in `pandas/tests/frame/test_quantile.py` | https://api.github.com/repos/pandas-dev/pandas/pulls/21214 | 2018-05-26T02:36:46Z | 2018-05-29T01:07:45Z | 2018-05-29T01:07:45Z | 2018-06-08T17:17:41Z |
implement arith ops on pd.Categorical | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index abcb9ae3494b5..9b9f60ca14507 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1,4 +1,5 @@
# pylint: disable=E1101,W0232
+import operator
import numpy as np
from warnings import warn
@@ -60,6 +61,21 @@
Use 'allow_fill=False' to accept the new behavior.""")
+def _cat_arithmetic_op(op, reverse=False):
+ # arithmetic operations are disabled
+ def func(self, other):
+ from pandas.core.ops import _get_opstr
+ str_rep = _get_opstr(op, self.__class__)
+ raise TypeError("{typ} cannot perform the operation "
+ "{op}".format(typ=type(self).__name__, op=str_rep))
+
+ opname = op.__name__
+ if reverse:
+ opname = '__r' + opname[2:]
+ func.__name__ = opname
+ return func
+
+
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
@@ -1203,6 +1219,24 @@ def map(self, mapper):
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
+ __add__ = _cat_arithmetic_op(operator.add)
+ __radd__ = _cat_arithmetic_op(operator.add, True)
+ __sub__ = _cat_arithmetic_op(operator.sub)
+ __rsub__ = _cat_arithmetic_op(operator.sub, True)
+ __mul__ = _cat_arithmetic_op(operator.mul)
+ __rmul__ = _cat_arithmetic_op(operator.mul, True)
+ __truediv__ = _cat_arithmetic_op(operator.truediv)
+ __rtruediv__ = _cat_arithmetic_op(operator.truediv, True)
+ __floordiv__ = _cat_arithmetic_op(operator.floordiv)
+ __rfloordiv__ = _cat_arithmetic_op(operator.floordiv, True)
+ __mod__ = _cat_arithmetic_op(operator.mod)
+ __rmod__ = _cat_arithmetic_op(operator.mod, True)
+ __pow__ = _cat_arithmetic_op(operator.pow)
+ __rpow__ = _cat_arithmetic_op(operator.pow, True)
+ if compat.PY2:
+ __div__ = __truediv__
+ __rdiv__ = __rtruediv__
+
# for Series/ndarray like compat
@property
def shape(self):
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index e14f82906cd06..3968fd6867478 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1055,8 +1055,11 @@ def wrapper(left, right):
dtype=result.dtype)
elif is_categorical_dtype(left):
- raise TypeError("{typ} cannot perform the operation "
- "{op}".format(typ=type(left).__name__, op=str_rep))
+ # raises TypeError
+ result = dispatch_to_index_op(op, left, right, pd.Categorical)
+ return construct_result(left, result,
+ index=left.index, name=res_name,
+ dtype=result.dtype)
lvalues = left.values
rvalues = right
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Then in core.ops we dispatch to pd.Categorical instead of special-casing. | https://api.github.com/repos/pandas-dev/pandas/pulls/21213 | 2018-05-25T22:13:32Z | 2018-05-29T00:56:42Z | null | 2020-04-05T17:41:43Z |
Stable Sorting Algorithm for Fillna Indexer | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 4876678baaa6e..f400de5f1ad02 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -52,6 +52,7 @@ Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
+- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
Strings
^^^^^^^
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 43afd1e0f5969..a6dbaff17e543 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -297,7 +297,8 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
# Make sure all arrays are the same size
assert N == len(labels) == len(mask)
- sorted_labels = np.argsort(labels).astype(np.int64, copy=False)
+ sorted_labels = np.argsort(labels, kind='mergesort').astype(
+ np.int64, copy=False)
if direction == 'bfill':
sorted_labels = sorted_labels[::-1]
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 626057c1ea760..7fccf1f57a886 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -721,6 +721,23 @@ def interweave(list_obj):
assert_frame_equal(result, exp)
+@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
+def test_pad_stable_sorting(fill_method):
+ # GH 21207
+ x = [0] * 20
+ y = [np.nan] * 10 + [1] * 10
+
+ if fill_method == 'bfill':
+ y = y[::-1]
+
+ df = pd.DataFrame({'x': x, 'y': y})
+ expected = df.copy()
+
+ result = getattr(df.groupby('x'), fill_method)()
+
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
| - [X] closes #21207
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Haven't added a whatsnew yet if only because I need to think through the best way to phrase this / investigate where it doesn't work. I have a feeling the default `kind` argument has a bug in it back in NumPy, but `mergesort` is guaranteed as a stable sort so may be preferable anyway in spite of the performance hit.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.html#numpy.sort
Will post ASVs later as well - let me know of feedback in the interim | https://api.github.com/repos/pandas-dev/pandas/pulls/21212 | 2018-05-25T21:58:56Z | 2018-05-29T01:20:39Z | 2018-05-29T01:20:39Z | 2018-06-08T17:18:03Z |
DOC: minor documentation fix on frame.dropna | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1d8f225bd4342..d4ce8dc166b09 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4177,8 +4177,9 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
- .. deprecated:: 0.23.0: Pass tuple or list to drop on multiple
- axes.
+ .. deprecated:: 0.23.0
+ Pass tuple or list to drop on multiple axes.
+
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
| - [x] closes #21209
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
After this PR, it should look like this:
<img width="749" alt="pr" src="https://user-images.githubusercontent.com/14131823/40563992-e1536170-605e-11e8-8fd7-13bc1e0981d0.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/21210 | 2018-05-25T20:00:12Z | 2018-05-25T21:06:09Z | 2018-05-25T21:06:09Z | 2018-05-25T21:07:06Z |
CI: use latest deps for pandas-datareader, python-dateutil | diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml
index fe057e714761e..006276ba1a65f 100644
--- a/ci/travis-36.yaml
+++ b/ci/travis-36.yaml
@@ -18,12 +18,10 @@ dependencies:
- numexpr
- numpy
- openpyxl
- - pandas-datareader
- psycopg2
- pyarrow
- pymysql
- pytables
- - python-dateutil
- python-snappy
- python=3.6*
- pytz
@@ -45,3 +43,5 @@ dependencies:
- pip:
- brotlipy
- coverage
+ - pandas-datareader
+ - python-dateutil
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index a595d9f18d6b8..c28e2052bd93e 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -87,6 +87,7 @@ def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
+@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
@@ -95,6 +96,7 @@ def test_pandas_datareader():
'F', 'quandl', '2017-01-01', '2017-02-01')
+@pytest.mark.xfail(reaason="downstream install issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
| xfail geopandas
| https://api.github.com/repos/pandas-dev/pandas/pulls/21204 | 2018-05-25T10:42:00Z | 2018-05-25T11:32:05Z | 2018-05-25T11:32:05Z | 2018-06-12T16:30:34Z |
BUG: make dense ranks results scale to 100 percent | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 726ab73e8f933..f2bc81eea186b 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -51,6 +51,7 @@ Groupby/Resample/Rolling
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
+- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True``
Strings
^^^^^^^
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 6a33e4a09476d..b3e9b7c9e69ee 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -418,7 +418,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
bint is_datetimelike, object ties_method,
bint ascending, bint pct, object na_option):
"""
- Provides the rank of values within each group.
+ Provides the rank of values within each group.
Parameters
----------
@@ -451,8 +451,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
"""
cdef:
TiebreakEnumType tiebreak
- Py_ssize_t i, j, N, K, val_start=0, grp_start=0, dups=0, sum_ranks=0
- Py_ssize_t grp_vals_seen=1, grp_na_count=0
+ Py_ssize_t i, j, N, K, grp_start=0, dups=0, sum_ranks=0
+ Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0
ndarray[int64_t] _as
ndarray[float64_t, ndim=2] grp_sizes
ndarray[{{c_type}}] masked_vals
@@ -563,6 +563,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
dups = sum_ranks = 0
val_start = i
grp_vals_seen += 1
+ grp_tie_count +=1
# Similar to the previous conditional, check now if we are moving
# to a new group. If so, keep track of the index where the new
@@ -571,11 +572,16 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
# (used by pct calculations later). also be sure to reset any of
# the items helping to calculate dups
if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]:
- for j in range(grp_start, i + 1):
- grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
+ if tiebreak != TIEBREAK_DENSE:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
+ else:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = (grp_tie_count -
+ (grp_na_count > 0))
dups = sum_ranks = 0
grp_na_count = 0
- val_start = i + 1
+ grp_tie_count = 0
grp_start = i + 1
grp_vals_seen = 1
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index 6ad8b4905abff..203c3c73bec94 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -59,9 +59,9 @@ def test_rank_apply():
('first', False, False, [3., 4., 1., 5., 2.]),
('first', False, True, [.6, .8, .2, 1., .4]),
('dense', True, False, [1., 1., 3., 1., 2.]),
- ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]),
+ ('dense', True, True, [1. / 3., 1. / 3., 3. / 3., 1. / 3., 2. / 3.]),
('dense', False, False, [3., 3., 1., 3., 2.]),
- ('dense', False, True, [.6, .6, .2, .6, .4]),
+ ('dense', False, True, [3. / 3., 3. / 3., 1. / 3., 3. / 3., 2. / 3.]),
])
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
@@ -126,7 +126,7 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
- [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats
+ [2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06'), np.nan, np.nan]
@@ -167,11 +167,11 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
('dense', True, 'keep', False,
[1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
('dense', True, 'keep', True,
- [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]),
+ [1. / 3., 1. / 3., np.nan, 3. / 3., 1. / 3., 2. / 3., np.nan, np.nan]),
('dense', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('dense', False, 'keep', True,
- [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
+ [3. / 3., 3. / 3., np.nan, 1. / 3., 3. / 3., 2. / 3., np.nan, np.nan]),
('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
('average', True, 'no_na', True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
@@ -198,10 +198,10 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
('dense', True, 'no_na', True,
- [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]),
+ [0.25, 0.25, 1., 0.75, 0.25, 0.5, 1., 1.]),
('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
('dense', False, 'no_na', True,
- [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5])
+ [0.75, 0.75, 1., 0.25, 0.75, 0.5, 1., 1.])
])
def test_rank_args_missing(grps, vals, ties_method, ascending,
na_option, pct, exp):
| - [x] closes #20731
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21203 | 2018-05-25T08:12:23Z | 2018-05-31T20:44:53Z | 2018-05-31T20:44:53Z | 2018-06-08T17:20:38Z |
#21128 DOC: Add documentation for freq='infer' option of DatetimeIndex and TimedeltaIndex constructors | diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index 745810704f665..691b89ccd8c14 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -363,6 +363,13 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss
pd.TimedeltaIndex(['1 days', '1 days, 00:00:05',
np.timedelta64(2,'D'), datetime.timedelta(days=2,seconds=2)])
+'infer' can be passed in order to set the frequency of the index as the inferred frequency
+upon creation
+
+.. ipython:: python
+
+ pd.TimedeltaIndex(['0 days', '10 days', '20 days'], freq='infer')
+
Generating Ranges of Time Deltas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1b0cf86995a39..223b2228d1230 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -185,6 +185,17 @@ options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are req
pd.Timestamp('2010/11/12')
+You can also use the `DatetimeIndex` constructor directly:
+
+'infer' can be passed in order to set the frequency of the index as the inferred frequency
+upon creation
+
+.. ipython:: python
+
+ pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'])
+
+ pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'], freq='infer')
+
Providing a Format Argument
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 83950f1d71633..963eb6dc053bf 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -186,7 +186,10 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
- One of pandas date offset strings or corresponding objects
+ One of pandas date offset strings or corresponding objects. The string
+ 'infer' can be passed in order to set the frequency of the index as the
+ inferred frequency upon creation
+
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 9707d19953418..a39a48c4ca56b 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -107,7 +107,10 @@ class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index):
Optional timedelta-like data to construct index with
unit: unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional
which is an integer/float number
- freq: a frequency for the index, optional
+ freq : string or pandas offset object, optional
+ One of pandas date offset strings or corresponding objects. The string
+ 'infer' can be passed in order to set the frequency of the index as the
+ inferred frequency upon creation
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
| - [X] closes #21128
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21201 | 2018-05-25T04:31:07Z | 2018-06-20T23:28:40Z | null | 2018-06-20T23:28:40Z |
ENH: 'to_sql()' add param 'method' to control insert statement (#21103) | Also revert default insert method to NOT use multi-value.
This is WIP. I would like to gather feedback on API change before going on.
- support callables as parameter?
- support to_sql() when used on SQLite3 (without SQLAlchemy)
Sample file for performance benchmarking
```
import time
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
N_COLS = 20
N_ROWS= 200000
# one of to_sql insert methods: None/'default', 'multi', 'copy'
METHOD = 'copy'
engine = create_engine('postgresql://postgres:@localhost/pandas_perf')
conn = engine.connect()
start = time.time()
df = pd.DataFrame({n: np.arange(0, N_ROWS, 1) for n in range(N_COLS)})
# convert df to sql table
df.to_sql('test', conn, index=False, if_exists='replace',
chunksize=1000, method=METHOD)
print('WRITE: {}'.format(time.time() - start))
```
- [x] closes #21103 & closes #21146 & #8953
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- [ ] update docstrings/docs
| https://api.github.com/repos/pandas-dev/pandas/pulls/21199 | 2018-05-25T02:52:13Z | 2018-06-09T10:51:28Z | null | 2018-06-09T11:02:01Z | |
BUG: Fix inconsistency between the shape properties of SparseSeries and SparseArray (#21126) | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 4876678baaa6e..f5df37cd42e4f 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -67,6 +67,11 @@ Categorical
- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
+Sparse
+^^^^^^
+
+- Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`)
+
Conversion
^^^^^^^^^^
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 5532d7522cd2d..ff58f7d104ff9 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -290,6 +290,7 @@ def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
+ object_state[2] = self.sp_values.__reduce__()[2]
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
@@ -339,6 +340,10 @@ def values(self):
output.put(int_index.indices, self)
return output
+ @property
+ def shape(self):
+ return (len(self),)
+
@property
def sp_values(self):
# caching not an option, leaks memory
diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py
index 6c0c83cf65ff7..b3330f866ba1f 100644
--- a/pandas/tests/sparse/test_array.py
+++ b/pandas/tests/sparse/test_array.py
@@ -454,6 +454,17 @@ def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
+ @pytest.mark.parametrize('data,shape,dtype', [
+ ([0, 0, 0, 0, 0], (5,), None),
+ ([], (0,), None),
+ ([0], (1,), None),
+ (['A', 'A', np.nan, 'B'], (4,), np.object)
+ ])
+ def test_shape(self, data, shape, dtype):
+ # GH 21126
+ out = SparseArray(data, dtype=dtype)
+ assert out.shape == shape
+
def test_to_dense(self):
vals = np.array([1, np.nan, np.nan, 3, np.nan])
res = SparseArray(vals).to_dense()
| - [x] closes #21126
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21198 | 2018-05-25T02:48:29Z | 2018-05-31T10:27:33Z | 2018-05-31T10:27:32Z | 2018-06-12T16:30:34Z |
CLN: Remove duplicate Categorical subsection from 0.23.1 whatsnew | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index a7ba0dfbbd1c4..4876678baaa6e 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -97,8 +97,3 @@ Reshaping
- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`)
-
-
-Categorical
-^^^^^^^^^^^
-
--
| Unless I'm misreading, it looks like there are two Categorical subsections under the Bug Fixes section, with the first subsection starting on line 65. | https://api.github.com/repos/pandas-dev/pandas/pulls/21197 | 2018-05-25T00:31:54Z | 2018-05-25T07:09:54Z | 2018-05-25T07:09:54Z | 2018-06-12T14:33:18Z |
BUG: Should not raise errors in .set_names for MultiIndex with nlevels == 1 (GH21149) | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index b0e08e8583cd1..fc6f3f3bfa614 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -80,6 +80,7 @@ Indexing
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
+- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
-
I/O
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f79288c167356..25d4e1be983e7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1384,7 +1384,8 @@ def set_names(self, names, level=None, inplace=False):
names=[u'baz', u'bar'])
"""
- if level is not None and self.nlevels == 1:
+ from .multi import MultiIndex
+ if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index c9f6bc9151d00..0ab3447909d9b 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -165,6 +165,22 @@ def test_set_name_methods(self):
assert res is None
assert ind.names == new_names2
+ @pytest.mark.parametrize('inplace', [True, False])
+ def test_set_names_with_nlevel_1(self, inplace):
+ # GH 21149
+ # Ensure that .set_names for MultiIndex with
+ # nlevels == 1 does not raise any errors
+ expected = pd.MultiIndex(levels=[[0, 1]],
+ labels=[[0, 1]],
+ names=['first'])
+ m = pd.MultiIndex.from_product([[0, 1]])
+ result = m.set_names('first', level=0, inplace=inplace)
+
+ if inplace:
+ result = m
+
+ tm.assert_index_equal(result, expected)
+
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
| - [x] partially #21149
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Initial PR #21182 is closed - and now split in 2 different PRs | https://api.github.com/repos/pandas-dev/pandas/pulls/21196 | 2018-05-24T18:31:39Z | 2018-05-29T01:32:25Z | 2018-05-29T01:32:25Z | 2018-06-08T17:18:13Z |
CLN: Comparison methods for MultiIndex should have consistent behaviour for all nlevels (GH21149) | diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index 3e4326dea2ecc..0d3f9cb8dd3b6 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -52,6 +52,7 @@ Bug Fixes
**Indexing**
- Bug in :meth:`Index.get_indexer_non_unique` with categorical key (:issue:`21448`)
+- Bug in comparison operations for :class:`MultiIndex` where error was raised on equality / inequality comparison involving a MultiIndex with ``nlevels == 1`` (:issue:`21149`)
-
**I/O**
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 36345a32a3bf7..4b32e5d4f5654 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -91,7 +91,8 @@ def cmp_method(self, other):
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
- if is_object_dtype(self) and self.nlevels == 1:
+ from .multi import MultiIndex
+ if is_object_dtype(self) and not isinstance(self, MultiIndex):
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 0ab3447909d9b..ab53002ee1587 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -3307,3 +3307,20 @@ def test_duplicate_multiindex_labels(self):
with pytest.raises(ValueError):
ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
+
+ def test_multiindex_compare(self):
+ # GH 21149
+ # Ensure comparison operations for MultiIndex with nlevels == 1
+ # behave consistently with those for MultiIndex with nlevels > 1
+
+ midx = pd.MultiIndex.from_product([[0, 1]])
+
+ # Equality self-test: MultiIndex object vs self
+ expected = pd.Series([True, True])
+ result = pd.Series(midx == midx)
+ tm.assert_series_equal(result, expected)
+
+ # Greater than comparison: MultiIndex object vs self
+ expected = pd.Series([False, False])
+ result = pd.Series(midx > midx)
+ tm.assert_series_equal(result, expected)
| - [x] closes #21149
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Initial PR #21182 is closed - and now split in 2 different PRs
`def cmp_method` raised ValueError for equality / inequality comparisons of MultiIndex with nlevels == 1, which was inconsistent with behaviour for MultiIndex with nlevels > 1 (details of issue below) - this has now been fixed
Currently (as of 0.23.0), comparing MultiIndex of nlevels==1 with another of same length raises a ValueError e.g.
```python
[In] midx=pd.MultiIndex.from_product([[0, 1]])
[In] midx
[Out] MultiIndex(levels=[[0, 1]],
labels=[[0, 1]])
[In] midx == midx
[Out] ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
whereas the behaviour should be consistent with that for MultiIndex with nlevels>1 as follows:
```python
[In] midx == midx
[Out] array([ True, True])
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/21195 | 2018-05-24T18:27:12Z | 2018-06-14T10:23:15Z | 2018-06-14T10:23:15Z | 2018-06-29T14:48:51Z |
ENH: extension ops | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 55e76512b2440..61f8ebe3618f1 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -29,12 +29,22 @@ Datetimelike API Changes
- For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with non-``None`` ``freq`` attribute, addition or subtraction of integer-dtyped array or ``Index`` will return an object of the same class (:issue:`19959`)
+.. _whatsnew_0240.api.extension:
+
+ExtensionType Changes
+^^^^^^^^^^^^^^^^^^^^^
+
+- ``ExtensionArray`` has gained the abstract methods ``.dropna()`` and ``.append()`` (:issue:`21185`)
+- ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore
+ the dtype has gained the ``construct_array_type`` (:issue:`21185`)
+- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`)
+
.. _whatsnew_0240.api.other:
Other API Changes
^^^^^^^^^^^^^^^^^
--
+- Invalid consruction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`)
-
-
diff --git a/pandas/conftest.py b/pandas/conftest.py
index a463f573c82e0..227ee3a150154 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -92,6 +92,15 @@ def observed(request):
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
+ """
+ return request.param
+
+
+@pytest.fixture(params=['__eq__', '__ne__', '__le__',
+ '__lt__', '__ge__', '__gt__'])
+def all_compare_operators(request):
+ """
+ Fixture for dunder names for common compare operations
"""
return request.param
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 88bc497f9f22d..f6dd6dac87035 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -154,7 +154,7 @@ def _reconstruct_data(values, dtype, original):
"""
from pandas import Index
if is_extension_array_dtype(dtype):
- pass
+ values = dtype.construct_array_type(values)._from_sequence(values)
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
@@ -705,7 +705,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
else:
- if is_categorical_dtype(values) or is_sparse(values):
+ if is_extension_array_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 1922801c30719..04bf3bb8a06c3 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -9,6 +9,9 @@
from pandas.errors import AbstractMethodError
from pandas.compat.numpy import function as nv
+from pandas.compat import set_function_name, PY3
+from pandas.core import ops
+import operator
_not_implemented_message = "{} does not implement {}."
@@ -36,6 +39,7 @@ class ExtensionArray(object):
* isna
* take
* copy
+ * append
* _concat_same_type
An additional method is available to satisfy pandas' internal,
@@ -49,6 +53,7 @@ class ExtensionArray(object):
methods:
* fillna
+ * dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
@@ -82,7 +87,7 @@ class ExtensionArray(object):
# Constructors
# ------------------------------------------------------------------------
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
"""Construct a new ExtensionArray from a sequence of scalars.
Parameters
@@ -90,6 +95,8 @@ def _from_sequence(cls, scalars):
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
+ copy : boolean, default True
+ if True, copy the underlying data
Returns
-------
ExtensionArray
@@ -379,6 +386,16 @@ def fillna(self, value=None, method=None, limit=None):
new_values = self.copy()
return new_values
+ def dropna(self):
+ """ Return ExtensionArray without NA values
+
+ Returns
+ -------
+ valid : ExtensionArray
+ """
+
+ return self[~self.isna()]
+
def unique(self):
"""Compute the ExtensionArray of unique values.
@@ -567,6 +584,34 @@ def copy(self, deep=False):
"""
raise AbstractMethodError(self)
+ def append(self, other):
+ """
+ Append a collection of Arrays together
+
+ Parameters
+ ----------
+ other : ExtensionArray or list/tuple of ExtensionArrays
+
+ Returns
+ -------
+ appended : ExtensionArray
+ """
+
+ to_concat = [self]
+ cls = self.__class__
+
+ if isinstance(other, (list, tuple)):
+ to_concat = to_concat + list(other)
+ else:
+ to_concat.append(other)
+
+ for obj in to_concat:
+ if not isinstance(obj, cls):
+ raise TypeError('all inputs must be of type {}'.format(
+ cls.__name__))
+
+ return cls._concat_same_type(to_concat)
+
# ------------------------------------------------------------------------
# Block-related methods
# ------------------------------------------------------------------------
@@ -610,3 +655,56 @@ def _ndarray_values(self):
used for interacting with our indexers.
"""
return np.array(self)
+
+ # ------------------------------------------------------------------------
+ # ops-related methods
+ # ------------------------------------------------------------------------
+
+ @classmethod
+ def _add_comparison_methods_binary(cls):
+ cls.__eq__ = cls._make_comparison_op(operator.eq)
+ cls.__ne__ = cls._make_comparison_op(operator.ne)
+ cls.__lt__ = cls._make_comparison_op(operator.lt)
+ cls.__gt__ = cls._make_comparison_op(operator.gt)
+ cls.__le__ = cls._make_comparison_op(operator.le)
+ cls.__ge__ = cls._make_comparison_op(operator.ge)
+
+ @classmethod
+ def _add_numeric_methods_binary(cls):
+ """ add in numeric methods """
+ cls.__add__ = cls._make_arithmetic_op(operator.add)
+ cls.__radd__ = cls._make_arithmetic_op(ops.radd)
+ cls.__sub__ = cls._make_arithmetic_op(operator.sub)
+ cls.__rsub__ = cls._make_arithmetic_op(ops.rsub)
+ cls.__mul__ = cls._make_arithmetic_op(operator.mul)
+ cls.__rmul__ = cls._make_arithmetic_op(ops.rmul)
+ cls.__rpow__ = cls._make_arithmetic_op(ops.rpow)
+ cls.__pow__ = cls._make_arithmetic_op(operator.pow)
+ cls.__mod__ = cls._make_arithmetic_op(operator.mod)
+ cls.__rmod__ = cls._make_arithmetic_op(ops.rmod)
+ cls.__floordiv__ = cls._make_arithmetic_op(operator.floordiv)
+ cls.__rfloordiv__ = cls._make_arithmetic_op(ops.rfloordiv)
+ cls.__truediv__ = cls._make_arithmetic_op(operator.truediv)
+ cls.__rtruediv__ = cls._make_arithmetic_op(ops.rtruediv)
+ if not PY3:
+ cls.__div__ = cls._make_arithmetic_op(operator.div)
+ cls.__rdiv__ = cls._make_arithmetic_op(ops.rdiv)
+
+ cls.__divmod__ = cls._make_arithmetic_op(divmod)
+ cls.__rdivmod__ = cls._make_arithmetic_op(ops.rdivmod)
+
+ @classmethod
+ def make_comparison_op(cls, op):
+ def cmp_method(self, other):
+ raise NotImplementedError
+
+ name = '__{name}__'.format(name=op.__name__)
+ return set_function_name(cmp_method, name, cls)
+
+ @classmethod
+ def make_arithmetic_op(cls, op):
+ def integer_arithmetic_method(self, other):
+ raise NotImplementedError
+
+ name = '__{name}__'.format(name=op.__name__)
+ return set_function_name(integer_arithmetic_method, name, cls)
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 49e98c16c716e..c0c9a8d22ce4f 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -109,6 +109,11 @@ class ExtensionDtype(_DtypeOpsMixin):
* name
* construct_from_string
+ Optionally one can override construct_array_type for construction
+ with the name of this dtype via the Registry
+
+ * construct_array_type
+
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
@@ -156,6 +161,22 @@ def name(self):
"""
raise AbstractMethodError(self)
+ @classmethod
+ def construct_array_type(cls, array=None):
+ """Return the array type associated with this dtype
+
+ Parameters
+ ----------
+ array : array-like, optional
+
+ Returns
+ -------
+ type
+ """
+ if array is None:
+ return cls
+ raise NotImplementedError
+
@classmethod
def construct_from_string(cls, string):
"""Attempt to construct this type from a string.
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e4ed6d544d42e..73176887ca0d9 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -647,6 +647,11 @@ def conv(r, dtype):
def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
+
+ # dispatch on extension dtype if needed
+ if is_extension_array_dtype(dtype):
+ return dtype.array_type._from_sequence(arr, copy=copy)
+
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index c45838e6040a9..37d260088c4d4 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -9,7 +9,7 @@
DatetimeTZDtype, DatetimeTZDtypeType,
PeriodDtype, PeriodDtypeType,
IntervalDtype, IntervalDtypeType,
- ExtensionDtype, PandasExtensionDtype)
+ ExtensionDtype, registry)
from .generic import (ABCCategorical, ABCPeriodIndex,
ABCDatetimeIndex, ABCSeries,
ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex,
@@ -1975,38 +1975,13 @@ def pandas_dtype(dtype):
np.dtype or a pandas dtype
"""
- if isinstance(dtype, DatetimeTZDtype):
- return dtype
- elif isinstance(dtype, PeriodDtype):
- return dtype
- elif isinstance(dtype, CategoricalDtype):
- return dtype
- elif isinstance(dtype, IntervalDtype):
- return dtype
- elif isinstance(dtype, string_types):
- try:
- return DatetimeTZDtype.construct_from_string(dtype)
- except TypeError:
- pass
-
- if dtype.startswith('period[') or dtype.startswith('Period['):
- # do not parse string like U as period[U]
- try:
- return PeriodDtype.construct_from_string(dtype)
- except TypeError:
- pass
-
- elif dtype.startswith('interval') or dtype.startswith('Interval'):
- try:
- return IntervalDtype.construct_from_string(dtype)
- except TypeError:
- pass
+ # registered extension types
+ result = registry.find(dtype)
+ if result is not None:
+ return result
- try:
- return CategoricalDtype.construct_from_string(dtype)
- except TypeError:
- pass
- elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)):
+ # un-registered extension types
+ if isinstance(dtype, ExtensionDtype):
return dtype
try:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 708f54f5ca75b..7d147da661e34 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -8,6 +8,60 @@
from .base import ExtensionDtype, _DtypeOpsMixin
+class Registry(object):
+ """ Registry for dtype inference
+
+ We can directly construct dtypes in pandas_dtypes if they are
+ a type; the registry allows us to register an extension dtype
+ to try inference from a string or a dtype class
+
+ These are tried in order for inference.
+ """
+ dtypes = []
+
+ @classmethod
+ def register(self, dtype):
+ """
+ Parameters
+ ----------
+ dtype : ExtensionDtype
+ """
+ if not issubclass(dtype, (PandasExtensionDtype, ExtensionDtype)):
+ raise ValueError("can only register pandas extension dtypes")
+
+ self.dtypes.append(dtype)
+
+ def find(self, dtype):
+ """
+ Parameters
+ ----------
+ dtype : PandasExtensionDtype or string
+
+ Returns
+ -------
+ return the first matching dtype, otherwise return None
+ """
+ if not isinstance(dtype, compat.string_types):
+ dtype_type = dtype
+ if not isinstance(dtype, type):
+ dtype_type = type(dtype)
+ if issubclass(dtype_type, (PandasExtensionDtype, ExtensionDtype)):
+ return dtype
+
+ return None
+
+ for dtype_type in self.dtypes:
+ try:
+ return dtype_type.construct_from_string(dtype)
+ except TypeError:
+ pass
+
+ return None
+
+
+registry = Registry()
+
+
class PandasExtensionDtype(_DtypeOpsMixin):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
@@ -263,6 +317,21 @@ def _hash_categories(categories, ordered=True):
else:
return np.bitwise_xor.reduce(hashed)
+ @classmethod
+ def construct_array_type(cls, array=None):
+ """Return the array type associated with this dtype
+
+ Parameters
+ ----------
+ array : array-like, optional
+
+ Returns
+ -------
+ type
+ """
+ from pandas import Categorical
+ return Categorical
+
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
@@ -552,11 +621,16 @@ def _parse_dtype_strict(cls, freq):
@classmethod
def construct_from_string(cls, string):
"""
- attempt to construct this type from a string, raise a TypeError
- if its not possible
+ Strict construction from a string, raise a TypeError if not
+ possible
"""
from pandas.tseries.offsets import DateOffset
- if isinstance(string, (compat.string_types, DateOffset)):
+
+ if (isinstance(string, compat.string_types) and
+ (string.startswith('period[') or
+ string.startswith('Period[')) or
+ isinstance(string, DateOffset)):
+ # do not parse string like U as period[U]
# avoid tuple to be regarded as freq
try:
return cls(freq=string)
@@ -656,7 +730,7 @@ def __new__(cls, subtype=None):
try:
subtype = pandas_dtype(subtype)
except TypeError:
- raise ValueError("could not construct IntervalDtype")
+ raise TypeError("could not construct IntervalDtype")
if is_categorical_dtype(subtype) or is_string_dtype(subtype):
# GH 19016
@@ -678,8 +752,11 @@ def construct_from_string(cls, string):
attempt to construct this type from a string, raise a TypeError
if its not possible
"""
- if isinstance(string, compat.string_types):
+ if (isinstance(string, compat.string_types) and
+ (string.startswith('interval') or
+ string.startswith('Interval'))):
return cls(string)
+
msg = "a string needs to be passed, got type {typ}"
raise TypeError(msg.format(typ=type(string)))
@@ -723,3 +800,10 @@ def is_dtype(cls, dtype):
else:
return False
return super(IntervalDtype, cls).is_dtype(dtype)
+
+
+# register the dtypes in search order
+registry.register(DatetimeTZDtype)
+registry.register(PeriodDtype)
+registry.register(IntervalDtype)
+registry.register(CategoricalDtype)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 8f8d8760583ce..2694f5d5be384 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -800,7 +800,7 @@ def astype(self, dtype, copy=True):
@cache_readonly
def dtype(self):
"""Return the dtype object of the underlying data"""
- return IntervalDtype.construct_from_string(str(self.left.dtype))
+ return IntervalDtype(str(self.left.dtype))
@property
def inferred_type(self):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index fe508dc1bb0bc..a5e9107b8a660 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -633,8 +633,9 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
return self.make_block(Categorical(self.values, dtype=dtype))
# astype processing
- dtype = np.dtype(dtype)
- if self.dtype == dtype:
+ if not is_extension_array_dtype(dtype):
+ dtype = np.dtype(dtype)
+ if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
@@ -662,7 +663,13 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
- values = values.reshape(self.shape)
+
+ # TODO(extension)
+ # should we make this attribute?
+ try:
+ values = values.reshape(self.shape)
+ except AttributeError:
+ pass
newb = make_block(values, placement=self.mgr_locs,
klass=klass)
@@ -3170,6 +3177,10 @@ def get_block_type(values, dtype=None):
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
+ elif is_categorical(values):
+ cls = CategoricalBlock
+ elif is_extension_array_dtype(values):
+ cls = ExtensionBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetimetz(values)
cls = DatetimeBlock
@@ -3179,10 +3190,6 @@ def get_block_type(values, dtype=None):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
- elif is_categorical(values):
- cls = CategoricalBlock
- elif is_extension_array_dtype(values):
- cls = ExtensionBlock
else:
cls = ObjectBlock
return cls
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 31c489e2f8941..cb5ee8388c2c4 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -638,7 +638,8 @@ def fill_zeros(result, x, y, name, fill):
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
- signs = np.sign(y if name.startswith(('r', '__r')) else x)
+ signs = y if name.startswith(('r', '__r')) else x
+ signs = np.sign(signs.astype('float', copy=False))
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index e14f82906cd06..69d1efb0304ab 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -27,7 +27,7 @@
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
- is_bool_dtype,
+ is_bool_dtype, is_extension_array_dtype,
is_list_like,
is_scalar,
_ensure_object)
@@ -1003,8 +1003,18 @@ def _arith_method_SERIES(cls, op, special):
if op is divmod else _construct_result)
def na_op(x, y):
- import pandas.core.computation.expressions as expressions
+ # handle extension array ops
+ # TODO(extension)
+ # the ops *between* non-same-type extension arrays are not
+ # very well defined
+ if (is_extension_array_dtype(x) or is_extension_array_dtype(y)):
+ if (op_name.startswith('__r') and not
+ is_extension_array_dtype(y) and not
+ is_scalar(y)):
+ y = x.__class__._from_sequence(y)
+ return op(x, y)
+ import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
@@ -1025,6 +1035,20 @@ def na_op(x, y):
return result
def safe_na_op(lvalues, rvalues):
+ """
+ return the result of evaluating na_op on the passed in values
+
+ try coercion to object type if the native types are not compatible
+
+ Parameters
+ ----------
+ lvalues : array-like
+ rvalues : array-like
+
+ Raises
+ ------
+ invalid operation raises TypeError
+ """
try:
with np.errstate(all='ignore'):
return na_op(lvalues, rvalues)
@@ -1035,14 +1059,21 @@ def safe_na_op(lvalues, rvalues):
raise
def wrapper(left, right):
-
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
- if is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
+ if is_categorical_dtype(left):
+ raise TypeError("{typ} cannot perform the operation "
+ "{op}".format(typ=type(left).__name__, op=str_rep))
+
+ elif (is_extension_array_dtype(left) or
+ is_extension_array_dtype(right)):
+ pass
+
+ elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
return construct_result(left, result,
index=left.index, name=res_name,
@@ -1054,10 +1085,6 @@ def wrapper(left, right):
index=left.index, name=res_name,
dtype=result.dtype)
- elif is_categorical_dtype(left):
- raise TypeError("{typ} cannot perform the operation "
- "{op}".format(typ=type(left).__name__, op=str_rep))
-
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
@@ -1136,6 +1163,14 @@ def na_op(x, y):
# The `not is_scalar(y)` check excludes the string "category"
return op(y, x)
+ # handle extension array ops
+ # TODO(extension)
+ # the ops *between* non-same-type extension arrays are not
+ # very well defined
+ elif (is_extension_array_dtype(x) or
+ is_extension_array_dtype(y)):
+ return op(x, y)
+
elif is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d59401414181f..4f63c56706c72 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4054,11 +4054,9 @@ def _try_cast(arr, take_fast_path):
subarr = Categorical(arr, dtype.categories,
ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
- # We don't allow casting to third party dtypes, since we don't
- # know what array belongs to which type.
- msg = ("Cannot cast data to extension dtype '{}'. "
- "Pass the extension array directly.".format(dtype))
- raise ValueError(msg)
+ # create an extension array from its dtype
+ array_type = dtype.construct_array_type(subarr)
+ subarr = array_type(subarr, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 12201f62946ac..adb4bf3f47572 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -514,7 +514,6 @@ def _to_str_columns(self):
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
-
# may include levels names also
str_index = self._get_formatted_index(frame)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index cc833af03ae66..fd8042212f658 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -9,10 +9,9 @@
from pandas import (
Series, Categorical, CategoricalIndex, IntervalIndex, date_range)
-from pandas.compat import string_types
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype, PeriodDtype,
- IntervalDtype, CategoricalDtype)
+ IntervalDtype, CategoricalDtype, registry)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
@@ -448,7 +447,7 @@ def test_construction_not_supported(self, subtype):
def test_construction_errors(self):
msg = 'could not construct IntervalDtype'
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(TypeError, msg):
IntervalDtype('xx')
def test_construction_from_string(self):
@@ -458,14 +457,21 @@ def test_construction_from_string(self):
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
- 'foo', 'interval[foo]', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
+ 'foo', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
- if isinstance(string, string_types):
- error, msg = ValueError, 'could not construct IntervalDtype'
- else:
- error, msg = TypeError, 'a string needs to be passed, got type'
+ # these are invalid entirely
+ msg = 'a string needs to be passed, got type'
+
+ with tm.assert_raises_regex(TypeError, msg):
+ IntervalDtype.construct_from_string(string)
+
+ @pytest.mark.parametrize('string', [
+ 'interval[foo]'])
+ def test_construction_from_string_error_subtype(self, string):
+ # this is an invalid subtype
+ msg = 'could not construct IntervalDtype'
- with tm.assert_raises_regex(error, msg):
+ with tm.assert_raises_regex(TypeError, msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
@@ -767,3 +773,24 @@ def test_update_dtype_errors(self, bad_dtype):
msg = 'a CategoricalDtype must be passed to perform an update, '
with tm.assert_raises_regex(ValueError, msg):
dtype.update_dtype(bad_dtype)
+
+
+@pytest.mark.parametrize(
+ 'dtype',
+ [DatetimeTZDtype, CategoricalDtype,
+ PeriodDtype, IntervalDtype])
+def test_registry(dtype):
+ assert dtype in registry.dtypes
+
+
+@pytest.mark.parametrize(
+ 'dtype, expected',
+ [('int64', None),
+ ('interval', IntervalDtype()),
+ ('interval[int64]', IntervalDtype()),
+ ('category', CategoricalDtype()),
+ ('period[D]', PeriodDtype('D')),
+ ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))])
+def test_registry_find(dtype, expected):
+
+ assert registry.find(dtype) == expected
diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 9da985625c4ee..7bbba1e8640b1 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -46,6 +46,7 @@ class TestMyDtype(BaseDtypeTests):
from .getitem import BaseGetitemTests # noqa
from .groupby import BaseGroupbyTests # noqa
from .interface import BaseInterfaceTests # noqa
+from .ops import BaseOpsTests # noqa
from .methods import BaseMethodsTests # noqa
from .missing import BaseMissingTests # noqa
from .reshaping import BaseReshapingTests # noqa
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 489a430bb4020..972ef7f37acca 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -1,5 +1,6 @@
import pytest
+import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.internals import ExtensionBlock
@@ -45,3 +46,14 @@ def test_series_given_mismatched_index_raises(self, data):
msg = 'Length of passed values is 3, index implies 5'
with tm.assert_raises_regex(ValueError, msg):
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
+
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ dtype = data.dtype
+
+ expected = pd.Series(data)
+ result = pd.Series(np.array(data), dtype=dtype)
+ self.assert_series_equal(result, expected)
+
+ result = pd.Series(np.array(data), dtype=str(dtype))
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index 63d3d807c270c..52a12816c8722 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -1,3 +1,4 @@
+import pytest
import numpy as np
import pandas as pd
@@ -46,3 +47,10 @@ def test_eq_with_str(self, dtype):
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
+
+ def test_array_type(self, data, dtype):
+ assert dtype.construct_array_type() is type(data)
+
+ def test_array_type_with_arg(self, data, dtype):
+ with pytest.raises(NotImplementedError):
+ dtype.construct_array_type('foo')
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 8ef8debbdc666..69de0e1900831 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -40,6 +40,16 @@ def test_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
+ def test_repr_array(self, data):
+ # some arrays may be able to assert
+ # attributes in the repr
+ repr(data)
+
+ def test_repr_array_long(self, data):
+ # some arrays may be able to assert a ... in the repr
+ with pd.option_context('display.max_seq_items', 1):
+ repr(data)
+
def test_dtype_name_in_info(self, data):
buf = StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index c5436aa731d50..0ad3196277c34 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -19,7 +19,8 @@ def test_value_counts(self, all_data, dropna):
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
- expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
+ expected = pd.Series(other).value_counts(
+ dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index af26d83df3fe2..43b2702c72193 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -23,6 +23,11 @@ def test_isna(self, data_missing):
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
+ def test_dropna_array(self, data_missing):
+ result = data_missing.dropna()
+ expected = data_missing[[1]]
+ self.assert_extension_array_equal(result, expected)
+
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
new file mode 100644
index 0000000000000..7bd97a94d4094
--- /dev/null
+++ b/pandas/tests/extension/base/ops.py
@@ -0,0 +1,54 @@
+import pytest
+import numpy as np
+import pandas as pd
+from .base import BaseExtensionTests
+
+
+class BaseOpsTests(BaseExtensionTests):
+ """Various Series and DataFrame ops methos."""
+
+ def compare(self, s, op, other, exc=NotImplementedError):
+
+ with pytest.raises(exc):
+ getattr(s, op)(other)
+
+ def test_arith_scalar(self, data, all_arithmetic_operators):
+ # scalar
+ op = all_arithmetic_operators
+ s = pd.Series(data)
+ self.compare(s, op, 1, exc=TypeError)
+
+ def test_arith_array(self, data, all_arithmetic_operators):
+ # ndarray & other series
+ op = all_arithmetic_operators
+ s = pd.Series(data)
+ self.compare(s, op, np.ones(len(s), dtype=s.dtype.type), exc=TypeError)
+
+ def test_compare_scalar(self, data, all_compare_operators):
+ op = all_compare_operators
+
+ s = pd.Series(data)
+
+ if op in '__eq__':
+ assert getattr(data, op)(0) is NotImplemented
+ assert not getattr(s, op)(0).all()
+ elif op in '__ne__':
+ assert getattr(data, op)(0) is NotImplemented
+ assert getattr(s, op)(0).all()
+
+ else:
+
+ # array
+ getattr(data, op)(0) is NotImplementedError
+
+ # series
+ s = pd.Series(data)
+ with pytest.raises(TypeError):
+ getattr(s, op)(0)
+
+ def test_error(self, data, all_arithmetic_operators):
+
+ # invalid ops
+ op = all_arithmetic_operators
+ with pytest.raises(AttributeError):
+ getattr(data, op)
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index fe920a47ab740..ff739c97f2785 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -26,6 +26,14 @@ def test_concat(self, data, in_frame):
assert dtype == data.dtype
assert isinstance(result._data.blocks[0], ExtensionBlock)
+ def test_append(self, data):
+
+ wrapped = pd.Series(data)
+ result = wrapped.append(wrapped)
+ expected = pd.concat([wrapped, wrapped])
+
+ self.assert_series_equal(result, expected)
+
@pytest.mark.parametrize('in_frame', [True, False])
def test_concat_all_na_block(self, data_missing, in_frame):
valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
@@ -84,6 +92,7 @@ def test_concat_columns(self, data, na_value):
expected = pd.DataFrame({
'A': data._from_sequence(list(data[:3]) + [na_value]),
'B': [np.nan, 1, 2, 3]})
+
result = pd.concat([df1, df2], axis=1)
self.assert_frame_equal(result, expected)
result = pd.concat([df1['A'], df2['B']], axis=1)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 530a4e7a22a7a..b331cded4ac6a 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -52,7 +52,25 @@ def data_for_grouping():
class TestDtype(base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type('foo') is Categorical
+
+
+class TestOps(base.BaseOpsTests):
+
+ def test_compare_scalar(self, data, all_compare_operators):
+ op = all_compare_operators
+
+ if op == '__eq__':
+ assert not getattr(data, op)(0).all()
+
+ elif op == '__ne__':
+ assert getattr(data, op)(0).all()
+
+ else:
+ with pytest.raises(TypeError):
+ getattr(data, op)(0)
class TestInterface(base.BaseInterfaceTests):
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 90f0181beab0d..7bdbbf77cf4d6 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -15,6 +15,20 @@ class DecimalDtype(ExtensionDtype):
name = 'decimal'
na_value = decimal.Decimal('NaN')
+ @classmethod
+ def construct_array_type(cls, array=None):
+ """Return the array type associated with this dtype
+
+ Parameters
+ ----------
+ array : array-like, optional
+
+ Returns
+ -------
+ type
+ """
+ return DecimalArray
+
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
@@ -27,7 +41,7 @@ def construct_from_string(cls, string):
class DecimalArray(ExtensionArray):
dtype = DecimalDtype()
- def __init__(self, values):
+ def __init__(self, values, copy=False):
assert all(isinstance(v, decimal.Decimal) for v in values)
values = np.asarray(values, dtype=object)
@@ -40,7 +54,7 @@ def __init__(self, values):
# self._values = self.values = self.data
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 1f8cf0264f62f..ca646486d2bff 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -92,15 +92,56 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseDecimal, base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type('foo') is DecimalArray
class TestInterface(BaseDecimal, base.BaseInterfaceTests):
pass
+class TestOps(BaseDecimal, base.BaseOpsTests):
+
+ def compare(self, s, op, other):
+ # TODO(extension)
+
+ pytest.xfail("not implemented")
+
+ result = getattr(s, op)(other)
+ expected = result
+
+ self.assert_series_equal(result, expected)
+
+ def test_arith_scalar(self, data, all_arithmetic_operators):
+ # scalar
+ op = all_arithmetic_operators
+ s = pd.Series(data)
+ self.compare(s, op, 1)
+
+ def test_arith_array(self, data, all_arithmetic_operators):
+ # ndarray & other series
+ op = all_arithmetic_operators
+ s = pd.Series(data)
+ self.compare(s, op, np.ones(len(s), dtype=s.dtype.type))
+
+ @pytest.mark.xfail(reason="Not implemented")
+ def test_compare_scalar(self, data, all_compare_operators):
+ op = all_compare_operators
+
+ # array
+ result = getattr(data, op)(0)
+ expected = getattr(data.data, op)(0)
+
+ tm.assert_series_equal(result, expected)
+
+
class TestConstructors(BaseDecimal, base.BaseConstructorsTests):
- pass
+
+ @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ pass
class TestReshaping(BaseDecimal, base.BaseReshapingTests):
@@ -147,6 +188,10 @@ class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
pass
+# TODO(extension)
+@pytest.mark.xfail(reason=(
+ "raising AssertionError as this is not implemented, "
+ "though easy enough to do"))
def test_series_constructor_coerce_data_to_extension_dtype_raises():
xpr = ("Cannot cast data to extension dtype 'decimal'. Pass the "
"extension array directly.")
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 10be7836cb8d7..f5d7d58277cc5 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -32,6 +32,20 @@ class JSONDtype(ExtensionDtype):
# source compatibility with Py2.
na_value = {}
+ @classmethod
+ def construct_array_type(cls, array=None):
+ """Return the array type associated with this dtype
+
+ Parameters
+ ----------
+ array : array-like, optional
+
+ Returns
+ -------
+ type
+ """
+ return JSONArray
+
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
@@ -44,7 +58,7 @@ def construct_from_string(cls, string):
class JSONArray(ExtensionArray):
dtype = JSONDtype()
- def __init__(self, values):
+ def __init__(self, values, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError
@@ -58,7 +72,7 @@ def __init__(self, values):
# self._values = self.values = self.data
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index b7ac8033f3f6d..97d3ddf1ec54a 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -107,7 +107,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseJSON, base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type('foo') is JSONArray
class TestInterface(BaseJSON, base.BaseInterfaceTests):
@@ -130,13 +132,21 @@ def test_custom_asserts(self):
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
- pass
+
+ @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
pass
+class TestOps(BaseJSON, base.BaseOpsTests):
+ pass
+
+
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
| builds on #21185 for extension array ops
pre-cursor to #21160 | https://api.github.com/repos/pandas-dev/pandas/pulls/21191 | 2018-05-24T11:45:24Z | 2018-07-03T22:47:59Z | null | 2018-07-03T22:48:39Z |
BUG: DecimalArray and JSONArray that are empty return incorrect results for isna() | diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index 32cf29818e069..af26d83df3fe2 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -18,6 +18,11 @@ def test_isna(self, data_missing):
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
+ # GH 21189
+ result = pd.Series(data_missing).drop([0, 1]).isna()
+ expected = pd.Series([], dtype=bool)
+ self.assert_series_equal(result, expected)
+
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index e9431bd0c233c..90f0181beab0d 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -90,7 +90,7 @@ def nbytes(self):
return 0
def isna(self):
- return np.array([x.is_nan() for x in self._data])
+ return np.array([x.is_nan() for x in self._data], dtype=bool)
@property
def _na_value(self):
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 88bb66f38b35c..10be7836cb8d7 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -108,7 +108,8 @@ def nbytes(self):
return sys.getsizeof(self.data)
def isna(self):
- return np.array([x == self.dtype.na_value for x in self.data])
+ return np.array([x == self.dtype.na_value for x in self.data],
+ dtype=bool)
def take(self, indexer, allow_fill=False, fill_value=None):
# re-implement here, since NumPy has trouble setting
|
- [x] closes #21189
- [x] tests added / passed
- Modified tests/extension/base/missing.py to test when arrays are empty
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- Did not put anything here because it's internal code
Probably for @TomAugspurger to review.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21190 | 2018-05-24T09:42:18Z | 2018-05-24T14:59:28Z | 2018-05-24T14:59:28Z | 2018-05-24T19:41:08Z |
fix hashing string-casting error | diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index 5b3e607956f7a..c908d29716a7d 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -81,6 +81,7 @@ Bug Fixes
**Categorical**
+- Bug in rendering :class:`Series` with ``Categorical`` dtype in rare conditions under Python 2.7 (:issue:`21002`)
-
**Timezones**
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index c6f182ac5003f..4489847518a1d 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -8,8 +8,7 @@ import numpy as np
from numpy cimport ndarray, uint8_t, uint32_t, uint64_t
from util cimport _checknull
-from cpython cimport (PyString_Check,
- PyBytes_Check,
+from cpython cimport (PyBytes_Check,
PyUnicode_Check)
from libc.stdlib cimport malloc, free
@@ -62,9 +61,7 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'):
cdef list datas = []
for i in range(n):
val = arr[i]
- if PyString_Check(val):
- data = <bytes>val.encode(encoding)
- elif PyBytes_Check(val):
+ if PyBytes_Check(val):
data = <bytes>val
elif PyUnicode_Check(val):
data = <bytes>val.encode(encoding)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 97236f028b1c4..730c2b7865f1f 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -11,6 +11,7 @@
from pandas import (Index, Series, DataFrame, date_range, option_context,
Categorical, period_range, timedelta_range)
from pandas.core.index import MultiIndex
+from pandas.core.base import StringMixin
from pandas.compat import lrange, range, u
from pandas import compat
@@ -202,6 +203,35 @@ def test_latex_repr(self):
class TestCategoricalRepr(object):
+ def test_categorical_repr_unicode(self):
+ # GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii',
+ # and we are working in PY2, then rendering a Categorical could raise
+ # UnicodeDecodeError by trying to decode when it shouldn't
+
+ class County(StringMixin):
+ name = u'San Sebastián'
+ state = u'PR'
+
+ def __unicode__(self):
+ return self.name + u', ' + self.state
+
+ cat = pd.Categorical([County() for n in range(61)])
+ idx = pd.Index(cat)
+ ser = idx.to_series()
+
+ if compat.PY3:
+ # no reloading of sys, just check that the default (utf8) works
+ # as expected
+ repr(ser)
+ str(ser)
+
+ else:
+ # set sys.defaultencoding to ascii, then change it back after
+ # the test
+ with tm.set_defaultencoding('ascii'):
+ repr(ser)
+ str(ser)
+
def test_categorical_repr(self):
a = Series(Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index d26a2116fb3ce..b9e53dfc80020 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -553,6 +553,28 @@ def _valid_locales(locales, normalize):
# Stdout / stderr decorators
+@contextmanager
+def set_defaultencoding(encoding):
+ """
+ Set default encoding (as given by sys.getdefaultencoding()) to the given
+ encoding; restore on exit.
+
+ Parameters
+ ----------
+ encoding : str
+ """
+ if not PY2:
+ raise ValueError("set_defaultencoding context is only available "
+ "in Python 2.")
+ orig = sys.getdefaultencoding()
+ reload(sys) # noqa:F821
+ sys.setdefaultencoding(encoding)
+ try:
+ yield
+ finally:
+ sys.setdefaultencoding(orig)
+
+
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
| - [x] closes #21002
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21187 | 2018-05-24T02:25:08Z | 2018-06-21T10:18:54Z | 2018-06-21T10:18:54Z | 2018-06-29T15:03:04Z |
ENH: add in extension dtype registry | diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index 38b3b19031a0e..dcabfed2b6021 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -91,8 +91,16 @@ extension array for IP Address data, this might be ``ipaddress.IPv4Address``.
See the `extension dtype source`_ for interface definition.
+.. versionadded:: 0.24.0
+
+:class:`pandas.api.extension.ExtensionDtype` can be registered to pandas to allow creation via a string dtype name.
+This allows one to instantiate ``Series`` and ``.astype()`` with a registered string name, for
+example ``'category'`` is a registered string accessor for the ``CategoricalDtype``.
+
+See the `extension dtype dtypes`_ for more on how to register dtypes.
+
:class:`~pandas.api.extensions.ExtensionArray`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This class provides all the array-like functionality. ExtensionArrays are
limited to 1 dimension. An ExtensionArray is linked to an ExtensionDtype via the
@@ -179,6 +187,7 @@ To use a test, subclass it:
See https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/base/__init__.py
for a list of all the tests available.
+.. _extension dtype dtypes: https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/dtypes.py
.. _extension dtype source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
.. _extension array source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b275796237191..a08ac36ef4409 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -128,6 +128,23 @@ Previous Behavior:
In [3]: pi - pi[0]
Out[3]: Int64Index([0, 1, 2], dtype='int64')
+.. _whatsnew_0240.api.extension:
+
+ExtensionType Changes
+^^^^^^^^^^^^^^^^^^^^^
+
+- ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore
+ the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`)
+- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`)
+- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`)
+- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
+- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
+-
+
+.. _whatsnew_0240.api.other:
+
+Other API Changes
+^^^^^^^^^^^^^^^^^
.. _whatsnew_0240.api.incompatibilities:
@@ -168,6 +185,7 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`)
+- Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`)
-
-
@@ -330,13 +348,6 @@ Reshaping
-
-
-ExtensionArray
-^^^^^^^^^^^^^^
-
-- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`)
-- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
-- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
--
-
Other
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index dc726a736d34f..c4e4f5471c4be 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -154,7 +154,7 @@ def _reconstruct_data(values, dtype, original):
"""
from pandas import Index
if is_extension_array_dtype(dtype):
- pass
+ values = dtype.construct_array_type()._from_sequence(values)
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
@@ -705,7 +705,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
else:
- if is_categorical_dtype(values) or is_sparse(values):
+ if is_extension_array_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index a572fff1c44d7..fe4e461b0bd4f 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -54,6 +54,7 @@ class ExtensionArray(object):
methods:
* fillna
+ * dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
@@ -87,7 +88,7 @@ class ExtensionArray(object):
# Constructors
# ------------------------------------------------------------------------
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
"""Construct a new ExtensionArray from a sequence of scalars.
Parameters
@@ -95,6 +96,8 @@ def _from_sequence(cls, scalars):
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
+ copy : boolean, default False
+ if True, copy the underlying data
Returns
-------
ExtensionArray
@@ -384,6 +387,16 @@ def fillna(self, value=None, method=None, limit=None):
new_values = self.copy()
return new_values
+ def dropna(self):
+ """ Return ExtensionArray without NA values
+
+ Returns
+ -------
+ valid : ExtensionArray
+ """
+
+ return self[~self.isna()]
+
def unique(self):
"""Compute the ExtensionArray of unique values.
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 49e98c16c716e..5f405e0d10657 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -109,6 +109,11 @@ class ExtensionDtype(_DtypeOpsMixin):
* name
* construct_from_string
+ Optionally one can override construct_array_type for construction
+ with the name of this dtype via the Registry
+
+ * construct_array_type
+
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
@@ -156,6 +161,16 @@ def name(self):
"""
raise AbstractMethodError(self)
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ raise NotImplementedError
+
@classmethod
def construct_from_string(cls, string):
"""Attempt to construct this type from a string.
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 65328dfc7347e..2cd8144e43cea 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -648,6 +648,11 @@ def conv(r, dtype):
def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
+
+ # dispatch on extension dtype if needed
+ if is_extension_array_dtype(dtype):
+ return dtype.array_type._from_sequence(arr, copy=copy)
+
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 05f82c67ddb8b..ef4f36dc6df33 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -5,10 +5,11 @@
PY3, PY36)
from pandas._libs import algos, lib
from pandas._libs.tslibs import conversion
+
from pandas.core.dtypes.dtypes import (
- CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype,
+ registry, CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype,
DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype,
- IntervalDtypeType, ExtensionDtype, PandasExtensionDtype)
+ IntervalDtypeType, ExtensionDtype)
from pandas.core.dtypes.generic import (
ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries,
ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass,
@@ -1977,38 +1978,13 @@ def pandas_dtype(dtype):
np.dtype or a pandas dtype
"""
- if isinstance(dtype, DatetimeTZDtype):
- return dtype
- elif isinstance(dtype, PeriodDtype):
- return dtype
- elif isinstance(dtype, CategoricalDtype):
- return dtype
- elif isinstance(dtype, IntervalDtype):
- return dtype
- elif isinstance(dtype, string_types):
- try:
- return DatetimeTZDtype.construct_from_string(dtype)
- except TypeError:
- pass
-
- if dtype.startswith('period[') or dtype.startswith('Period['):
- # do not parse string like U as period[U]
- try:
- return PeriodDtype.construct_from_string(dtype)
- except TypeError:
- pass
-
- elif dtype.startswith('interval') or dtype.startswith('Interval'):
- try:
- return IntervalDtype.construct_from_string(dtype)
- except TypeError:
- pass
+ # registered extension types
+ result = registry.find(dtype)
+ if result is not None:
+ return result
- try:
- return CategoricalDtype.construct_from_string(dtype)
- except TypeError:
- pass
- elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)):
+ # un-registered extension types
+ if isinstance(dtype, ExtensionDtype):
return dtype
try:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 1e762c2be92a6..de837efc235a0 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -8,6 +8,65 @@
from .base import ExtensionDtype, _DtypeOpsMixin
+class Registry(object):
+ """
+ Registry for dtype inference
+
+ The registry allows one to map a string repr of a extension
+ dtype to an extenstion dtype.
+
+ Multiple extension types can be registered.
+ These are tried in order.
+
+ Examples
+ --------
+ registry.register(MyExtensionDtype)
+ """
+ dtypes = []
+
+ @classmethod
+ def register(self, dtype):
+ """
+ Parameters
+ ----------
+ dtype : ExtensionDtype
+ """
+ if not issubclass(dtype, (PandasExtensionDtype, ExtensionDtype)):
+ raise ValueError("can only register pandas extension dtypes")
+
+ self.dtypes.append(dtype)
+
+ def find(self, dtype):
+ """
+ Parameters
+ ----------
+ dtype : PandasExtensionDtype or string
+
+ Returns
+ -------
+ return the first matching dtype, otherwise return None
+ """
+ if not isinstance(dtype, compat.string_types):
+ dtype_type = dtype
+ if not isinstance(dtype, type):
+ dtype_type = type(dtype)
+ if issubclass(dtype_type, (PandasExtensionDtype, ExtensionDtype)):
+ return dtype
+
+ return None
+
+ for dtype_type in self.dtypes:
+ try:
+ return dtype_type.construct_from_string(dtype)
+ except TypeError:
+ pass
+
+ return None
+
+
+registry = Registry()
+
+
class PandasExtensionDtype(_DtypeOpsMixin):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
@@ -265,6 +324,17 @@ def _hash_categories(categories, ordered=True):
else:
return np.bitwise_xor.reduce(hashed)
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ from pandas import Categorical
+ return Categorical
+
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
@@ -556,11 +626,16 @@ def _parse_dtype_strict(cls, freq):
@classmethod
def construct_from_string(cls, string):
"""
- attempt to construct this type from a string, raise a TypeError
- if its not possible
+ Strict construction from a string, raise a TypeError if not
+ possible
"""
from pandas.tseries.offsets import DateOffset
- if isinstance(string, (compat.string_types, DateOffset)):
+
+ if (isinstance(string, compat.string_types) and
+ (string.startswith('period[') or
+ string.startswith('Period[')) or
+ isinstance(string, DateOffset)):
+ # do not parse string like U as period[U]
# avoid tuple to be regarded as freq
try:
return cls(freq=string)
@@ -660,7 +735,7 @@ def __new__(cls, subtype=None):
try:
subtype = pandas_dtype(subtype)
except TypeError:
- raise ValueError("could not construct IntervalDtype")
+ raise TypeError("could not construct IntervalDtype")
if is_categorical_dtype(subtype) or is_string_dtype(subtype):
# GH 19016
@@ -682,8 +757,11 @@ def construct_from_string(cls, string):
attempt to construct this type from a string, raise a TypeError
if its not possible
"""
- if isinstance(string, compat.string_types):
+ if (isinstance(string, compat.string_types) and
+ (string.startswith('interval') or
+ string.startswith('Interval'))):
return cls(string)
+
msg = "a string needs to be passed, got type {typ}"
raise TypeError(msg.format(typ=type(string)))
@@ -727,3 +805,10 @@ def is_dtype(cls, dtype):
else:
return False
return super(IntervalDtype, cls).is_dtype(dtype)
+
+
+# register the dtypes in search order
+registry.register(DatetimeTZDtype)
+registry.register(PeriodDtype)
+registry.register(IntervalDtype)
+registry.register(CategoricalDtype)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 80619c7beb28c..2fd4e099777bf 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -796,7 +796,7 @@ def astype(self, dtype, copy=True):
@cache_readonly
def dtype(self):
"""Return the dtype object of the underlying data"""
- return IntervalDtype.construct_from_string(str(self.left.dtype))
+ return IntervalDtype(self.left.dtype.name)
@property
def inferred_type(self):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index fe508dc1bb0bc..a5e9107b8a660 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -633,8 +633,9 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
return self.make_block(Categorical(self.values, dtype=dtype))
# astype processing
- dtype = np.dtype(dtype)
- if self.dtype == dtype:
+ if not is_extension_array_dtype(dtype):
+ dtype = np.dtype(dtype)
+ if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
@@ -662,7 +663,13 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
- values = values.reshape(self.shape)
+
+ # TODO(extension)
+ # should we make this attribute?
+ try:
+ values = values.reshape(self.shape)
+ except AttributeError:
+ pass
newb = make_block(values, placement=self.mgr_locs,
klass=klass)
@@ -3170,6 +3177,10 @@ def get_block_type(values, dtype=None):
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
+ elif is_categorical(values):
+ cls = CategoricalBlock
+ elif is_extension_array_dtype(values):
+ cls = ExtensionBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetimetz(values)
cls = DatetimeBlock
@@ -3179,10 +3190,6 @@ def get_block_type(values, dtype=None):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
- elif is_categorical(values):
- cls = CategoricalBlock
- elif is_extension_array_dtype(values):
- cls = ExtensionBlock
else:
cls = ObjectBlock
return cls
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cdb901d18767c..af3906a3e5c45 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4092,11 +4092,9 @@ def _try_cast(arr, take_fast_path):
subarr = Categorical(arr, dtype.categories,
ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
- # We don't allow casting to third party dtypes, since we don't
- # know what array belongs to which type.
- msg = ("Cannot cast data to extension dtype '{}'. "
- "Pass the extension array directly.".format(dtype))
- raise ValueError(msg)
+ # create an extension array from its dtype
+ array_type = dtype.construct_array_type()
+ subarr = array_type(subarr, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index c46f4b5ad9c18..2133d0c981b71 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -514,7 +514,6 @@ def _to_str_columns(self):
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
-
# may include levels names also
str_index = self._get_formatted_index(frame)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index eee53a2fcac6a..62e0f1cb717f0 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -7,10 +7,9 @@
from pandas import (
Series, Categorical, CategoricalIndex, IntervalIndex, date_range)
-from pandas.compat import string_types
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype, PeriodDtype,
- IntervalDtype, CategoricalDtype)
+ IntervalDtype, CategoricalDtype, registry)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
@@ -448,7 +447,7 @@ def test_construction_not_supported(self, subtype):
def test_construction_errors(self):
msg = 'could not construct IntervalDtype'
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(TypeError, msg):
IntervalDtype('xx')
def test_construction_from_string(self):
@@ -458,14 +457,21 @@ def test_construction_from_string(self):
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
- 'foo', 'interval[foo]', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
+ 'foo', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
- if isinstance(string, string_types):
- error, msg = ValueError, 'could not construct IntervalDtype'
- else:
- error, msg = TypeError, 'a string needs to be passed, got type'
+ # these are invalid entirely
+ msg = 'a string needs to be passed, got type'
+
+ with tm.assert_raises_regex(TypeError, msg):
+ IntervalDtype.construct_from_string(string)
+
+ @pytest.mark.parametrize('string', [
+ 'interval[foo]'])
+ def test_construction_from_string_error_subtype(self, string):
+ # this is an invalid subtype
+ msg = 'could not construct IntervalDtype'
- with tm.assert_raises_regex(error, msg):
+ with tm.assert_raises_regex(TypeError, msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
@@ -767,3 +773,25 @@ def test_update_dtype_errors(self, bad_dtype):
msg = 'a CategoricalDtype must be passed to perform an update, '
with tm.assert_raises_regex(ValueError, msg):
dtype.update_dtype(bad_dtype)
+
+
+@pytest.mark.parametrize(
+ 'dtype',
+ [DatetimeTZDtype, CategoricalDtype,
+ PeriodDtype, IntervalDtype])
+def test_registry(dtype):
+ assert dtype in registry.dtypes
+
+
+@pytest.mark.parametrize(
+ 'dtype, expected',
+ [('int64', None),
+ ('interval', IntervalDtype()),
+ ('interval[int64]', IntervalDtype()),
+ ('interval[datetime64[ns]]', IntervalDtype('datetime64[ns]')),
+ ('category', CategoricalDtype()),
+ ('period[D]', PeriodDtype('D')),
+ ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))])
+def test_registry_find(dtype, expected):
+
+ assert registry.find(dtype) == expected
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 489a430bb4020..fdd2b99d9b3c7 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -45,3 +45,14 @@ def test_series_given_mismatched_index_raises(self, data):
msg = 'Length of passed values is 3, index implies 5'
with tm.assert_raises_regex(ValueError, msg):
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
+
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ dtype = data.dtype
+
+ expected = pd.Series(data)
+ result = pd.Series(list(data), dtype=dtype)
+ self.assert_series_equal(result, expected)
+
+ result = pd.Series(list(data), dtype=str(dtype))
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index 63d3d807c270c..52a12816c8722 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -1,3 +1,4 @@
+import pytest
import numpy as np
import pandas as pd
@@ -46,3 +47,10 @@ def test_eq_with_str(self, dtype):
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
+
+ def test_array_type(self, data, dtype):
+ assert dtype.construct_array_type() is type(data)
+
+ def test_array_type_with_arg(self, data, dtype):
+ with pytest.raises(NotImplementedError):
+ dtype.construct_array_type('foo')
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 8ef8debbdc666..69de0e1900831 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -40,6 +40,16 @@ def test_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
+ def test_repr_array(self, data):
+ # some arrays may be able to assert
+ # attributes in the repr
+ repr(data)
+
+ def test_repr_array_long(self, data):
+ # some arrays may be able to assert a ... in the repr
+ with pd.option_context('display.max_seq_items', 1):
+ repr(data)
+
def test_dtype_name_in_info(self, data):
buf = StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 23227867ee4d7..c660687f16590 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -19,7 +19,8 @@ def test_value_counts(self, all_data, dropna):
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
- expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
+ expected = pd.Series(other).value_counts(
+ dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index af26d83df3fe2..43b2702c72193 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -23,6 +23,11 @@ def test_isna(self, data_missing):
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
+ def test_dropna_array(self, data_missing):
+ result = data_missing.dropna()
+ expected = data_missing[[1]]
+ self.assert_extension_array_equal(result, expected)
+
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index fe920a47ab740..c83726c5278a5 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -84,6 +84,7 @@ def test_concat_columns(self, data, na_value):
expected = pd.DataFrame({
'A': data._from_sequence(list(data[:3]) + [na_value]),
'B': [np.nan, 1, 2, 3]})
+
result = pd.concat([df1, df2], axis=1)
self.assert_frame_equal(result, expected)
result = pd.concat([df1['A'], df2['B']], axis=1)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index ae0d72c204d13..715e8bd40a2d0 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -62,7 +62,9 @@ def data_for_grouping():
class TestDtype(base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type() is Categorical
class TestInterface(base.BaseInterfaceTests):
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 3f2f24cd26af0..33adebbbe5780 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -16,6 +16,16 @@ class DecimalDtype(ExtensionDtype):
name = 'decimal'
na_value = decimal.Decimal('NaN')
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ return DecimalArray
+
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
@@ -28,7 +38,7 @@ def construct_from_string(cls, string):
class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin):
dtype = DecimalDtype()
- def __init__(self, values):
+ def __init__(self, values, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError("All values must be of type " +
@@ -44,7 +54,7 @@ def __init__(self, values):
# self._values = self.values = self.data
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 45ee7f227c4f0..8fd3d1a57f6c8 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -100,7 +100,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseDecimal, base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type() is DecimalArray
class TestInterface(BaseDecimal, base.BaseInterfaceTests):
@@ -108,7 +110,11 @@ class TestInterface(BaseDecimal, base.BaseInterfaceTests):
class TestConstructors(BaseDecimal, base.BaseConstructorsTests):
- pass
+
+ @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ pass
class TestReshaping(BaseDecimal, base.BaseReshapingTests):
@@ -155,6 +161,10 @@ class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
pass
+# TODO(extension)
+@pytest.mark.xfail(reason=(
+ "raising AssertionError as this is not implemented, "
+ "though easy enough to do"))
def test_series_constructor_coerce_data_to_extension_dtype_raises():
xpr = ("Cannot cast data to extension dtype 'decimal'. Pass the "
"extension array directly.")
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index d3043bf0852d2..160bf259e1e32 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -32,6 +32,16 @@ class JSONDtype(ExtensionDtype):
# source compatibility with Py2.
na_value = {}
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ return JSONArray
+
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
@@ -44,7 +54,7 @@ def construct_from_string(cls, string):
class JSONArray(ExtensionArray):
dtype = JSONDtype()
- def __init__(self, values):
+ def __init__(self, values, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError("All values must be of type " +
@@ -59,7 +69,7 @@ def __init__(self, values):
# self._values = self.values = self.data
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 268134dc8c333..7eeaf7946663e 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -107,7 +107,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseJSON, base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type() is JSONArray
class TestInterface(BaseJSON, base.BaseInterfaceTests):
@@ -130,7 +132,11 @@ def test_custom_asserts(self):
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
- pass
+
+ @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
| precursor to #21160 | https://api.github.com/repos/pandas-dev/pandas/pulls/21185 | 2018-05-24T00:35:49Z | 2018-07-03T14:25:11Z | 2018-07-03T14:25:11Z | 2018-07-03T14:25:35Z |
BUG: Series.combine() fails with ExtensionArray inside of Series | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index c69de149a0f35..8d26c51ae6527 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -178,9 +178,18 @@ Reshaping
-
-
+ExtensionArray
+^^^^^^^^^^^^^^
+
+- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
+- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
+-
+-
+
Other
^^^^^
-
-
-
+-
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d8bdd9ac9ed22..0564cdbbb2014 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2204,7 +2204,7 @@ def _binop(self, other, func, level=None, fill_value=None):
result.name = None
return result
- def combine(self, other, func, fill_value=np.nan):
+ def combine(self, other, func, fill_value=None):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
@@ -2216,6 +2216,8 @@ def combine(self, other, func, fill_value=np.nan):
func : function
Function that takes two scalars as inputs and return a scalar
fill_value : scalar value
+ The default specifies to use the appropriate NaN value for
+ the underlying dtype of the Series
Returns
-------
@@ -2235,20 +2237,38 @@ def combine(self, other, func, fill_value=np.nan):
Series.combine_first : Combine Series values, choosing the calling
Series's values first
"""
+ if fill_value is None:
+ fill_value = na_value_for_dtype(self.dtype, compat=False)
+
if isinstance(other, Series):
+ # If other is a Series, result is based on union of Series,
+ # so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
- new_values = np.empty(len(new_index), dtype=self.dtype)
- for i, idx in enumerate(new_index):
+ new_values = []
+ for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
- new_values[i] = func(lv, rv)
+ new_values.append(func(lv, rv))
else:
+ # Assume that other is a scalar, so apply the function for
+ # each element in the Series
new_index = self.index
with np.errstate(all='ignore'):
- new_values = func(self._values, other)
+ new_values = [func(lv, other) for lv in self._values]
new_name = self.name
+
+ if is_categorical_dtype(self.values):
+ pass
+ elif is_extension_array_dtype(self.values):
+ # The function can return something of any type, so check
+ # if the type is compatible with the calling EA
+ try:
+ new_values = self._values._from_sequence(new_values)
+ except TypeError:
+ pass
+
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index c5436aa731d50..23227867ee4d7 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -103,3 +103,37 @@ def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
tm.assert_numpy_array_equal(l1, l2)
self.assert_extension_array_equal(u1, u2)
+
+ def test_combine_le(self, data_repeated):
+ # GH 20825
+ # Test that combine works when doing a <= (le) comparison
+ orig_data1, orig_data2 = data_repeated(2)
+ s1 = pd.Series(orig_data1)
+ s2 = pd.Series(orig_data2)
+ result = s1.combine(s2, lambda x1, x2: x1 <= x2)
+ expected = pd.Series([a <= b for (a, b) in
+ zip(list(orig_data1), list(orig_data2))])
+ self.assert_series_equal(result, expected)
+
+ val = s1.iloc[0]
+ result = s1.combine(val, lambda x1, x2: x1 <= x2)
+ expected = pd.Series([a <= val for a in list(orig_data1)])
+ self.assert_series_equal(result, expected)
+
+ def test_combine_add(self, data_repeated):
+ # GH 20825
+ orig_data1, orig_data2 = data_repeated(2)
+ s1 = pd.Series(orig_data1)
+ s2 = pd.Series(orig_data2)
+ result = s1.combine(s2, lambda x1, x2: x1 + x2)
+ expected = pd.Series(
+ orig_data1._from_sequence([a + b for (a, b) in
+ zip(list(orig_data1),
+ list(orig_data2))]))
+ self.assert_series_equal(result, expected)
+
+ val = s1.iloc[0]
+ result = s1.combine(val, lambda x1, x2: x1 + x2)
+ expected = pd.Series(
+ orig_data1._from_sequence([a + val for a in list(orig_data1)]))
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 530a4e7a22a7a..61fdb8454b542 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -1,6 +1,7 @@
import string
import pytest
+import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
@@ -29,6 +30,15 @@ def data_missing():
return Categorical([np.nan, 'A'])
+@pytest.fixture
+def data_repeated():
+ """Return different versions of data for count times"""
+ def gen(count):
+ for _ in range(count):
+ yield Categorical(make_data())
+ yield gen
+
+
@pytest.fixture
def data_for_sorting():
return Categorical(['A', 'B', 'C'], categories=['C', 'A', 'B'],
@@ -154,6 +164,22 @@ class TestMethods(base.BaseMethodsTests):
def test_value_counts(self, all_data, dropna):
pass
+ def test_combine_add(self, data_repeated):
+ # GH 20825
+ # When adding categoricals in combine, result is a string
+ orig_data1, orig_data2 = data_repeated(2)
+ s1 = pd.Series(orig_data1)
+ s2 = pd.Series(orig_data2)
+ result = s1.combine(s2, lambda x1, x2: x1 + x2)
+ expected = pd.Series(([a + b for (a, b) in
+ zip(list(orig_data1), list(orig_data2))]))
+ self.assert_series_equal(result, expected)
+
+ val = s1.iloc[0]
+ result = s1.combine(val, lambda x1, x2: x1 + x2)
+ expected = pd.Series([a + val for a in list(orig_data1)])
+ self.assert_series_equal(result, expected)
+
class TestCasting(base.BaseCastingTests):
pass
diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index bbd31c4071b91..4bbbb7df2f399 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -30,6 +30,15 @@ def all_data(request, data, data_missing):
return data_missing
+@pytest.fixture
+def data_repeated():
+ """Return different versions of data for count times"""
+ def gen(count):
+ for _ in range(count):
+ yield NotImplementedError
+ yield gen
+
+
@pytest.fixture
def data_for_sorting():
"""Length-3 array with a known sort order.
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 90f0181beab0d..cc6fadc483d5e 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -28,7 +28,9 @@ class DecimalArray(ExtensionArray):
dtype = DecimalDtype()
def __init__(self, values):
- assert all(isinstance(v, decimal.Decimal) for v in values)
+ for val in values:
+ if not isinstance(val, self.dtype.type):
+ raise TypeError
values = np.asarray(values, dtype=object)
self._data = values
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 1f8cf0264f62f..f74b4d7e94f11 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -25,6 +25,14 @@ def data_missing():
return DecimalArray([decimal.Decimal('NaN'), decimal.Decimal(1)])
+@pytest.fixture
+def data_repeated():
+ def gen(count):
+ for _ in range(count):
+ yield DecimalArray(make_data())
+ yield gen
+
+
@pytest.fixture
def data_for_sorting():
return DecimalArray([decimal.Decimal('1'),
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index b7ac8033f3f6d..85a282ae4007f 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -187,6 +187,14 @@ def test_sort_values_missing(self, data_missing_for_sorting, ascending):
super(TestMethods, self).test_sort_values_missing(
data_missing_for_sorting, ascending)
+ @pytest.mark.skip(reason="combine for JSONArray not supported")
+ def test_combine_le(self, data_repeated):
+ pass
+
+ @pytest.mark.skip(reason="combine for JSONArray not supported")
+ def test_combine_add(self, data_repeated):
+ pass
+
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.xfail
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 6cf60e818c845..f35cce6ac9d71 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -60,6 +60,19 @@ def test_append_duplicates(self):
with tm.assert_raises_regex(ValueError, msg):
pd.concat([s1, s2], verify_integrity=True)
+ def test_combine_scalar(self):
+ # GH 21248
+ # Note - combine() with another Series is tested elsewhere because
+ # it is used when testing operators
+ s = pd.Series([i * 10 for i in range(5)])
+ result = s.combine(3, lambda x, y: x + y)
+ expected = pd.Series([i * 10 + 3 for i in range(5)])
+ tm.assert_series_equal(result, expected)
+
+ result = s.combine(22, lambda x, y: min(x, y))
+ expected = pd.Series([min(i * 10, 22) for i in range(5)])
+ tm.assert_series_equal(result, expected)
+
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
| - [x] closes #20825
closes #21248
- [x] tests added / passed
- extension/category/test_categorical.py:test_combine
- extension/decimal/test_decimal.py:test_combine
- series/test_combine_concat.py:test_combine_scalar
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- modified `whatsnew.v0.24.0.txt`
This is a split of #20889 to just fix the `combine` issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21183 | 2018-05-23T18:39:32Z | 2018-06-08T11:34:34Z | 2018-06-08T11:34:33Z | 2018-06-08T13:53:54Z |
BUG: Should not raises errors in .set_names and comparison methods for MultiIndex with nlevels == 1 (GH21149) | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index a071d7f3f5534..99e968d908182 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -78,6 +78,8 @@ Indexing
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
+- Bug in :meth:`MultiIndex.set_names` where error was raised for a MultiIndex with self.nlevels == 1 (:issue: `21149`)
+- Bug in comparison operations for :class:`MultiIndex` where error was raised on equality / inequality comparison involving a MultiIndex with self.nlevels == 1 (:issue: `21149`)
-
I/O
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f79288c167356..2a02e901b1002 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -96,10 +96,13 @@ def cmp_method(self, other):
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
+ from .multi import MultiIndex
if is_object_dtype(self) and self.nlevels == 1:
- # don't pass MultiIndex
- with np.errstate(all='ignore'):
- result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
+ if not isinstance(self, MultiIndex):
+ # don't pass MultiIndex
+ with np.errstate(all='ignore'):
+ result = ops._comp_method_OBJECT_ARRAY(op,
+ self.values, other)
else:
@@ -1383,8 +1386,8 @@ def set_names(self, names, level=None, inplace=False):
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
-
- if level is not None and self.nlevels == 1:
+ from .multi import MultiIndex
+ if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index c9f6bc9151d00..9c45c8188c3df 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -3291,3 +3291,37 @@ def test_duplicate_multiindex_labels(self):
with pytest.raises(ValueError):
ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
+
+ testdata = [
+ (pd.MultiIndex.from_product([[0, 1], [1, 0]]), pd.Series(range(4)), 4),
+ (pd.MultiIndex.from_product([[0, 1]]), pd.Series(range(2)), 2),
+ ]
+
+ @pytest.mark.parametrize("midx,idx,count", testdata)
+ def test_multiindex_compare(self, midx, idx, count):
+ # GH 21149 - change in 'def cmp_method()'
+ expected = pd.Series([True]).repeat(count)
+ expected.reset_index(drop=True, inplace=True)
+ result = pd.Series(midx == midx)
+ tm.assert_series_equal(result, expected)
+ result = (idx == idx)
+ tm.assert_series_equal(result, expected)
+
+ expected = pd.Series([False]).repeat(count)
+ expected.reset_index(drop=True, inplace=True)
+ result = pd.Series(midx > midx)
+ tm.assert_series_equal(result, expected)
+ result = pd.Series(midx == idx)
+ tm.assert_series_equal(result, expected)
+
+ with tm.assert_raises_regex(TypeError, 'not supported'):
+ midx > idx
+
+ def test_multiindex_set_names(self):
+ # GH 21149 - change in 'def set_names()'
+ result = pd.MultiIndex.from_product([[0, 1]])
+ result.set_names('first', level=0, inplace=True)
+ expected = pd.MultiIndex(levels=[[0, 1]],
+ labels=[[0, 1]],
+ names=['first'])
+ tm.assert_index_equal(result, expected)
| - [x] closes #21149
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21182 | 2018-05-23T18:28:20Z | 2018-05-24T18:35:43Z | null | 2018-05-24T18:40:05Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.