title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Bug in read_csv when using nrows or chunksize on a file containing only a header | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 8320f3cbc8e76..582b7f90b75d5 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -653,6 +653,8 @@ Bug Fixes
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
- Bug in ``Index.take`` may add unnecessary ``freq`` attribute (:issue:`10791`)
+- Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`)
+
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 275c765c4cb92..9ad992c434984 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -802,6 +802,8 @@ def __init__(self, kwds):
self._name_processed = False
+ self._first_chunk = True
+
@property
def _has_complex_date_col(self):
return (isinstance(self.parse_dates, dict) or
@@ -1164,14 +1166,11 @@ def set_error_bad_lines(self, status):
self._reader.set_error_bad_lines(int(status))
def read(self, nrows=None):
- if self.as_recarray:
- # what to do if there are leading columns?
- return self._reader.read(nrows)
-
try:
data = self._reader.read(nrows)
except StopIteration:
- if nrows is None:
+ if self._first_chunk:
+ self._first_chunk = False
return _get_empty_meta(self.orig_names,
self.index_col,
self.index_names,
@@ -1179,6 +1178,13 @@ def read(self, nrows=None):
else:
raise
+ # Done with first read, next time raise StopIteration
+ self._first_chunk = False
+
+ if self.as_recarray:
+ # what to do if there are leading columns?
+ return data
+
names = self.names
if self._reader.leading_cols:
@@ -1454,7 +1460,6 @@ def __init__(self, f, **kwds):
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
- self._first_chunk = True
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index facbff5e047db..feab6a9e82125 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2415,6 +2415,25 @@ def test_int64_overflow(self):
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
+ def test_empty_with_nrows_chunksize(self):
+ # GH 9535
+ expected = pd.DataFrame([], columns=['foo', 'bar'])
+
+ result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
+ tm.assert_frame_equal(result, expected)
+
+ result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
+ tm.assert_frame_equal(result, expected)
+
+ result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
+ result = pd.DataFrame(result[2], columns=result[1], index=result[0])
+ tm.assert_frame_equal(pd.DataFrame.from_records(result), expected)
+
+ result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
+ result = pd.DataFrame(result[2], columns=result[1], index=result[0])
+ tm.assert_frame_equal(pd.DataFrame.from_records(result), expected)
+
+
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
| Fixes GH #9535
| https://api.github.com/repos/pandas-dev/pandas/pulls/10827 | 2015-08-15T16:45:00Z | 2015-08-15T22:22:36Z | 2015-08-15T22:22:36Z | 2015-09-19T00:38:10Z |
BUG: Merge with empty dataframe may raise IndexError | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 7e69a8044a305..2e39ef5f0ce76 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -660,6 +660,8 @@ Bug Fixes
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
- Bug in ``Index.take`` may add unnecessary ``freq`` attribute (:issue:`10791`)
+- Bug in ``merge`` with empty ``DataFrame`` may raise ``IndexError`` (:issue:`10824`)
+
- Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 5ee774635e59e..a8b0d37b55bfe 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -217,6 +217,9 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
if left_indexer is not None and right_indexer is not None:
if name in self.left:
+ if len(self.left) == 0:
+ continue
+
na_indexer = (left_indexer == -1).nonzero()[0]
if len(na_indexer) == 0:
continue
@@ -226,6 +229,9 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
na_indexer, com.take_1d(self.right_join_keys[i],
right_na_indexer))
elif name in self.right:
+ if len(self.right) == 0:
+ continue
+
na_indexer = (right_indexer == -1).nonzero()[0]
if len(na_indexer) == 0:
continue
@@ -270,9 +276,17 @@ def _get_join_info(self):
sort=self.sort, how=self.how)
if self.right_index:
- join_index = self.left.index.take(left_indexer)
+ if len(self.left) > 0:
+ join_index = self.left.index.take(left_indexer)
+ else:
+ join_index = self.right.index.take(right_indexer)
+ left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
- join_index = self.right.index.take(right_indexer)
+ if len(self.right) > 0:
+ join_index = self.right.index.take(right_indexer)
+ else:
+ join_index = self.left.index.take(left_indexer)
+ right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 236157d028db3..ee83b9632bd4b 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -737,6 +737,88 @@ def test_left_merge_empty_dataframe(self):
result = merge(right, left, on='key', how='right')
assert_frame_equal(result, left)
+ def test_merge_left_empty_right_empty(self):
+ # GH 10824
+ left = pd.DataFrame([], columns=['a', 'b', 'c'])
+ right = pd.DataFrame([], columns=['x', 'y', 'z'])
+
+ exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],
+ dtype=object)
+
+ for kwarg in [dict(left_index=True, right_index=True),
+ dict(left_index=True, right_on='x'),
+ dict(left_on='a', right_index=True),
+ dict(left_on='a', right_on='x')]:
+
+ result = pd.merge(left, right, how='inner', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+ result = pd.merge(left, right, how='left', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+ result = pd.merge(left, right, how='right', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+ result = pd.merge(left, right, how='outer', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+
+ def test_merge_left_empty_right_notempty(self):
+ # GH 10824
+ left = pd.DataFrame([], columns=['a', 'b', 'c'])
+ right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=['x', 'y', 'z'])
+
+ exp_out = pd.DataFrame({'a': np.array([np.nan]*3, dtype=object),
+ 'b': np.array([np.nan]*3, dtype=object),
+ 'c': np.array([np.nan]*3, dtype=object),
+ 'x': [1, 4, 7],
+ 'y': [2, 5, 8],
+ 'z': [3, 6, 9]},
+ columns=['a', 'b', 'c', 'x', 'y', 'z'])
+ exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
+
+ for kwarg in [dict(left_index=True, right_index=True),
+ dict(left_index=True, right_on='x'),
+ dict(left_on='a', right_index=True),
+ dict(left_on='a', right_on='x')]:
+
+ result = pd.merge(left, right, how='inner', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+ result = pd.merge(left, right, how='left', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+
+ result = pd.merge(left, right, how='right', **kwarg)
+ tm.assert_frame_equal(result, exp_out)
+ result = pd.merge(left, right, how='outer', **kwarg)
+ tm.assert_frame_equal(result, exp_out)
+
+ def test_merge_left_notempty_right_empty(self):
+ # GH 10824
+ left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=['a', 'b', 'c'])
+ right = pd.DataFrame([], columns=['x', 'y', 'z'])
+
+ exp_out = pd.DataFrame({'a': [1, 4, 7],
+ 'b': [2, 5, 8],
+ 'c': [3, 6, 9],
+ 'x': np.array([np.nan]*3, dtype=object),
+ 'y': np.array([np.nan]*3, dtype=object),
+ 'z': np.array([np.nan]*3, dtype=object)},
+ columns=['a', 'b', 'c', 'x', 'y', 'z'])
+ exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
+
+ for kwarg in [dict(left_index=True, right_index=True),
+ dict(left_index=True, right_on='x'),
+ dict(left_on='a', right_index=True),
+ dict(left_on='a', right_on='x')]:
+
+ result = pd.merge(left, right, how='inner', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+ result = pd.merge(left, right, how='right', **kwarg)
+ tm.assert_frame_equal(result, exp_in)
+
+ result = pd.merge(left, right, how='left', **kwarg)
+ tm.assert_frame_equal(result, exp_out)
+ result = pd.merge(left, right, how='outer', **kwarg)
+ tm.assert_frame_equal(result, exp_out)
+
def test_merge_nosort(self):
# #2098, anything to do?
| Closes #10824.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10826 | 2015-08-15T14:25:10Z | 2015-08-20T12:54:55Z | 2015-08-20T12:54:55Z | 2015-08-20T13:48:48Z |
Fix handling of EOF in 'c' csv parser | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 18c39ccf820eb..6040cdbe70218 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -649,6 +649,7 @@ Bug Fixes
- Bug in ``Categorical`` may not representing properly when category contains ``tz`` or ``Period`` (:issue:`10713`)
- Bug in ``Categorical.__iter__`` may not returning correct ``datetime`` and ``Period`` (:issue:`10713`)
+- Bug in ``read_csv`` with ``engine='c'``: EOF preceded by a comment, blank line, etc. was not handled correctly (:issue:`10728`, :issue:`10548`)
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in ``read_msgpack`` where DataFrame to decode has duplicate column names (:issue:`9618`)
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index feab6a9e82125..ed261edad4f20 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2433,6 +2433,75 @@ def test_empty_with_nrows_chunksize(self):
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(result), expected)
+ def test_eof_states(self):
+ # GH 10728 and 10548
+
+ ## With skip_blank_lines = True
+ expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
+
+ # GH 10728
+ # WHITESPACE_LINE
+ data = 'a,b,c\n4,5,6\n '
+ result = self.read_csv(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+ # GH 10548
+ # EAT_LINE_COMMENT
+ data = 'a,b,c\n4,5,6\n#comment'
+ result = self.read_csv(StringIO(data), comment='#')
+ tm.assert_frame_equal(result, expected)
+
+ # EAT_CRNL_NOP
+ data = 'a,b,c\n4,5,6\n\r'
+ result = self.read_csv(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+ # EAT_COMMENT
+ data = 'a,b,c\n4,5,6#comment'
+ result = self.read_csv(StringIO(data), comment='#')
+ tm.assert_frame_equal(result, expected)
+
+ # SKIP_LINE
+ data = 'a,b,c\n4,5,6\nskipme'
+ result = self.read_csv(StringIO(data), skiprows=[2])
+ tm.assert_frame_equal(result, expected)
+
+ ## With skip_blank_lines = False
+
+ # EAT_LINE_COMMENT
+ data = 'a,b,c\n4,5,6\n#comment'
+ result = self.read_csv(StringIO(data), comment='#', skip_blank_lines=False)
+ expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
+ tm.assert_frame_equal(result, expected)
+
+ # IN_FIELD
+ data = 'a,b,c\n4,5,6\n '
+ result = self.read_csv(StringIO(data), skip_blank_lines=False)
+ expected = pd.DataFrame([['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
+ tm.assert_frame_equal(result, expected)
+
+ # EAT_CRNL
+ data = 'a,b,c\n4,5,6\n\r'
+ result = self.read_csv(StringIO(data), skip_blank_lines=False)
+ expected = pd.DataFrame([[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
+ tm.assert_frame_equal(result, expected)
+
+ ## Should produce exceptions
+
+ # ESCAPED_CHAR
+ data = "a,b,c\n4,5,6\n\\"
+ self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\')
+
+ # ESCAPE_IN_QUOTED_FIELD
+ data = 'a,b,c\n4,5,6\n"\\'
+ self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\')
+
+ # IN_QUOTED_FIELD
+ # Python 2.6 won't throw an exception for this case (see http://bugs.python.org/issue16013)
+ tm._skip_if_python26()
+ data = 'a,b,c\n4,5,6\n"'
+ self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\')
+
class TestPythonParser(ParserTests, tm.TestCase):
diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c
index 3be17f17d6afa..9d81bc9c37b8d 100644
--- a/pandas/src/parser/tokenizer.c
+++ b/pandas/src/parser/tokenizer.c
@@ -1413,9 +1413,9 @@ int tokenize_whitespace(parser_t *self, size_t line_limit)
self->state = EAT_CRNL;
break;
} else if (IS_WHITESPACE(c)) {
- /*if (self->skip_empty_lines)
+ if (self->skip_empty_lines)
self->state = WHITESPACE_LINE;
- else*/
+ else
self->state = EAT_WHITESPACE;
break;
} else if (c == self->commentchar) {
@@ -1643,34 +1643,44 @@ int tokenize_whitespace(parser_t *self, size_t line_limit)
static int parser_handle_eof(parser_t *self) {
TRACE(("handling eof, datalen: %d, pstate: %d\n", self->datalen, self->state))
- if (self->datalen == 0 && (self->state != START_RECORD)) {
- // test cases needed here
- // TODO: empty field at end of line
- TRACE(("handling eof\n"));
- if (self->state == IN_FIELD || self->state == START_FIELD) {
- if (end_field(self) < 0)
- return -1;
- } else if (self->state == QUOTE_IN_QUOTED_FIELD) {
- if (end_field(self) < 0)
- return -1;
- } else if (self->state == IN_QUOTED_FIELD) {
- self->error_msg = (char*) malloc(100);
- sprintf(self->error_msg, "EOF inside string starting at line %d",
- self->file_lines);
- return -1;
- }
+ if (self->datalen != 0)
+ return -1;
- if (end_line(self) < 0)
+ switch (self->state) {
+ case START_RECORD:
+ case WHITESPACE_LINE:
+ case EAT_CRNL_NOP:
+ case EAT_LINE_COMMENT:
+ return 0;
+
+ case ESCAPE_IN_QUOTED_FIELD:
+ case IN_QUOTED_FIELD:
+ self->error_msg = (char*)malloc(100);
+ sprintf(self->error_msg, "EOF inside string starting at line %d",
+ self->file_lines);
+ return -1;
+
+ case ESCAPED_CHAR:
+ self->error_msg = (char*)malloc(100);
+ sprintf(self->error_msg, "EOF following escape character");
+ return -1;
+
+ case IN_FIELD:
+ case START_FIELD:
+ case QUOTE_IN_QUOTED_FIELD:
+ if (end_field(self) < 0)
return -1;
+ break;
- return 0;
- }
- else if (self->datalen == 0 && (self->state == START_RECORD)) {
- return 0;
+ default:
+ break;
}
- return -1;
+ if (end_line(self) < 0)
+ return -1;
+ else
+ return 0;
}
int parser_consume_rows(parser_t *self, size_t nrows) {
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 4b7c8d4540e0f..e3633a1ec4360 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -241,6 +241,12 @@ def _skip_if_no_cday():
raise nose.SkipTest("CustomBusinessDay not available.")
+def _skip_if_python26():
+ if sys.version_info[:2] == (2, 6):
+ import nose
+ raise nose.SkipTest("skipping on python2.6")
+
+
#------------------------------------------------------------------------------
# locale utilities
| Fixes GH #10728
fixes #10548.
Also fixes:
- '\r' followed by EOF should be considered a blank line
- Escape character followed by EOF should produce an exception
- Line containing only whitespace should be skipped if `skip_blank_lines` and `delim_whitespace` are both `True`
| https://api.github.com/repos/pandas-dev/pandas/pulls/10825 | 2015-08-15T14:23:49Z | 2015-08-17T11:09:50Z | 2015-08-17T11:09:50Z | 2015-09-19T00:38:10Z |
API: Series.sum() will now return 0.0 for all-NaN series | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 13764543ec665..8078e459f2ee2 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -551,6 +551,20 @@ Other API Changes
- Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`)
+- ``Series.sum()`` will now return 0.0, and ``Series.prod()`` will return 1.0 for all-NaN series rather than ``NaN``; this is for compat with ``numpy`` >= 1.8.2 and ``bottleneck`` >= 1.0 (:issue:`9422`).
+
+ .. ipython:: python
+
+ s = Series([np.nan])
+ s.sum()
+ s.sum(skipna=False)
+ s.prod()
+ s.prod(skipna=False)
+
+ .. warning::
+
+ ``bottleneck`` is used for these calculations. If you have ``bottleneck`` < 1.0, then these will all return ``NaN``.
+
.. _whatsnew_0170.deprecations:
Deprecations
diff --git a/pandas/__init__.py b/pandas/__init__.py
index dbc697410da80..61ced12a36ae1 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -27,6 +27,7 @@
_np_version = np.version.short_version
_np_version_under1p8 = LooseVersion(_np_version) < '1.8'
_np_version_under1p9 = LooseVersion(_np_version) < '1.9'
+_np_version_under1p10 = LooseVersion(_np_version) < '1.10'
from pandas.info import __doc__
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index d23cb39c15548..baefc91a9fb5b 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -102,11 +102,11 @@ class SpecificationError(GroupByError):
def _groupby_function(name, alias, npfunc, numeric_only=True,
- _convert=False):
+ fillna=None, _convert=False):
def f(self):
self._set_selection_from_grouper()
try:
- return self._cython_agg_general(alias, numeric_only=numeric_only)
+ return self._cython_agg_general(alias, numeric_only=numeric_only, fillna=fillna)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
@@ -793,8 +793,8 @@ def size(self):
"""
return self.grouper.size()
- sum = _groupby_function('sum', 'add', np.sum)
- prod = _groupby_function('prod', 'prod', np.prod)
+ sum = _groupby_function('sum', 'add', np.sum, fillna=0.0)
+ prod = _groupby_function('prod', 'prod', np.prod, fillna=1.0)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
@@ -1118,7 +1118,7 @@ def _try_cast(self, result, obj):
return result
- def _cython_agg_general(self, how, numeric_only=True):
+ def _cython_agg_general(self, how, numeric_only=True, fillna=None):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
@@ -1126,7 +1126,7 @@ def _cython_agg_general(self, how, numeric_only=True):
continue
try:
- result, names = self.grouper.aggregate(obj.values, how)
+ result, names = self.grouper.aggregate(obj.values, how, fillna=fillna)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
@@ -1511,7 +1511,7 @@ def wrapper(*args, **kwargs):
(how, dtype_str))
return func, dtype_str
- def aggregate(self, values, how, axis=0):
+ def aggregate(self, values, how, axis=0, fillna=None):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
@@ -1534,14 +1534,18 @@ def aggregate(self, values, how, axis=0):
values = values.view('int64')
# GH 7754
is_numeric = True
+ fillna = None
elif is_bool_dtype(values.dtype):
values = _algos.ensure_float64(values)
+ fillna = None
elif com.is_integer_dtype(values):
values = values.astype('int64', copy=False)
+ fillna = None
elif is_numeric:
values = _algos.ensure_float64(values)
else:
values = values.astype(object)
+ fillna = None
try:
agg_func, dtype_str = self._get_aggregate_function(how, values)
@@ -1564,6 +1568,10 @@ def aggregate(self, values, how, axis=0):
result = self._aggregate(result, counts, values, agg_func, is_numeric)
+ # if we have a non-None fillna, then replace
+ if fillna is not None:
+ result[np.isnan(result)] = fillna
+
if com.is_integer_dtype(result):
if len(result[result == tslib.iNaT]) > 0:
result = result.astype('float64')
@@ -2581,8 +2589,8 @@ def _iterate_slices(self):
continue
yield val, slicer(val)
- def _cython_agg_general(self, how, numeric_only=True):
- new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
+ def _cython_agg_general(self, how, numeric_only=True, fillna=None):
+ new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only, fillna=fillna)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
@@ -2608,7 +2616,7 @@ def _wrap_agged_blocks(self, items, blocks):
_block_agg_axis = 0
- def _cython_agg_blocks(self, how, numeric_only=True):
+ def _cython_agg_blocks(self, how, numeric_only=True, fillna=None):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
@@ -2620,7 +2628,7 @@ def _cython_agg_blocks(self, how, numeric_only=True):
values = block._try_operate(block.values)
- result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
+ result, _ = self.grouper.aggregate(values, how, axis=agg_axis, fillna=fillna)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index c70fb6339517d..d004ab26d1424 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -9,7 +9,7 @@
_USE_BOTTLENECK = False
import pandas.hashtable as _hash
-from pandas import compat, lib, algos, tslib
+from pandas import compat, lib, algos, tslib, _np_version_under1p10
from pandas.compat import builtins
from pandas.core.common import (isnull, notnull, _values_from_object,
_maybe_upcast_putmask,
@@ -243,12 +243,14 @@ def nanall(values, axis=None, skipna=True):
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
+ dtype = values.dtype
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
the_sum = values.sum(axis, dtype=dtype_sum)
- the_sum = _maybe_null_out(the_sum, axis, mask)
+ the_sum = _maybe_null_out(the_sum, axis, mask, allow_all_null=not skipna,
+ dtype=dtype, fill_value=0)
return _wrap_results(the_sum, dtype)
@@ -549,12 +551,14 @@ def nankurt(values, axis=None, skipna=True):
@disallow('M8','m8')
def nanprod(values, axis=None, skipna=True):
+ dtype = values.dtype
mask = isnull(values)
if skipna and not is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
- return _maybe_null_out(result, axis, mask)
+ return _maybe_null_out(result, axis, mask, allow_all_null=not skipna, dtype=dtype,
+ fill_value=1)
def _maybe_arg_null_out(result, axis, mask, skipna):
@@ -588,7 +592,29 @@ def _get_counts(mask, axis, dtype=float):
return np.array(count, dtype=dtype)
-def _maybe_null_out(result, axis, mask):
+def _maybe_null_out(result, axis, mask, allow_all_null=True, dtype=None, fill_value=None):
+
+
+ # 9422
+ # if we have all nulls we normally return a
+ # null, but for numpy >= 1.8.2 and bottleneck >= 1.0
+ # nansum/nanprod are set to be the fill_values
+ if not allow_all_null and dtype is not None:
+
+ if is_complex_dtype(dtype) or not is_float_dtype(dtype):
+
+ # we don't mask complex
+ # object or non-floats
+ # if numpy changes this, we will as well
+
+ # IOW, np.nansum(np.array([np.nan],dtype='object')) is np.nan
+ # https://github.com/numpy/numpy/issues/6209
+ allow_all_null = True
+ fill_value = np.nan
+
+ else:
+ fill_value = np.nan
+
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
@@ -596,11 +622,19 @@ def _maybe_null_out(result, axis, mask):
result = result.astype('c16')
else:
result = result.astype('f8')
+
+ # mark nans
result[null_mask] = np.nan
+
+ # masker if for only all nan
+ if not allow_all_null:
+ null_mask = mask.all(axis)
+ if null_mask.any():
+ result[null_mask] = fill_value
else:
null_mask = mask.size - mask.sum()
- if null_mask == 0:
- result = np.nan
+ if null_mask == 0 and (mask.size > 0 or allow_all_null):
+ result = fill_value
return result
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 465f1da05ebde..f6004737ab97d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -12230,10 +12230,10 @@ def test_count(self):
assert_series_equal(result, expected)
def test_sum(self):
- self._check_stat_op('sum', np.sum, has_numeric_only=True)
+ self._check_stat_op('sum', np.sum, has_numeric_only=True, fillna=0.0)
# mixed types (with upcasting happening)
- self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),
+ self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'), fillna=0.0,
has_numeric_only=True, check_dtype=False, check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
@@ -12247,23 +12247,32 @@ def test_stat_operators_attempt_obj_array(self):
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
+ fills = [0.0, np.nan, 1.0, np.nan, np.nan, np.nan, np.nan, np.nan]
# GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
- for meth in methods:
+ for meth, fill in zip(methods, fills):
self.assertEqual(df.values.dtype, np.object_)
result = getattr(df, meth)(1)
+
+ # 9422
+ # all-NaN object array is still NaN, while floats are not :<
expected = getattr(df.astype('f8'), meth)(1)
+ if not np.isnan(fill):
+ mask = df.isnull().all(1)
+ if mask.any():
+ expected[mask] = np.nan
+
assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
- self._check_stat_op('product', np.prod)
+ self._check_stat_op('product', np.prod, fillna=1.0)
def test_median(self):
def wrapper(x):
@@ -12435,7 +12444,7 @@ def alt(x):
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True, check_dates=False,
- check_less_precise=False):
+ check_less_precise=False, fillna=None):
if frame is None:
frame = self.frame
# set some NAs
@@ -12478,11 +12487,20 @@ def wrapper(x):
wrapper = alternative
result0 = f(axis=0)
- result1 = f(axis=1)
- assert_series_equal(result0, frame.apply(skipna_wrapper),
+ expected0 = frame.apply(skipna_wrapper)
+ assert_series_equal(result0, expected0,
check_dtype=check_dtype,
check_less_precise=check_less_precise)
- assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
+
+ result1 = f(axis=1)
+
+ # 9422
+ # all-nan rows get the fillna
+ expected1 = frame.apply(skipna_wrapper, axis=1)
+ if fillna is not None:
+ expected1[isnull(frame).all(axis=1)] = fillna
+
+ assert_series_equal(result1, expected1,
check_dtype=False,
check_less_precise=check_less_precise)
@@ -12513,8 +12531,14 @@ def wrapper(x):
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
- self.assertTrue(np.isnan(r0).all())
- self.assertTrue(np.isnan(r1).all())
+
+ # 9422
+ if fillna is not None:
+ self.assertTrue((r0==fillna).all())
+ self.assertTrue((r1==fillna).all())
+ else:
+ self.assertTrue(np.isnan(r0).all())
+ self.assertTrue(np.isnan(r1).all())
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index a903b76b3ac7f..36aa2d833024f 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -4,18 +4,28 @@
from functools import partial
import numpy as np
-from pandas import Series
+from pandas import Series, _np_version_under1p10
+
from pandas.core.common import isnull, is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
-nanops._USE_BOTTLENECK = False
+_USE_BOTTLENECK = nanops._USE_BOTTLENECK
+class Base(object):
-class TestnanopsDataFrame(tm.TestCase):
def setUp(self):
+ nanops._USE_BOTTLENECK = False
np.random.seed(11235)
+ def tearDown(self):
+ nanops._USE_BOTTLENECK = _USE_BOTTLENECK
+
+class TestnanopsDataFrame(Base, tm.TestCase):
+
+ def setUp(self):
+ super(TestnanopsDataFrame, self).setUp()
+
self.arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*self.arr_shape)
@@ -172,15 +182,29 @@ def _coerce_tds(targ, res):
tm.assert_almost_equal(targ.imag, res.imag)
def check_fun_data(self, testfunc, targfunc,
- testarval, targarval, targarnanval, **kwargs):
+ testarval, targarval, targarnanval, nanfunc=None, **kwargs):
+
+ otargfunc = targfunc
for axis in list(range(targarval.ndim))+[None]:
for skipna in [False, True]:
+
targartempval = targarval if skipna else targarnanval
try:
+
+ # we need a different comp function if
+ # we have a provided nanfunc (e.g. nansum)
+ # and we are skipna=False
+ if nanfunc is not None:
+ if skipna:
+ targfunc = nanfunc
+ else:
+ targfunc = otargfunc
+
targ = targfunc(targartempval, axis=axis, **kwargs)
res = testfunc(testarval, axis=axis, skipna=skipna,
**kwargs)
self.check_results(targ, res, axis)
+
if skipna:
res = testfunc(testarval, axis=axis)
self.check_results(targ, res, axis)
@@ -205,9 +229,9 @@ def check_fun_data(self, testfunc, targfunc,
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
- self.check_fun_data(testfunc, targfunc,
+ self.check_fun_data(testfunc, otargfunc,
testarval2, targarval2, targarnanval2,
- **kwargs)
+ nanfunc=nanfunc, **kwargs)
def check_fun(self, testfunc, targfunc,
testar, targar=None, targarnan=None,
@@ -228,7 +252,7 @@ def check_fun(self, testfunc, targfunc,
'targarnan: %s' % targarnan)
raise
- def check_funs(self, testfunc, targfunc,
+ def check_funs(self, testfunc, targfunc, nanfunc=None,
allow_complex=True, allow_all_nan=True, allow_str=True,
allow_date=True, allow_tdelta=True, allow_obj=True,
**kwargs):
@@ -242,7 +266,7 @@ def check_funs(self, testfunc, targfunc,
self.arr_bool.astype('O')]
if allow_all_nan:
- self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
+ self.check_fun(testfunc, targfunc, 'arr_nan', nanfunc=nanfunc, **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
@@ -315,9 +339,15 @@ def test_nanall(self):
allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
- self.check_funs(nanops.nansum, np.sum,
+ self.check_funs(nanops.nansum, np.sum, nanfunc=np.nansum,
allow_str=False, allow_date=False, allow_tdelta=True)
+ # validate that nansum of all nans is 0, True for numpy >= 1.8.2 & bottleneck >= 1.0
+ # 9422
+ s = Series([np.nan])
+ self.assertEqual(s.sum(skipna=True),0.0)
+ self.assertIs(s.sum(skipna=False),np.nan)
+
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean,
allow_complex=False, allow_obj=False,
@@ -450,9 +480,30 @@ def test_nankurt(self):
allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanprod(self):
- self.check_funs(nanops.nanprod, np.prod,
+
+ # use nanprod if it exists
+ # otherwise by construction
+ nanfunc = getattr(np,'nanprod',None)
+ if nanfunc is None:
+ def nanprod(x, axis, **kwargs):
+ result = x.prod(axis=axis)
+ if np.isnan(result).all():
+ if np.isscalar(result):
+ result = 1
+ else:
+ result[np.isnan(result)] = 1
+ return result
+ nanfunc = nanprod
+
+ self.check_funs(nanops.nanprod, np.prod, nanfunc=nanfunc,
allow_str=False, allow_date=False, allow_tdelta=False)
+ # validate that nanprod of all nans is 1.0
+ # 9422
+ s = Series([np.nan])
+ self.assertEqual(s.prod(skipna=True),1.0)
+ self.assertIs(s.prod(skipna=False),np.nan)
+
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d,
**kwargs)
@@ -769,7 +820,8 @@ def test__bn_ok_dtype(self):
self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test'))
-class TestEnsureNumeric(tm.TestCase):
+class TestEnsureNumeric(Base, tm.TestCase):
+
def test_numeric_values(self):
# Test integer
self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int')
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 9cdc769dd7d74..e504a63490f0b 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -70,13 +70,13 @@ def test_count(self):
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
- self._check_stat_op('sum', np.sum)
+ self._check_stat_op('sum', np.sum, fillna=0.0)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
- self._check_stat_op('prod', np.prod)
+ self._check_stat_op('prod', np.prod, fillna=1.0)
def test_median(self):
def wrapper(x):
@@ -139,7 +139,7 @@ def alt(x):
# self._check_stat_op('skew', alt)
- def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
+ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, fillna=None):
if obj is None:
obj = self.panel
@@ -161,14 +161,22 @@ def wrapper(x):
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
- assert_frame_equal(result, obj.apply(wrapper, axis=i))
+ expected = obj.apply(wrapper, axis=i)
+ assert_frame_equal(result, expected)
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
- assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
+ expected = obj.apply(skipna_wrapper, axis=i)
+
+ # 9422
+ # all-nan rows get the fillna
+ if fillna is not None:
+ expected[isnull(obj).all(axis=i)] = fillna
+
+ assert_frame_equal(result, expected)
self.assertRaises(Exception, f, axis=obj.ndim)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 56146df37a27f..c99b08c8ea99c 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -2762,7 +2762,9 @@ def testit():
self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
self.assertEqual(f(allna),0.0)
except:
- self.assertTrue(np.isnan(f(allna)))
+
+ # 10815 pandas does as well
+ self.assertEqual(f(allna),0.0)
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
| compat with `numpy` >= 1.8.2 and `bottleneck` >= 1.0, #9422
note that passing skipna=False will still return a NaN
| https://api.github.com/repos/pandas-dev/pandas/pulls/10815 | 2015-08-13T15:29:06Z | 2015-09-03T14:19:26Z | null | 2022-10-13T00:16:46Z |
Deprecate combineAdd and combineMult (GH10735) | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 1cbe55ddbacb6..d9a8cc0d7cde0 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -799,9 +799,7 @@ Binary operator functions
DataFrame.ne
DataFrame.eq
DataFrame.combine
- DataFrame.combineAdd
DataFrame.combine_first
- DataFrame.combineMult
Function application, GroupBy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 11ba56eef5264..6074f639124fa 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -532,7 +532,11 @@ Deprecations
===================== =================================
- ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
-- ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was removed in favor of ``keep``. (:issue:`6511`, :issue:`8505`)
+- ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`6511`, :issue:`8505`)
+- ``DataFrame.combineAdd`` and ``DataFrame.combineMult`` are deprecated. They
+ can easily be replaced by using the ``add`` and ``mul`` methods:
+ ``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)``
+ (:issue:`10735`).
.. _whatsnew_0170.prior_deprecations:
@@ -633,5 +637,5 @@ Bug Fixes
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
-- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
+- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fe9c9bece1f79..062cbe579785c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4900,6 +4900,8 @@ def isin(self, values):
def combineAdd(self, other):
"""
+ DEPRECATED. Use ``DataFrame.add(other, fill_value=0.)`` instead.
+
Add two DataFrame objects and do not propagate
NaN values, so if for a (column, time) one frame is missing a
value, it will default to the other frame's value (which might
@@ -4912,11 +4914,21 @@ def combineAdd(self, other):
Returns
-------
DataFrame
+
+ See also
+ --------
+ DataFrame.add
+
"""
+ warnings.warn("'combineAdd' is deprecated. Use "
+ "'DataFrame.add(other, fill_value=0.)' instead",
+ FutureWarning, stacklevel=2)
return self.add(other, fill_value=0.)
def combineMult(self, other):
"""
+ DEPRECATED. Use ``DataFrame.mul(other, fill_value=1.)`` instead.
+
Multiply two DataFrame objects and do not propagate NaN values, so if
for a (column, time) one frame is missing a value, it will default to
the other frame's value (which might be NaN as well)
@@ -4928,7 +4940,15 @@ def combineMult(self, other):
Returns
-------
DataFrame
+
+ See also
+ --------
+ DataFrame.mul
+
"""
+ warnings.warn("'combineMult' is deprecated. Use "
+ "'DataFrame.mul(other, fill_value=1.)' instead",
+ FutureWarning, stacklevel=2)
return self.mul(other, fill_value=1.)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3d0259deef6f2..8c836ae564e28 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11787,61 +11787,65 @@ def test_update_from_non_df(self):
assert_frame_equal(df, expected)
def test_combineAdd(self):
- # trivial
- comb = self.frame.combineAdd(self.frame)
- assert_frame_equal(comb, self.frame * 2)
- # more rigorous
- a = DataFrame([[1., nan, nan, 2., nan]],
- columns=np.arange(5))
- b = DataFrame([[2., 3., nan, 2., 6., nan]],
- columns=np.arange(6))
- expected = DataFrame([[3., 3., nan, 4., 6., nan]],
- columns=np.arange(6))
-
- result = a.combineAdd(b)
- assert_frame_equal(result, expected)
- result2 = a.T.combineAdd(b.T)
- assert_frame_equal(result2, expected.T)
-
- expected2 = a.combine(b, operator.add, fill_value=0.)
- assert_frame_equal(expected, expected2)
-
- # corner cases
- comb = self.frame.combineAdd(self.empty)
- assert_frame_equal(comb, self.frame)
-
- comb = self.empty.combineAdd(self.frame)
- assert_frame_equal(comb, self.frame)
-
- # integer corner case
- df1 = DataFrame({'x': [5]})
- df2 = DataFrame({'x': [1]})
- df3 = DataFrame({'x': [6]})
- comb = df1.combineAdd(df2)
- assert_frame_equal(comb, df3)
-
- # mixed type GH2191
- df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
- df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
- rs = df1.combineAdd(df2)
- xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
- assert_frame_equal(xp, rs)
+ with tm.assert_produces_warning(FutureWarning):
+ # trivial
+ comb = self.frame.combineAdd(self.frame)
+ assert_frame_equal(comb, self.frame * 2)
+
+ # more rigorous
+ a = DataFrame([[1., nan, nan, 2., nan]],
+ columns=np.arange(5))
+ b = DataFrame([[2., 3., nan, 2., 6., nan]],
+ columns=np.arange(6))
+ expected = DataFrame([[3., 3., nan, 4., 6., nan]],
+ columns=np.arange(6))
+
+ result = a.combineAdd(b)
+ assert_frame_equal(result, expected)
+ result2 = a.T.combineAdd(b.T)
+ assert_frame_equal(result2, expected.T)
+
+ expected2 = a.combine(b, operator.add, fill_value=0.)
+ assert_frame_equal(expected, expected2)
+
+ # corner cases
+ comb = self.frame.combineAdd(self.empty)
+ assert_frame_equal(comb, self.frame)
+
+ comb = self.empty.combineAdd(self.frame)
+ assert_frame_equal(comb, self.frame)
+
+ # integer corner case
+ df1 = DataFrame({'x': [5]})
+ df2 = DataFrame({'x': [1]})
+ df3 = DataFrame({'x': [6]})
+ comb = df1.combineAdd(df2)
+ assert_frame_equal(comb, df3)
+
+ # mixed type GH2191
+ df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
+ df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
+ rs = df1.combineAdd(df2)
+ xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
+ assert_frame_equal(xp, rs)
# TODO: test integer fill corner?
def test_combineMult(self):
- # trivial
- comb = self.frame.combineMult(self.frame)
- assert_frame_equal(comb, self.frame ** 2)
+ with tm.assert_produces_warning(FutureWarning):
+ # trivial
+ comb = self.frame.combineMult(self.frame)
- # corner cases
- comb = self.frame.combineMult(self.empty)
- assert_frame_equal(comb, self.frame)
+ assert_frame_equal(comb, self.frame ** 2)
- comb = self.empty.combineMult(self.frame)
- assert_frame_equal(comb, self.frame)
+ # corner cases
+ comb = self.frame.combineMult(self.empty)
+ assert_frame_equal(comb, self.frame)
+
+ comb = self.empty.combineMult(self.frame)
+ assert_frame_equal(comb, self.frame)
def test_combine_generic(self):
df1 = self.frame
| Closes #10735
| https://api.github.com/repos/pandas-dev/pandas/pulls/10812 | 2015-08-13T08:39:32Z | 2015-08-16T08:28:53Z | 2015-08-16T08:28:53Z | 2015-08-16T08:28:53Z |
DOC: Updated drop_duplicates doc | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 251d94cbdd911..38629ee7baaea 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1180,28 +1180,43 @@ takes as an argument the columns to use to identify duplicated rows.
By default, the first observed row of a duplicate set is considered unique, but
each method has a ``keep`` parameter to specify targets to be kept.
+- ``keep='first'`` (default): mark / drop duplicates except for the first occurrence.
+- ``keep='last'``: mark / drop duplicates except for the last occurrence.
+- ``keep=False``: mark / drop all duplicates.
+
.. ipython:: python
- df2 = pd.DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'],
- 'b' : ['x', 'y', 'y', 'x', 'y', 'x', 'x'],
- 'c' : np.random.randn(7)})
- df2.duplicated(['a','b'])
- df2.duplicated(['a','b'], keep='last')
- df2.duplicated(['a','b'], keep=False)
- df2.drop_duplicates(['a','b'])
- df2.drop_duplicates(['a','b'], keep='last')
- df2.drop_duplicates(['a','b'], keep=False)
+ df2 = pd.DataFrame({'a': ['one', 'one', 'two', 'two', 'two', 'three', 'four'],
+ 'b': ['x', 'y', 'x', 'y', 'x', 'x', 'x'],
+ 'c': np.random.randn(7)})
+ df2
+ df2.duplicated('a')
+ df2.duplicated('a', keep='last')
+ df2.duplicated('a', keep=False)
+ df2.drop_duplicates('a')
+ df2.drop_duplicates('a', keep='last')
+ df2.drop_duplicates('a', keep=False)
-An alternative way to drop duplicates on the index is ``.groupby(level=0)`` combined with ``first()`` or ``last()``.
+Also, you can pass a list of columns to identify duplications.
.. ipython:: python
- df3 = df2.set_index('b')
- df3
- df3.groupby(level=0).first()
+ df2.duplicated(['a', 'b'])
+ df2.drop_duplicates(['a', 'b'])
+
+To drop duplicates by index value, use ``Index.duplicated`` then perform slicing.
+Same options are available in ``keep`` parameter.
- # a bit more verbose
- df3.reset_index().drop_duplicates(subset='b', keep='first').set_index('b')
+.. ipython:: python
+
+ df3 = pd.DataFrame({'a': np.arange(6),
+ 'b': np.random.randn(6)},
+ index=['a', 'a', 'b', 'c', 'b', 'a'])
+ df3
+ df3.index.duplicated()
+ df3[~df3.index.duplicated()]
+ df3[~df3.index.duplicated(keep='last')]
+ df3[~df3.index.duplicated(keep=False)]
.. _indexing.dictionarylike:
| Closes #2825.
CC @patricktokeeffe @bilderbuchi
| https://api.github.com/repos/pandas-dev/pandas/pulls/10810 | 2015-08-12T21:36:16Z | 2015-08-18T10:48:47Z | 2015-08-18T10:48:47Z | 2015-08-18T11:23:04Z |
BUG: fix bounds for negative ints when using iloc (GH 10779) | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 2a00263f973e9..8320f3cbc8e76 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -658,3 +658,5 @@ Bug Fixes
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
+- Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`)
+- Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 7fbc6736db4bd..8a8ee00f234fa 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1388,7 +1388,8 @@ def _is_valid_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
- if key > len(ax):
+ l = len(ax)
+ if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
return True
@@ -1400,7 +1401,7 @@ def _is_valid_list_like(self, key, axis):
arr = np.array(key)
ax = self.obj._get_axis(axis)
l = len(ax)
- if len(arr) and (arr.max() >= l or arr.min() <= -l):
+ if len(arr) and (arr.max() >= l or arr.min() < -l):
raise IndexError("positional indexers are out-of-bounds")
return True
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index d0ccbee378df8..2c0bfcd9b905d 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -411,6 +411,12 @@ def test_iloc_exceeds_bounds(self):
df.iloc[30]
self.assertRaises(IndexError, lambda : df.iloc[-30])
+ # GH10779
+ # single positive/negative indexer exceeding Series bounds should raise an IndexError
+ with tm.assertRaisesRegexp(IndexError, 'single positional indexer is out-of-bounds'):
+ s.iloc[30]
+ self.assertRaises(IndexError, lambda : s.iloc[-30])
+
# slices are ok
result = df.iloc[:,4:10] # 0 < start < len < stop
expected = df.iloc[:,4:]
@@ -471,7 +477,6 @@ def check(result,expected):
self.assertRaises(IndexError, lambda : dfl.iloc[[4,5,6]])
self.assertRaises(IndexError, lambda : dfl.iloc[:,4])
-
def test_iloc_getitem_int(self):
# integer
@@ -497,6 +502,33 @@ def test_iloc_getitem_list_int(self):
self.check_result('array int', 'iloc', np.array([2]), 'ix', { 0 : [4], 1 : [6], 2: [8] }, typs = ['ints'])
self.check_result('array int', 'iloc', np.array([0,1,2]), 'indexer', [0,1,2], typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
+ def test_iloc_getitem_neg_int_can_reach_first_index(self):
+ # GH10547 and GH10779
+ # negative integers should be able to reach index 0
+ df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
+ s = df['A']
+
+ expected = df.iloc[0]
+ result = df.iloc[-3]
+ assert_series_equal(result, expected)
+
+ expected = df.iloc[[0]]
+ result = df.iloc[[-3]]
+ assert_frame_equal(result, expected)
+
+ expected = s.iloc[0]
+ result = s.iloc[-3]
+ self.assertEqual(result, expected)
+
+ expected = s.iloc[[0]]
+ result = s.iloc[[-3]]
+ assert_series_equal(result, expected)
+
+ # check the length 1 Series case highlighted in GH10547
+ expected = pd.Series(['a'], index=['A'])
+ result = expected.iloc[[-1]]
+ assert_series_equal(result, expected)
+
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
| This fixes the issues raised in [GH 10779](https://github.com/pydata/pandas/issues/10779).
closes #10547
Previously, negative integers used in `iloc` were not stopped from going beyond the bounds of the Series. For instance, if `s = pd.Series([1,2,3])` then `s.iloc[-4]` returned bytes from memory outside `s`. This will now raise an IndexError instead.
Additionally, it was not possible to access the first row (index 0) of a Series or DataFrame with a negative integer in a list. For example, `s.iloc[[-3]]` raised an IndexError instead of returning a Series containing the first row of `s`. The behaviour of `iloc` has been changed to allow this.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10808 | 2015-08-12T20:47:31Z | 2015-08-14T18:40:52Z | 2015-08-14T18:40:52Z | 2015-08-15T12:46:01Z |
BUG: Allow read_sql_table to read from views | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 70d616ca72c1b..dd8e1f89d7b38 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -142,6 +142,9 @@ Other enhancements
- ``pd.merge`` will now allow duplicate column names if they are not merged upon (:issue:`10639`).
- ``pd.pivot`` will now allow passing index as ``None`` (:issue:`3962`).
+
+- ``read_sql_table`` will now allow reading from views (:issue:`10750`).
+
- ``drop_duplicates`` and ``duplicated`` now accept ``keep`` keyword to target first, last, and all duplicates. ``take_last`` keyword is deprecated, see :ref:`deprecations <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`)
.. ipython :: python
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 8eefe4ba98876..b587ec128c016 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -337,7 +337,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
- meta.reflect(only=[table_name])
+ meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 859c6d3250121..619de8d6bad3b 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -161,6 +161,12 @@
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
+ },
+ 'create_view': {
+ 'sqlite': """
+ CREATE VIEW iris_view AS
+ SELECT * FROM iris
+ """
}
}
@@ -244,6 +250,10 @@ def _load_iris_data(self):
for row in r:
self._get_exec().execute(ins, row)
+ def _load_iris_view(self):
+ self.drop_table('iris_view')
+ self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
+
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
@@ -482,6 +492,7 @@ class _TestSQLApi(PandasSQLTest):
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
+ self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
@@ -492,6 +503,11 @@ def test_read_sql_iris(self):
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
+ def test_read_sql_view(self):
+ iris_frame = sql.read_sql_query(
+ "SELECT * FROM iris_view", self.conn)
+ self._check_iris_loaded_frame(iris_frame)
+
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
| Solves #10750
Follow-up from #10771
'read_sql_table' will now allow reading from views.
Added also a note in whatsnew and a unit test to check the behaviour.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10803 | 2015-08-12T07:04:36Z | 2015-08-12T09:31:13Z | 2015-08-12T09:31:13Z | 2015-08-12T09:31:13Z |
PeriodIndex test keys that aren't strings | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 18c39ccf820eb..3af9c887bb82f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -629,6 +629,7 @@ Bug Fixes
- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in ``pd.eval`` using ``numexpr`` engine coerces 1 element numpy array to scalar (:issue:`10546`)
+- Bug in ``PeriodIndex.__contains__`` & ``DatetimeIndex.__contains__`` that always returned False for each other's objects (:issue:`10798`)
- Bug in ``pd.concat`` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`, :issue:`10630`)
- Bug in ``pd.read_csv`` with kwargs ``index_col=False``, ``index_col=['a', 'b']`` or ``dtype``
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 9a3576a8fd846..52d2d85bda739 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -2817,7 +2817,31 @@ def test_view(self):
result = self._holder(i)
tm.assert_index_equal(result, i)
-class TestDatetimeIndex(DatetimeLike, tm.TestCase):
+class DatetimeAbsoluteLike(DatetimeLike):
+
+ # GH10801
+ def test_datetimeabsolute_contains(self):
+
+ i = self.create_index()
+
+ self.assertTrue(i[2] in i)
+ self.assertFalse('2012' in i)
+
+ # python datetime objects
+ self.assertTrue(datetime(2013,1,1) in i)
+
+ # strings
+ self.assertTrue('2013-1-1' in i)
+
+ # Timestamp # GH10801
+ self.assertTrue(pd.Timestamp('2013-1-1') in i)
+
+ # pandas Period
+ self.assertTrue(pd.Period('2013-1-1', 'D') in i)
+ self.assertFalse(pd.Period('2013-1-1', 'M') in i)
+
+
+class TestDatetimeIndex(DatetimeAbsoluteLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
@@ -2964,7 +2988,7 @@ def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
-class TestPeriodIndex(DatetimeLike, tm.TestCase):
+class TestPeriodIndex(DatetimeAbsoluteLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 19ff9a4b19a3e..8605058e3ec32 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1289,6 +1289,12 @@ def get_loc(self, key, method=None):
'when key is a time object')
return self.indexer_at_time(key)
+ # check if it's a Period and the frequencies are the same - otherwise a monthly period would match for
+ # a daily timestamp at the beginning of the month. NB: 'B' and 'D' therefore won't match
+ if isinstance(key, com.ABCPeriod) and key.freq == self.freq:
+ key = key.to_timestamp()
+ return Index.get_loc(self, key, method=method)
+
try:
return Index.get_loc(self, key, method=method)
except (KeyError, ValueError, TypeError):
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index e7b229e91cbc8..62289219ad72e 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -296,14 +296,14 @@ def _na_value(self):
return self._box_func(tslib.iNaT)
def __contains__(self, key):
+ # if key isn't a Period of the same freq, rely on `get_loc` for the coercion.
if not isinstance(key, Period) or key.freq != self.freq:
- if isinstance(key, compat.string_types):
- try:
- self.get_loc(key)
- return True
- except Exception:
- return False
- return False
+ try:
+ self.get_loc(key)
+ return True
+ except Exception:
+ return False
+ # If it is a Period of the same freq, go straight to the _engine
return key.ordinal in self._engine
@property
| Closes https://github.com/pydata/pandas/issues/10798
Not sure if the testing is done in a good way - I just copy & pasted code to test `PeriodIndex` & `DatetimeIndex` (it wouldn't work for `LikeDatetimeIndex` because of `TimeDeltaIndex`) - very open to feedback on better ways to do this.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10801 | 2015-08-11T22:50:49Z | 2015-08-26T01:23:14Z | null | 2015-11-22T09:02:08Z |
BUG: pd.Series.interpolate(method='spline') Errort Msg, #10633 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 13764543ec665..ad104303b9dbd 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -613,10 +613,15 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
+=======
+
+
+- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`)
- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
+- Bug in ``pd.Series.interpolate`` when setting no order value on ``Series.interpolate`` this needs to be at least 1. (:issue:`10633`) and (:issue:`10800`)
- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`)
- Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index aaa341240f538..48ad299cfd658 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1718,6 +1718,9 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
+ # GH #10633
+ if not order:
+ raise ValueError("order needs to be specified and greater than 0")
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py
index 86c5a9e0d7f19..0ec3575be04f6 100644
--- a/pandas/io/tests/generate_legacy_storage_files.py
+++ b/pandas/io/tests/generate_legacy_storage_files.py
@@ -83,9 +83,20 @@ def create_data():
index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
names=['one', 'two'])),
dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']),
+<<<<<<< HEAD
+<<<<<<< HEAD
cat=Series(Categorical(['foo', 'bar', 'baz'])))
if LooseVersion(pandas.__version__) >= '0.17.0':
series['period'] = Series([Period('2000Q1')] * 5)
+=======
+ cat=Series(Categorical(['foo', 'bar', 'baz'])),
+ per=Series([Period('2000Q1')] * 5))
+>>>>>>> 0525684... ENH: pickle support for Period #10439
+=======
+ cat=Series(Categorical(['foo', 'bar', 'baz'])))
+ if LooseVersion(pandas.__version__) >= '0.17.0':
+ series['period'] = Series([Period('2000Q1')] * 5)
+>>>>>>> aa04812... update legacy_storage for pickles
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list("ABCDA")
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index c1f6045c61d54..d177d9359dfc8 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -799,7 +799,15 @@ def test_nan_interpolate(self):
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
-
+
+ # GH #10633
+ def test_interpolate_spline(self):
+ np.random.seed(1)
+ t = pd.Series(np.arange(10)**2)
+ t[np.random.randint(0,9,3)] = np.nan
+ with tm.assertRaises(ValueError):
+ t.interpolate(method='spline', order=0)
+
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index eb5c6759bfa45..3a69670f43fb7 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -2537,7 +2537,17 @@ def test_searchsorted(self):
def test_round_trip(self):
+<<<<<<< HEAD
+<<<<<<< HEAD
p = Period('2000Q1')
+=======
+ import pickle
+ p = Period('2000Q1')
+
+>>>>>>> 0525684... ENH: pickle support for Period #10439
+=======
+ p = Period('2000Q1')
+>>>>>>> aa04812... update legacy_storage for pickles
new_p = self.round_trip_pickle(p)
self.assertEqual(new_p, p)
| Closes #10633
This a first attempt at the error messages mentioned. Not ready to merge yet but I wanted to start the conversation...
I think I might have the error message wrong - anyone got any ideas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10800 | 2015-08-11T20:14:58Z | 2015-08-21T19:56:29Z | null | 2015-08-21T20:20:10Z |
Allows set_index() to take an Index as an argument (#10797) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fe9c9bece1f79..fd013eca10f82 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2628,7 +2628,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
-------
dataframe : DataFrame
"""
- if not isinstance(keys, list):
+ if not com.is_list_like(keys):
keys = [keys]
if inplace:
| Simple change, closes #10797
| https://api.github.com/repos/pandas-dev/pandas/pulls/10799 | 2015-08-11T20:12:01Z | 2015-11-10T01:27:28Z | null | 2015-11-10T01:27:28Z |
TST: Suppress warnings of drop_duplicates tests | diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 066b359d72b5c..c9e4285d8b684 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -683,10 +683,6 @@ def test_factorize(self):
def test_duplicated_drop_duplicates(self):
# GH 4060
-
- import warnings
- warnings.simplefilter('always')
-
for original in self.objs:
if isinstance(original, Index):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 72eea5162caa5..3d0259deef6f2 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7848,7 +7848,7 @@ def test_dropna_multiple_axes(self):
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
- def test_aaa_drop_duplicates(self):
+ def test_drop_duplicates(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
@@ -7892,7 +7892,8 @@ def test_aaa_drop_duplicates(self):
assert_frame_equal(result, expected)
# deprecate take_last
- result = df.drop_duplicates(('AAA', 'B'), take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.drop_duplicates(('AAA', 'B'), take_last=True)
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
@@ -7913,8 +7914,10 @@ def test_aaa_drop_duplicates(self):
assert_frame_equal(result, expected)
# deprecate take_last
- result = df2.drop_duplicates(take_last=True)
- expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df2.drop_duplicates(take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
assert_frame_equal(result, expected)
def test_drop_duplicates_for_take_all(self):
@@ -8008,7 +8011,8 @@ def test_drop_duplicates_tuple(self):
assert_frame_equal(result, expected)
# deprecate take_last
- result = df.drop_duplicates(('AA', 'AB'), take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.drop_duplicates(('AA', 'AB'), take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
@@ -8041,7 +8045,8 @@ def test_drop_duplicates_NA(self):
self.assertEqual(len(result), 0)
# deprecate take_last
- result = df.drop_duplicates('A', take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.drop_duplicates('A', take_last=True)
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
@@ -8059,7 +8064,8 @@ def test_drop_duplicates_NA(self):
assert_frame_equal(result, expected)
# deprecate take_last
- result = df.drop_duplicates(['A', 'B'], take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.drop_duplicates(['A', 'B'], take_last=True)
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
@@ -8086,7 +8092,8 @@ def test_drop_duplicates_NA(self):
self.assertEqual(len(result), 0)
# deprecate take_last
- result = df.drop_duplicates('C', take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.drop_duplicates('C', take_last=True)
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
@@ -8104,7 +8111,8 @@ def test_drop_duplicates_NA(self):
assert_frame_equal(result, expected)
# deprecate take_last
- result = df.drop_duplicates(['C', 'B'], take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.drop_duplicates(['C', 'B'], take_last=True)
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
@@ -8172,7 +8180,8 @@ def test_drop_duplicates_inplace(self):
# deprecate take_last
df = orig.copy()
- df.drop_duplicates('A', take_last=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ df.drop_duplicates('A', take_last=True, inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
@@ -8198,7 +8207,8 @@ def test_drop_duplicates_inplace(self):
# deprecate take_last
df = orig.copy()
- df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
@@ -8227,8 +8237,10 @@ def test_drop_duplicates_inplace(self):
# deprecate take_last
df2 = orig2.copy()
- df2.drop_duplicates(take_last=True, inplace=True)
- expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ df2.drop_duplicates(take_last=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
result = df2
assert_frame_equal(result, expected)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index fbe4eefabe02d..be7ed6c1b268f 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2151,11 +2151,13 @@ def test_duplicated_drop_duplicates(self):
# deprecate take_last
expected = np.array([True, False, False, False, False, False])
- duplicated = idx.duplicated(take_last=True)
+ with tm.assert_produces_warning(FutureWarning):
+ duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2 ,3], [1, 1, 1, 2, 2]))
- tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
+ with tm.assert_produces_warning(FutureWarning):
+ tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
def test_multiindex_set_index(self):
# segfault in #3308
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 31843616956f6..4d92deece1be3 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4798,10 +4798,13 @@ def test_drop_duplicates(self):
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
# deprecate take_last
- assert_series_equal(s.duplicated(take_last=True), expected)
- assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(s.duplicated(take_last=True), expected)
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
sc = s.copy()
- sc.drop_duplicates(take_last=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, True])
@@ -4827,10 +4830,13 @@ def test_drop_duplicates(self):
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
# deprecate take_last
- assert_series_equal(s.duplicated(take_last=True), expected)
- assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(s.duplicated(take_last=True), expected)
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
sc = s.copy()
- sc.drop_duplicates(take_last=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, True, True, False])
| Follow-up of #10236. Suppress unnecessary warnings during the test.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10795 | 2015-08-11T09:21:38Z | 2015-08-12T09:55:47Z | 2015-08-12T09:55:47Z | 2015-08-12T13:51:24Z |
DOC: add guideline to use versionadded directive to contributing docs (GH10215) | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7df5a18959ba7..4ec2258df56f2 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -247,6 +247,8 @@ just checked out. There are two primary methods of doing this.
from your development directory. Thus, you can always be using the development
version on your system without being inside the clone directory.
+.. _contributing.documentation:
+
Contributing to the documentation
=================================
@@ -543,10 +545,23 @@ Documenting your code
Changes should be reflected in the release notes located in `doc/source/whatsnew/vx.y.z.txt`.
This file contains an ongoing change log for each release. Add an entry to this file to
document your fix, enhancement or (unavoidable) breaking change. Make sure to include the
-GitHub issue number when adding your entry.
+GitHub issue number when adding your entry (using `` :issue:`1234` `` where `1234` is the
+issue/pull request number).
+
+If your code is an enhancement, it is most likely necessary to add usage
+examples to the existing documentation. This can be done following the section
+regarding documentation :ref:`above <contributing.documentation>`.
+Further, to let users know when this feature was added, the ``versionadded``
+directive is used. The sphinx syntax for that is:
+
+.. code-block:: rst
+
+ .. versionadded:: 0.17.0
-If your code is an enhancement, it is most likely necessary to add usage examples to the
-existing documentation. This can be done following the section regarding documentation.
+This will put the text *New in version 0.17.0* wherever you put the sphinx
+directive. This should also be put in the docstring when adding a new function
+or method (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
+or a new keyword argument (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
Contributing your changes to *pandas*
=====================================
| Closes #10215
| https://api.github.com/repos/pandas-dev/pandas/pulls/10794 | 2015-08-11T09:13:41Z | 2015-08-12T11:37:32Z | 2015-08-12T11:37:32Z | 2015-08-12T11:37:32Z |
BUG: Index.take may add unnecessary freq attribute | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 70d616ca72c1b..68a7c105be138 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -548,6 +548,7 @@ Performance Improvements
- 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`)
- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
- Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`)
+- 8x improvement in ``iloc`` using list-like input (:issue:`10791`)
- Improved performance of ``Series.isin`` for datetimelike/integer Series (:issue:`10287`)
- 20x improvement in ``concat`` of Categoricals when categories are identical (:issue:`10587`)
- Improved performance of ``to_datetime`` when specified format string is ISO8601 (:issue:`10178`)
@@ -624,7 +625,7 @@ Bug Fixes
- Bug in ``read_msgpack`` where DataFrame to decode has duplicate column names (:issue:`9618`)
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
-
+- Bug in ``Index.take`` may add unnecessary ``freq`` attribute (:issue:`10791`)
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index febcfa37994a3..12ad8a590c304 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1194,7 +1194,7 @@ def _ensure_compat_concat(indexes):
return indexes
- def take(self, indexer, axis=0):
+ def take(self, indices, axis=0):
"""
return a new Index of the values selected by the indexer
@@ -1203,11 +1203,9 @@ def take(self, indexer, axis=0):
numpy.ndarray.take
"""
- indexer = com._ensure_platform_int(indexer)
- taken = np.array(self).take(indexer)
-
- # by definition cannot propogate freq
- return self._shallow_copy(taken, freq=None)
+ indices = com._ensure_platform_int(indices)
+ taken = self.values.take(indices)
+ return self._shallow_copy(taken)
def putmask(self, mask, value):
"""
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 3c988943301c0..9a3576a8fd846 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -276,6 +276,11 @@ def test_take(self):
expected = ind[indexer]
self.assertTrue(result.equals(expected))
+ if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ # GH 10791
+ with tm.assertRaises(AttributeError):
+ ind.freq
+
def test_setops_errorcases(self):
for name, idx in compat.iteritems(self.indices):
# # non-iterable input
@@ -4775,7 +4780,7 @@ def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'),range(3)],names=['first','second'])
str(mi)
-
+
if compat.PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
@@ -4784,11 +4789,11 @@ def test_repr_roundtrip(self):
tm.assert_index_equal(result, mi, exact=False)
self.assertEqual(mi.get_level_values('first').inferred_type, 'string')
self.assertEqual(result.get_level_values('first').inferred_type, 'unicode')
-
+
mi_u = MultiIndex.from_product([list(u'ab'),range(3)],names=['first','second'])
result = eval(repr(mi_u))
- tm.assert_index_equal(result, mi_u, exact=True)
-
+ tm.assert_index_equal(result, mi_u, exact=True)
+
# formatting
if compat.PY3:
str(mi)
@@ -4810,7 +4815,7 @@ def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list(u'abcdefg'),range(10)],names=['first','second'])
result = eval(repr(mi_u))
- tm.assert_index_equal(result, mi_u, exact=True)
+ tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 6d20b0128f164..addd070c23eeb 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -182,10 +182,12 @@ def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
- maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices), len(self))
+ indices = com._ensure_int64(indices)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
- return super(DatetimeIndexOpsMixin, self).take(indices, axis)
+ taken = self.asi8.take(indices)
+ return self._shallow_copy(taken, freq=None)
def get_duplicates(self):
values = Index.get_duplicates(self)
diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py
index 9fbc070ac3b9d..f2236c48fb002 100644
--- a/vb_suite/indexing.py
+++ b/vb_suite/indexing.py
@@ -265,3 +265,16 @@
multiindex_slicers = Benchmark('mdt2.loc[idx[test_A-eps_A:test_A+eps_A,test_B-eps_B:test_B+eps_B,test_C-eps_C:test_C+eps_C,test_D-eps_D:test_D+eps_D],:]', setup,
start_date=datetime(2015, 1, 1))
+
+#----------------------------------------------------------------------
+# take
+
+setup = common_setup + """
+s = Series(np.random.rand(100000))
+ts = Series(np.random.rand(100000),
+ index=date_range('2011-01-01', freq='S', periods=100000))
+indexer = [True, False, True, True, False] * 20000
+"""
+
+series_take_intindex = Benchmark("s.take(indexer)", setup)
+series_take_dtindex = Benchmark("ts.take(indexer)", setup)
| `Index.freq` should raise `AttributeError` in non-datetime-like index. But `Index.take` adds `freq=None` and broke the behavior.
```
import pandas as pd
idx = pd.Index([1, 2, 3])
idx.freq
# AttributeError: 'Int64Index' object has no attribute 'freq'
idx.take([True, False, True]).freq
# None
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10791 | 2015-08-10T23:30:11Z | 2015-08-12T13:51:50Z | 2015-08-12T13:51:50Z | 2015-08-12T13:51:53Z |
Update install.rst | diff --git a/doc/source/install.rst b/doc/source/install.rst
index aaa39dd383e2e..1e2f76042623a 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -153,6 +153,8 @@ and can take a few minutes to complete.
Installing using your Linux distribution's package manager.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The commands in this table will install pandas for Python 2 from your distribution.
+To install pandas for Python 3 you need to use the package ``python3-pandas``.
.. csv-table::
| closes #10782
- Added hint regarding pip install on low memory machines.
- Added hint to python 3 version of pandas from distributon repos.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10784 | 2015-08-10T12:51:02Z | 2015-08-11T10:35:34Z | null | 2015-08-11T10:35:34Z |
BUG: GH10747 where cast_to_nanoseconds from NaT fails | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 70d616ca72c1b..f32028fe7b2f5 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -629,4 +629,5 @@ Bug Fixes
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
+- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f14358452ec13..12a43986d32bd 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -3291,6 +3291,12 @@ def test_NaT_scalar(self):
series[2] = val
self.assertTrue(com.isnull(series[2]))
+ def test_NaT_cast(self):
+ # GH10747
+ result = Series([np.nan]).astype('M8[ns]')
+ expected = Series([NaT])
+ assert_series_equal(result, expected)
+
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index bf134a0a6d996..369993b4c54d1 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -3253,9 +3253,12 @@ def cast_to_nanoseconds(ndarray arr):
unit = get_datetime64_unit(arr.flat[0])
for i in range(n):
- pandas_datetime_to_datetimestruct(ivalues[i], unit, &dts)
- iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
- _check_dts_bounds(&dts)
+ if ivalues[i] != NPY_NAT:
+ pandas_datetime_to_datetimestruct(ivalues[i], unit, &dts)
+ iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ _check_dts_bounds(&dts)
+ else:
+ iresult[i] = NPY_NAT
return result
| related to first of #10747
| https://api.github.com/repos/pandas-dev/pandas/pulls/10776 | 2015-08-09T06:52:20Z | 2015-08-12T13:44:32Z | 2015-08-12T13:44:32Z | 2015-08-12T13:44:37Z |
BUG: `read_sql_table` can't find views | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 70d616ca72c1b..c679f53270ed1 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -137,11 +137,15 @@ Other enhancements
- ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`)
- ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`).
+
- ``pd.read_stata`` will now read Stata 118 type files. (:issue:`9882`)
- ``pd.merge`` will now allow duplicate column names if they are not merged upon (:issue:`10639`).
- ``pd.pivot`` will now allow passing index as ``None`` (:issue:`3962`).
+
+- ``read_sql_table`` will now allow reading from views (:issue:`10750`).
+
- ``drop_duplicates`` and ``duplicated`` now accept ``keep`` keyword to target first, last, and all duplicates. ``take_last`` keyword is deprecated, see :ref:`deprecations <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`)
.. ipython :: python
@@ -151,6 +155,35 @@ Other enhancements
s.drop_duplicates(keep='last')
s.drop_duplicates(keep=False)
+- ``read_sql_table`` will now allow reading from views (:issue:`10750`).
+
+- ``concat`` will now inherit the existing series names (even when some are missing), if new ones are not provided through the ``keys`` argument (:issue:`10698`).
+
+ Previous Behavior:
+
+ .. code-block:: python
+
+ In [1]: foo = pd.Series([1,2], name='foo')
+ In [2]: bar = pd.Series([1,2])
+ In [3]: baz = pd.Series([4,5])
+ In [4] pd.concat([foo, bar, baz], 1)
+ Out[4]:
+ 0 1 2
+ 0 1 1 4
+ 1 2 2 5
+
+ New Behavior:
+
+ .. ipython:: python
+
+ foo = pd.Series([1,2], name='foo')
+ bar = pd.Series([1,2])
+ baz = pd.Series([4,5])
+ pd.concat([foo, bar, baz], 1)
+
+- ``read_sql_table`` will now allow reading from views (:issue:`10750`).
+
+- ``read_sql_table`` will now allow reading from views (:issue:`10750`).
.. _whatsnew_0170.api:
@@ -529,7 +562,6 @@ Deprecations
===================== =================================
- ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
-- ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was removed in favor of ``keep``. (:issue:`6511`, :issue:`8505`)
.. _whatsnew_0170.prior_deprecations:
@@ -616,9 +648,6 @@ Bug Fixes
- Bug in ``read_stata`` when reading a file with a different order set in ``columns`` (:issue:`10757`)
-- Bug in ``Categorical`` may not representing properly when category contains ``tz`` or ``Period`` (:issue:`10713`)
-- Bug in ``Categorical.__iter__`` may not returning correct ``datetime`` and ``Period`` (:issue:`10713`)
-
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in ``read_msgpack`` where DataFrame to decode has duplicate column names (:issue:`9618`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 8eefe4ba98876..b587ec128c016 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -337,7 +337,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
- meta.reflect(only=[table_name])
+ meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 859c6d3250121..c78d193124b76 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -161,6 +161,12 @@
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
+ },
+ 'create_view': {
+ 'sqlite': """
+ CREATE VIEW iris_view AS
+ SELECT * FROM iris;
+ """
}
}
@@ -244,6 +250,10 @@ def _load_iris_data(self):
for row in r:
self._get_exec().execute(ins, row)
+ def _load_iris_view(self):
+ self.drop_table('iris_view')
+ self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
+
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
@@ -482,6 +492,7 @@ class _TestSQLApi(PandasSQLTest):
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
+ self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
@@ -492,6 +503,11 @@ def test_read_sql_iris(self):
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
+ def test_read_sql_view(self):
+ iris_frame = sql.read_sql_query(
+ "SELECT * FROM iris_view", self.conn)
+ self._check_iris_loaded_frame(iris_frame)
+
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 430828a3db31b..d04cc8c4a7754 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -16,7 +16,7 @@
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util.decorators import Appender, Substitution
-from pandas.core.common import ABCSeries
+from pandas.core.common import ABCSeries, isnull
from pandas.io.parsers import TextFileReader
import pandas.core.common as com
@@ -896,8 +896,14 @@ def get_result(self):
data = dict(zip(range(len(self.objs)), self.objs))
index, columns = self.new_axes
tmpdf = DataFrame(data, index=index)
- if columns is not None:
- tmpdf.columns = columns
+ # checks if the column variable already stores valid column names (because set via the 'key' argument
+ # in the 'concat' function call. If that's not the case, use the series names as column names
+ if columns.equals(Index(np.arange(len(self.objs)))):
+ columns = np.array([ data[i].name for i in range(len(data)) ], dtype='object')
+ indexer = isnull(columns)
+ if indexer.any():
+ columns[indexer] = np.arange(len(indexer[indexer]))
+ tmpdf.columns = columns
return tmpdf.__finalize__(self, method='concat')
# combine block managers
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 8b1457e7fd490..3be283eff1bb4 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1797,6 +1797,15 @@ def test_concat_dataframe_keys_bug(self):
self.assertEqual(list(result.columns), [('t1', 'value'),
('t2', 'value')])
+ def test_concat_series_partial_columns_names(self):
+ foo = pd.Series([1,2], name='foo')
+ bar = pd.Series([1,2])
+ baz = pd.Series([4,5])
+
+ result = pd.concat([foo, bar, baz], 1)
+ expected = DataFrame({'foo' : [1,2], 0 : [1,2], 1 : [4,5]}, columns=['foo',0,1])
+ tm.assert_frame_equal(result, expected)
+
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
@@ -2330,7 +2339,7 @@ def test_concat_series_axis1(self):
s2.name = None
result = concat([s, s2], axis=1)
- self.assertTrue(np.array_equal(result.columns, lrange(2)))
+ self.assertTrue(np.array_equal(result.columns, Index(['A', 0], dtype='object')))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
@@ -2431,7 +2440,7 @@ def test_concat_series_axis1_same_names_ignore_index(self):
s2 = Series(randn(len(dates)), index=dates, name='value')
result = concat([s1, s2], axis=1, ignore_index=True)
- self.assertTrue(np.array_equal(result.columns, [0, 1]))
+ self.assertTrue(np.array_equal(result.columns, ['value', 'value']))
def test_concat_iterables(self):
from collections import deque, Iterable
| closes #10750
Add ability to 'read_sql_table' to read views and implement an unit test to check its behaviour
| https://api.github.com/repos/pandas-dev/pandas/pulls/10771 | 2015-08-08T21:31:09Z | 2015-08-12T06:46:14Z | null | 2015-08-12T09:30:03Z |
BUG: Fix dtypes order when ordering is different from original file in pandas.io.stata.read_stata | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9a9054fcf0489..aa96aacd6f0dd 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -598,7 +598,7 @@ Bug Fixes
- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`)
-
+- Bug in ``read_stata`` when reading a file with a different order set in ``columns`` (:issue:`10757`)
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in ``read_msgpack`` where DataFrame to decode has duplicate column names (:issue:`9618`)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 0266e2beeca40..5afbc2671e3a7 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1614,14 +1614,12 @@ def _do_select_columns(self, data, columns):
typlist = []
fmtlist = []
lbllist = []
- matched = set()
- for i, col in enumerate(data.columns):
- if col in column_set:
- matched.update([col])
- dtyplist.append(self.dtyplist[i])
- typlist.append(self.typlist[i])
- fmtlist.append(self.fmtlist[i])
- lbllist.append(self.lbllist[i])
+ for col in columns:
+ i = data.columns.get_loc(col)
+ dtyplist.append(self.dtyplist[i])
+ typlist.append(self.typlist[i])
+ fmtlist.append(self.fmtlist[i])
+ lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index aa9f27d1515d3..5b934bad38bd3 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -782,6 +782,14 @@ def test_drop_column(self):
columns=columns)
tm.assert_frame_equal(expected, dropped)
+
+ # See PR 10757
+ columns = ['int_', 'long_', 'byte_']
+ expected = expected[columns]
+ reordered = read_stata(self.dta15_117, convert_dates=True,
+ columns=columns)
+ tm.assert_frame_equal(expected, reordered)
+
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
| This fixes http://stackoverflow.com/questions/31783392/pandas-adding-columns-to-filter-will-mess-with-data-structure
The dtypes of the DataFrame returned by read_stata have the same order as the original stata file, even if columns are specified with a different order.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10757 | 2015-08-06T12:27:07Z | 2015-08-06T15:16:59Z | 2015-08-06T15:16:59Z | 2015-08-06T15:17:07Z |
Add kwargs for kde in scatter_matrix | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index c16e2686c5a3a..7330089d14bbc 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -236,7 +236,7 @@ def use(self, key, value):
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
- hist_kwds=None, range_padding=0.05, **kwds):
+ hist_kwds=None, range_padding=0.05, kde_kwds=None, **kwds):
"""
Draw a matrix of scatter plots.
@@ -290,6 +290,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
+ kde_kwds = kde_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
@@ -315,7 +316,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
- gkde = gaussian_kde(y)
+ gkde = gaussian_kde(y, **kde_kwds)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
| Minor change to allow customizing the kde through the pandas api.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10752 | 2015-08-04T22:04:23Z | 2015-10-13T21:03:11Z | null | 2015-10-13T21:03:11Z |
Fix docstring spelling | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index e73fa207152c1..7837fb60da9d6 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -193,7 +193,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
See Also
--------
- match : analagous, but stricter, relying on re.match instead of re.search
+ match : analogous, but stricter, relying on re.match instead of re.search
"""
if regex:
| "analagous" -> "analogous"
| https://api.github.com/repos/pandas-dev/pandas/pulls/10751 | 2015-08-04T21:32:38Z | 2015-08-05T06:44:58Z | 2015-08-05T06:44:58Z | 2015-08-05T06:45:09Z |
DOC: fix some doc build errors/warnings | diff --git a/doc/source/install.rst b/doc/source/install.rst
index 31fee45eb0266..aaa39dd383e2e 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -267,11 +267,11 @@ Optional Dependencies
installation.
* Google's `python-gflags <http://code.google.com/p/python-gflags/>`__
and `google-api-python-client <http://github.com/google/google-api-python-client>`__
- * Needed for :mod:`~pandas.io.gbq`
+ * Needed for :mod:`~pandas.io.gbq`
* `setuptools <https://pypi.python.org/pypi/setuptools/>`__
- * Needed for :mod:`~pandas.io.gbq` (specifically, it utilizes `pkg_resources`)
+ * Needed for :mod:`~pandas.io.gbq` (specifically, it utilizes `pkg_resources`)
* `httplib2 <http://pypi.python.org/pypi/httplib2>`__
- * Needed for :mod:`~pandas.io.gbq`
+ * Needed for :mod:`~pandas.io.gbq`
* One of the following combinations of libraries is needed to use the
top-level :func:`~pandas.io.html.read_html` function:
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 85a7f80696331..38a8d4d05b807 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3610,7 +3610,7 @@ below and the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/c
If you want to manage your own connections you can pass one of those instead:
-.. ipython:: python
+.. code-block:: python
with engine.connect() as conn, conn.begin():
data = pd.read_sql_table('data', conn)
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 51912b5d6b106..4378d182b3128 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1649,6 +1649,7 @@ values, the resulting grid has two columns and two rows. A histogram is
displayed for each cell of the grid.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1680,6 +1681,7 @@ Example below is the same as previous except the plot is set to kernel density
estimation. A ``seaborn`` example is included beneath.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1706,6 +1708,7 @@ The plot below shows that it is possible to have two or more plots for the same
data displayed on the same Trellis grid cell.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1745,6 +1748,7 @@ Below is a similar plot but with 2D kernel density estimation plot superimposed,
followed by a ``seaborn`` equivalent:
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1774,6 +1778,7 @@ only uses 'sex' attribute. If the second grouping attribute is not specified,
the plots will be arranged in a column.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1792,6 +1797,7 @@ the plots will be arranged in a column.
If the first grouping attribute is not specified the plots will be arranged in a row.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1816,6 +1822,7 @@ scale objects to specify these mappings. The list of scale classes is
given below with initialization arguments for quick reference.
.. ipython:: python
+ :okwarning:
plt.figure()
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 87a3042061b16..b68041df32b98 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -13,13 +13,13 @@ users upgrade to this version.
Highlights include:
- - Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
- - The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
- previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
- - The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
- if they are all ``NaN``, see :ref:`here <whatsnew_0170.api_breaking.hdf_dropna>`
- - Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>`
- - Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
+- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
+- The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
+ previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
+- The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
+ if they are all ``NaN``, see :ref:`here <whatsnew_0170.api_breaking.hdf_dropna>`
+- Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>`
+- Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating.
@@ -448,6 +448,7 @@ from ``7``.
.. ipython:: python
:suppress:
+
pd.set_option('display.precision', 6)
@@ -481,9 +482,9 @@ Series with a ``CategoricalIndex`` (:issue:`10704`)
- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`)
- ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`)
- =============================== ==============================================================
+ =============================== ===============================================================
Behavior Methods
- =============================== ==============================================================
+ =============================== ===============================================================
``return np.nan`` ``weekday``, ``isoweekday``
``return NaT`` ``date``, ``now``, ``replace``, ``to_datetime``, ``today``
``return np.datetime64('NaT')`` ``to_datetime64`` (unchanged)
| This should get the sphinx doc build again warning/error free (apart from the one handled in #10602)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10749 | 2015-08-04T18:47:49Z | 2015-08-04T21:15:44Z | 2015-08-04T21:15:44Z | 2015-08-04T21:15:44Z |
Allow DateOffset addition with Series | diff --git a/doc/source/api.rst b/doc/source/api.rst
index a1284a3ff7bc9..f0c79fc7d567f 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1444,6 +1444,7 @@ Conversion
DatetimeIndex.to_datetime
DatetimeIndex.to_period
+ DatetimeIndex.to_perioddelta
DatetimeIndex.to_pydatetime
DatetimeIndex.to_series
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index 8215414e425fe..e62f4f9387526 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -97,6 +97,8 @@ It will construct Series if the input is a Series, a scalar if the input is scal
to_timedelta(np.arange(5),unit='s')
to_timedelta(np.arange(5),unit='d')
+.. _timedeltas.operations:
+
Operations
----------
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1b5a4586e59e7..753c3ac57fb50 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -647,6 +647,46 @@ Another example is parameterizing ``YearEnd`` with the specific ending month:
d + YearEnd()
d + YearEnd(month=6)
+
+.. _timeseries.offsetseries:
+
+Using offsets with ``Series`` / ``DatetimeIndex``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Offsets can be used with either a ``Series`` or ``DatetimeIndex`` to
+apply the offset to each element.
+
+.. ipython:: python
+
+ rng = date_range('2012-01-01', '2012-01-03')
+ s = Series(rng)
+ rng
+ rng + DateOffset(months=2)
+ s + DateOffset(months=2)
+ s - DateOffset(months=2)
+
+If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``,
+``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be
+used exactly like a ``Timedelta`` - see the
+:ref:`Timedelta section<timedeltas.operations>` for more examples.
+
+.. ipython:: python
+
+ s - Day(2)
+ td = s - Series(date_range('2011-12-29', '2011-12-31'))
+ td
+ td + Minute(15)
+
+Note that some offsets (such as ``BQuarterEnd``) do not have a
+vectorized implementation. They can still be used but may
+calculate signficantly slower and will raise a ``PerformanceWarning``
+
+.. ipython:: python
+ :okwarning:
+
+ rng + BQuarterEnd()
+
+
.. _timeseries.alias:
Custom Business Days (Experimental)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 16c6c639a489e..68c23951ffbd1 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -133,6 +133,8 @@ Other enhancements
- ``to_datetime`` can now accept ``yearfirst`` keyword (:issue:`7599`)
+- ``pandas.tseries.offsets`` larger than the ``Day`` offset can now be used with with ``Series`` for addition/subtraction (:issue:`10699`). See the :ref:`Documentation <timeseries.offsetseries>` for more details.
+
- ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`)
- ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`).
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 6a278e0e44306..8e3dd3836855c 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -6,6 +6,7 @@
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
+import warnings
import numpy as np
import pandas as pd
from pandas import compat, lib, tslib
@@ -21,7 +22,7 @@
needs_i8_conversion, is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype, is_object_dtype,
is_timedelta64_dtype, is_datetime64_dtype, is_bool_dtype)
-
+from pandas.io.common import PerformanceWarning
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
@@ -276,12 +277,16 @@ def __init__(self, left, right, name):
self.left = left
self.right = right
- lvalues = self._convert_to_array(left, name=name)
- rvalues = self._convert_to_array(right, name=name, other=lvalues)
+ self.is_offset_lhs = self._is_offset(left)
+ self.is_offset_rhs = self._is_offset(right)
+
+ lvalues = self._convert_to_array(left, name=name)
self.is_timedelta_lhs = is_timedelta64_dtype(left)
self.is_datetime_lhs = is_datetime64_dtype(left)
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
+
+ rvalues = self._convert_to_array(right, name=name, other=lvalues)
self.is_datetime_rhs = is_datetime64_dtype(rvalues)
self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
@@ -309,7 +314,10 @@ def _validate(self):
" passed" % self.name)
# 2 timedeltas
- elif self.is_timedelta_lhs and self.is_timedelta_rhs:
+ elif ((self.is_timedelta_lhs and
+ (self.is_timedelta_rhs or self.is_offset_rhs)) or
+ (self.is_timedelta_rhs and
+ (self.is_timedelta_lhs or self.is_offset_lhs))):
if self.name not in ('__div__', '__truediv__', '__add__',
'__sub__'):
@@ -317,19 +325,21 @@ def _validate(self):
"addition, subtraction, and division, but the"
" operator [%s] was passed" % self.name)
- # datetime and timedelta
- elif self.is_datetime_lhs and self.is_timedelta_rhs:
+ # datetime and timedelta/DateOffset
+ elif (self.is_datetime_lhs and
+ (self.is_timedelta_rhs or self.is_offset_rhs)):
if self.name not in ('__add__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of"
- " a timedelta for addition and subtraction, "
+ " a timedelta/DateOffset for addition and subtraction,"
" but the operator [%s] was passed" %
self.name)
- elif self.is_timedelta_lhs and self.is_datetime_rhs:
+ elif ((self.is_timedelta_lhs or self.is_offset_lhs)
+ and self.is_datetime_rhs):
if self.name != '__add__':
- raise TypeError("can only operate on a timedelta and"
+ raise TypeError("can only operate on a timedelta/DateOffset and"
" a datetime for addition, but the operator"
" [%s] was passed" % self.name)
else:
@@ -371,18 +381,7 @@ def _convert_to_array(self, values, name=None, other=None):
elif name not in ('__truediv__', '__div__', '__mul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
- elif isinstance(values[0], pd.DateOffset):
- # handle DateOffsets
- os = np.array([getattr(v, 'delta', None) for v in values])
- mask = isnull(os)
- if mask.any():
- raise TypeError("cannot use a non-absolute DateOffset in "
- "datetime/timedelta operations [{0}]".format(
- ', '.join([com.pprint_thing(v)
- for v in values[mask]])))
- values = to_timedelta(os, errors='coerce')
elif inferred_type == 'floating':
-
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
if isnull(values).all():
values = np.empty(values.shape, dtype=other.dtype)
@@ -391,6 +390,8 @@ def _convert_to_array(self, values, name=None, other=None):
raise TypeError(
'incompatible type [{0}] for a datetime/timedelta '
'operation'.format(np.array(values).dtype))
+ elif self._is_offset(values):
+ return values
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
@@ -398,6 +399,7 @@ def _convert_to_array(self, values, name=None, other=None):
return values
def _convert_for_datetime(self, lvalues, rvalues):
+ from pandas.tseries.timedeltas import to_timedelta
mask = None
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
@@ -407,13 +409,40 @@ def _convert_for_datetime(self, lvalues, rvalues):
else:
self.dtype = 'datetime64[ns]'
mask = isnull(lvalues) | isnull(rvalues)
- lvalues = lvalues.view(np.int64)
- rvalues = rvalues.view(np.int64)
+
+ # if adding single offset try vectorized path
+ # in DatetimeIndex; otherwise elementwise apply
+ if self.is_offset_lhs:
+ if len(lvalues) == 1:
+ rvalues = pd.DatetimeIndex(rvalues)
+ lvalues = lvalues[0]
+ else:
+ warnings.warn("Adding/subtracting array of DateOffsets to Series not vectorized",
+ PerformanceWarning)
+ rvalues = rvalues.astype('O')
+ elif self.is_offset_rhs:
+ if len(rvalues) == 1:
+ lvalues = pd.DatetimeIndex(lvalues)
+ rvalues = rvalues[0]
+ else:
+ warnings.warn("Adding/subtracting array of DateOffsets to Series not vectorized",
+ PerformanceWarning)
+ lvalues = lvalues.astype('O')
+ else:
+ lvalues = lvalues.view(np.int64)
+ rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
self.dtype = 'timedelta64[ns]'
mask = isnull(lvalues) | isnull(rvalues)
+
+ # convert Tick DateOffset to underlying delta
+ if self.is_offset_lhs:
+ lvalues = to_timedelta(lvalues)
+ if self.is_offset_rhs:
+ rvalues = to_timedelta(rvalues)
+
lvalues = lvalues.astype(np.int64)
rvalues = rvalues.astype(np.int64)
@@ -439,6 +468,16 @@ def f(x):
self.lvalues = lvalues
self.rvalues = rvalues
+
+ def _is_offset(self, arr_or_obj):
+ """ check if obj or all elements of list-like is DateOffset """
+ if isinstance(arr_or_obj, pd.DateOffset):
+ return True
+ elif is_list_like(arr_or_obj):
+ return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
+ else:
+ return False
+
@classmethod
def maybe_convert_for_time_op(cls, left, right, name):
"""
@@ -532,8 +571,8 @@ def wrapper(left, right, name=name):
name=name, dtype=dtype)
else:
# scalars
- if hasattr(lvalues, 'values'):
- lvalues = lvalues.values
+ if hasattr(lvalues, 'values') and not isinstance(lvalues, pd.DatetimeIndex):
+ lvalues = lvalues.values
return left._constructor(wrap_results(na_op(lvalues, rvalues)),
index=left.index, name=left.name,
dtype=dtype)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 66a38cd858846..bd1b0ac8905b2 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3286,14 +3286,37 @@ def test_timedeltas_with_DateOffset(self):
s + op(5)
op(5) + s
- # invalid DateOffsets
- for do in [ 'Week', 'BDay', 'BQuarterEnd', 'BMonthEnd', 'BYearEnd',
- 'BYearBegin','BQuarterBegin', 'BMonthBegin',
- 'MonthEnd','YearBegin', 'YearEnd',
- 'MonthBegin', 'QuarterBegin' ]:
+
+ def test_timedelta64_operations_with_DateOffset(self):
+ # GH 10699
+ td = Series([timedelta(minutes=5, seconds=3)] * 3)
+ result = td + pd.offsets.Minute(1)
+ expected = Series([timedelta(minutes=6, seconds=3)] * 3)
+ assert_series_equal(result, expected)
+
+ result = td - pd.offsets.Minute(1)
+ expected = Series([timedelta(minutes=4, seconds=3)] * 3)
+ assert_series_equal(result, expected)
+
+ result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
+ pd.offsets.Hour(2)])
+ expected = Series([timedelta(minutes=6, seconds=3),
+ timedelta(minutes=5, seconds=6),
+ timedelta(hours=2, minutes=5, seconds=3)])
+ assert_series_equal(result, expected)
+
+ result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
+ expected = Series([timedelta(minutes=6, seconds=15)] * 3)
+ assert_series_equal(result, expected)
+
+ # valid DateOffsets
+ for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
+ 'Milli', 'Nano' ]:
op = getattr(pd.offsets,do)
- self.assertRaises(TypeError, s.__add__, op(5))
- self.assertRaises(TypeError, s.__radd__, op(5))
+ td + op(5)
+ op(5) + td
+ td - op(5)
+ op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 8ee6a1bc64e4e..576656ad3ed9d 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1,5 +1,6 @@
# pylint: disable=E1101
import operator
+import warnings
from datetime import time, datetime
from datetime import timedelta
import numpy as np
@@ -7,6 +8,7 @@
_values_from_object, _maybe_box,
ABCSeries, is_integer, is_float,
is_object_dtype, is_datetime64_dtype)
+from pandas.io.common import PerformanceWarning
from pandas.core.index import Index, Int64Index, Float64Index
import pandas.compat as compat
from pandas.compat import u
@@ -16,6 +18,7 @@
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
+from pandas.tseries.timedeltas import to_timedelta
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
import pandas.tseries.offsets as offsets
@@ -672,8 +675,11 @@ def _add_delta(self, delta):
new_values = self._add_delta_tdi(delta)
# update name when delta is Index
name = com._maybe_match_name(self, delta)
+ elif isinstance(delta, DateOffset):
+ new_values = self._add_offset(delta).asi8
else:
new_values = self.astype('O') + delta
+
tz = 'UTC' if self.tz is not None else None
result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
utc = _utc()
@@ -681,6 +687,14 @@ def _add_delta(self, delta):
result = result.tz_convert(self.tz)
return result
+ def _add_offset(self, offset):
+ try:
+ return offset.apply_index(self)
+ except NotImplementedError:
+ warnings.warn("Non-vectorized DateOffset being applied to Series or DatetimeIndex",
+ PerformanceWarning)
+ return self.astype('O') + offset
+
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.core.format import _get_format_datetime64_from_values
@@ -834,6 +848,24 @@ def union(self, other):
result.offset = to_offset(result.inferred_freq)
return result
+ def to_perioddelta(self, freq):
+ """
+ Calcuates TimedeltaIndex of difference between index
+ values and index converted to PeriodIndex at specified
+ freq. Used for vectorized offsets
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ freq : Period frequency
+
+ Returns
+ -------
+ y : TimedeltaIndex
+ """
+ return to_timedelta(self.asi8 - self.to_period(freq).to_timestamp().asi8)
+
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 941456fa07cfa..33faac153cce0 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -4,6 +4,8 @@
import numpy as np
from pandas.tseries.tools import to_datetime
+from pandas.tseries.timedeltas import to_timedelta
+from pandas.core.common import ABCSeries, ABCDatetimeIndex
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
@@ -93,6 +95,15 @@ def wrapper(self, other):
return wrapper
+def apply_index_wraps(func):
+ @functools.wraps(func)
+ def wrapper(self, other):
+ result = func(self, other)
+ if self.normalize:
+ result = result.to_period('D').to_timestamp()
+ return result
+ return wrapper
+
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0
or dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
@@ -221,6 +232,67 @@ def apply(self, other):
else:
return other + timedelta(self.n)
+ @apply_index_wraps
+ def apply_index(self, i):
+ """
+ Vectorized apply of DateOffset to DatetimeIndex,
+ raises NotImplentedError for offsets without a
+ vectorized implementation
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ i : DatetimeIndex
+
+ Returns
+ -------
+ y : DatetimeIndex
+ """
+
+ if not type(self) is DateOffset:
+ raise NotImplementedError("DateOffset subclass %s "
+ "does not have a vectorized "
+ "implementation"
+ % (self.__class__.__name__,))
+ relativedelta_fast = set(['years', 'months', 'weeks',
+ 'days', 'hours', 'minutes',
+ 'seconds', 'microseconds'])
+ # relativedelta/_offset path only valid for base DateOffset
+ if (self._use_relativedelta and
+ set(self.kwds).issubset(relativedelta_fast)):
+ months = ((self.kwds.get('years', 0) * 12
+ + self.kwds.get('months', 0)) * self.n)
+ if months:
+ base = (i.to_period('M') + months).to_timestamp()
+ time = i.to_perioddelta('D')
+ days = i.to_perioddelta('M') - time
+ # minimum prevents month-end from wrapping
+ day_offset = np.minimum(days,
+ to_timedelta(base.days_in_month - 1, unit='D'))
+ i = base + day_offset + time
+
+ weeks = (self.kwds.get('weeks', 0)) * self.n
+ if weeks:
+ i = (i.to_period('W') + weeks).to_timestamp() + i.to_perioddelta('W')
+
+ timedelta_kwds = dict((k,v) for k,v in self.kwds.items()
+ if k in ['days','hours','minutes',
+ 'seconds','microseconds'])
+ if timedelta_kwds:
+ delta = Timedelta(**timedelta_kwds)
+ i = i + (self.n * delta)
+ return i
+ elif not self._use_relativedelta and hasattr(self, '_offset'):
+ # timedelta
+ return i + (self._offset * self.n)
+ else:
+ # relativedelta with other keywords
+ raise NotImplementedError("DateOffset with relativedelta "
+ "keyword(s) %s not able to be "
+ "applied vectorized" %
+ (set(self.kwds) - relativedelta_fast),)
+
def isAnchored(self):
return (self.n == 1)
@@ -307,6 +379,8 @@ def __call__(self, other):
return self.apply(other)
def __add__(self, other):
+ if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
+ return other + self
try:
return self.apply(other)
except ApplyTypeError:
@@ -324,6 +398,8 @@ def __sub__(self, other):
return NotImplemented
def __rsub__(self, other):
+ if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
+ return other - self
return self.__class__(-self.n, normalize=self.normalize, **self.kwds) + other
def __mul__(self, someInt):
@@ -363,6 +439,37 @@ def onOffset(self, dt):
b = ((dt + self) - self)
return a == b
+ # helpers for vectorized offsets
+ def _beg_apply_index(self, i, freq):
+ """Offsets index to beginning of Period frequency"""
+
+ off = i.to_perioddelta('D')
+ base_period = i.to_period(freq)
+ if self.n < 0:
+ # when subtracting, dates on start roll to prior
+ roll = np.where(base_period.to_timestamp() == i - off,
+ self.n, self.n + 1)
+ else:
+ roll = self.n
+
+ base = (base_period + roll).to_timestamp()
+ return base + off
+
+ def _end_apply_index(self, i, freq):
+ """Offsets index to end of Period frequency"""
+
+ off = i.to_perioddelta('D')
+ base_period = i.to_period(freq)
+ if self.n > 0:
+ # when adding, dtates on end roll to next
+ roll = np.where(base_period.to_timestamp(how='end') == i - off,
+ self.n, self.n - 1)
+ else:
+ roll = self.n
+
+ base = (base_period + roll).to_timestamp(how='end')
+ return base + off
+
# way to get around weirdness with rule_code
@property
def _prefix(self):
@@ -529,6 +636,19 @@ def apply(self, other):
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
+ @apply_index_wraps
+ def apply_index(self, i):
+ time = i.to_perioddelta('D')
+ # to_period rolls forward to next BDay; track and
+ # reduce n where it does when rolling forward
+ shifted = (i.to_perioddelta('B') - time).asi8 != 0
+ if self.n > 0:
+ roll = np.where(shifted, self.n - 1, self.n)
+ else:
+ roll = self.n
+
+ return (i.to_period('B') + roll).to_timestamp() + time
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -902,6 +1022,9 @@ def apply(self, other):
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
+ def apply_index(self, i):
+ raise NotImplementedError
+
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
# Currently
@@ -949,6 +1072,10 @@ def apply(self, other):
other = other + relativedelta(months=n, day=31)
return other
+ @apply_index_wraps
+ def apply_index(self, i):
+ return self._end_apply_index(i, 'M')
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -970,6 +1097,10 @@ def apply(self, other):
return other + relativedelta(months=n, day=1)
+ @apply_index_wraps
+ def apply_index(self, i):
+ return self._beg_apply_index(i, 'M')
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -1211,6 +1342,13 @@ def apply(self, other):
base.hour, base.minute, base.second, base.microsecond)
return other
+ @apply_index_wraps
+ def apply_index(self, i):
+ if self.weekday is None:
+ return (i.to_period('W') + self.n).to_timestamp() + i.to_perioddelta('W')
+ else:
+ return self._end_apply_index(i, self.freqstr)
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -1587,6 +1725,10 @@ def apply(self, other):
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
+ @apply_index_wraps
+ def apply_index(self, i):
+ return self._end_apply_index(i, self.freqstr)
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -1621,6 +1763,11 @@ def apply(self, other):
other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
+ @apply_index_wraps
+ def apply_index(self, i):
+ freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
+ freqstr = 'Q-%s' % (_int_to_month[freq_month],)
+ return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
@@ -1764,6 +1911,11 @@ def _rollf(date):
result = _rollf(result)
return result
+ @apply_index_wraps
+ def apply_index(self, i):
+ # convert month anchor to annual period tuple
+ return self._end_apply_index(i, self.freqstr)
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -1809,6 +1961,12 @@ def _rollf(date):
result = _rollf(result)
return result
+ @apply_index_wraps
+ def apply_index(self, i):
+ freq_month = 12 if self.month == 1 else self.month - 1
+ freqstr = 'A-%s' % (_int_to_month[freq_month],)
+ return self._beg_apply_index(i, freqstr)
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -2311,6 +2469,7 @@ def apply(self, other):
_prefix = 'undefined'
+
def isAnchored(self):
return False
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f14358452ec13..1b14625310fd3 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -31,6 +31,7 @@
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
from numpy.random import rand
from pandas.util.testing import assert_frame_equal
+from pandas.io.common import PerformanceWarning
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
@@ -2454,6 +2455,91 @@ def test_intersection_bug_1708(self):
result = index_1 & index_2
self.assertEqual(len(result), 0)
+ # GH 10699
+ def test_datetime64_with_DateOffset(self):
+ for klass, assert_func in zip([Series, DatetimeIndex],
+ [self.assert_series_equal,
+ tm.assert_index_equal]):
+ s = klass(date_range('2000-01-01', '2000-01-31'))
+ result = s + pd.DateOffset(years=1)
+ result2 = pd.DateOffset(years=1) + s
+ exp = klass(date_range('2001-01-01', '2001-01-31'))
+ assert_func(result, exp)
+ assert_func(result2, exp)
+
+ result = s - pd.DateOffset(years=1)
+ exp = klass(date_range('1999-01-01', '1999-01-31'))
+ assert_func(result, exp)
+
+ s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
+ pd.Timestamp('2000-02-15', tz='US/Central')])
+ result = s + pd.offsets.MonthEnd()
+ result2 = pd.offsets.MonthEnd() + s
+ exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
+ Timestamp('2000-02-29', tz='US/Central')])
+ assert_func(result, exp)
+ assert_func(result2, exp)
+
+ # array of offsets - valid for Series only
+ if klass is Series:
+ with tm.assert_produces_warning(PerformanceWarning):
+ s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
+ result = s + Series([pd.offsets.DateOffset(years=1),
+ pd.offsets.MonthEnd()])
+ exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')])
+ assert_func(result, exp)
+
+ # same offset
+ result = s + Series([pd.offsets.DateOffset(years=1),
+ pd.offsets.DateOffset(years=1)])
+ exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
+ assert_func(result, exp)
+
+ s = klass([Timestamp('2000-01-05 00:15:00'), Timestamp('2000-01-31 00:23:00'),
+ Timestamp('2000-01-01'), Timestamp('2000-02-29'), Timestamp('2000-12-31')])
+
+ #DateOffset relativedelta fastpath
+ relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
+ ('hours', 5), ('minutes', 10), ('seconds', 2),
+ ('microseconds', 5)]
+ for i, kwd in enumerate(relative_kwargs):
+ op = pd.DateOffset(**dict([kwd]))
+ assert_func(klass([x + op for x in s]), s + op)
+ assert_func(klass([x - op for x in s]), s - op)
+ op = pd.DateOffset(**dict(relative_kwargs[:i+1]))
+ assert_func(klass([x + op for x in s]), s + op)
+ assert_func(klass([x - op for x in s]), s - op)
+
+
+ # split by fast/slow path to test perf warning
+ off = {False:
+ ['YearBegin', ('YearBegin', {'month': 5}),
+ 'YearEnd', ('YearEnd', {'month': 5}),
+ 'MonthBegin', 'MonthEnd', 'Week', ('Week', {'weekday': 3}),
+ 'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin'],
+ PerformanceWarning:
+ ['CustomBusinessDay', 'CDay', 'CBMonthEnd','CBMonthBegin',
+ 'BMonthBegin', 'BMonthEnd', 'BusinessHour', 'BYearBegin',
+ 'BYearEnd','BQuarterBegin', ('LastWeekOfMonth', {'weekday':2}),
+ ('FY5253Quarter', {'qtr_with_extra_week': 1, 'startingMonth': 1,
+ 'weekday': 2, 'variation': 'nearest'}),
+ ('FY5253',{'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}),
+ ('WeekOfMonth', {'weekday': 2, 'week': 2}), 'Easter',
+ ('DateOffset', {'day': 4}), ('DateOffset', {'month': 5})]}
+
+ for normalize in (True, False):
+ for warning, offsets in off.items():
+ for do in offsets:
+ if isinstance(do, tuple):
+ do, kwargs = do
+ else:
+ do = do
+ kwargs = {}
+ op = getattr(pd.offsets,do)(5, normalize=normalize, **kwargs)
+ with tm.assert_produces_warning(warning):
+ assert_func(klass([x + op for x in s]), s + op)
+ assert_func(klass([x - op for x in s]), s - op)
+ assert_func(klass([op + x for x in s]), op + s)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
@@ -4222,12 +4308,12 @@ def test_to_datetime_format_microsecond(self):
def test_to_datetime_format_time(self):
data = [
- ['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')],
- ['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')],
- ['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#,
- #['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')],
- #['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')],
- #['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')]
+ ['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')],
+ ['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')],
+ ['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#,
+ #['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')],
+ #['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')],
+ #['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py
index 75147e079bb65..151777add104a 100644
--- a/vb_suite/timeseries.py
+++ b/vb_suite/timeseries.py
@@ -405,3 +405,33 @@ def iter_n(iterable, n=None):
timeseries_iter_datetimeindex_preexit = Benchmark('iter_n(idx1, M)', setup)
timeseries_iter_periodindex_preexit = Benchmark('iter_n(idx2, M)', setup)
+
+
+#----------------------------------------------------------------------
+# apply an Offset to a DatetimeIndex
+setup = common_setup + """
+N = 100000
+idx1 = date_range(start='20140101', freq='T', periods=N)
+delta_offset = Day()
+fast_offset = DateOffset(months=2, days=2)
+slow_offset = offsets.BusinessDay()
+
+"""
+
+timeseries_datetimeindex_offset_delta = Benchmark('idx1 + delta_offset', setup)
+timeseries_datetimeindex_offset_fast = Benchmark('idx1 + fast_offset', setup)
+timeseries_datetimeindex_offset_slow = Benchmark('idx1 + slow_offset', setup)
+
+# apply an Offset to a Series containing datetime64 values
+setup = common_setup + """
+N = 100000
+s = Series(date_range(start='20140101', freq='T', periods=N))
+delta_offset = Day()
+fast_offset = DateOffset(months=2, days=2)
+slow_offset = offsets.BusinessDay()
+
+"""
+
+timeseries_series_offset_delta = Benchmark('s + delta_offset', setup)
+timeseries_series_offset_fast = Benchmark('s + fast_offset', setup)
+timeseries_series_offset_slow = Benchmark('s + slow_offset', setup)
| Addresses #10699 - allowing relative offsets like `DateOffset(years=1)` to to be added to a `Series` containing datetimes for element-wise addition.
Previously only non-relative offsets that could be mapped to a time_delta (`Tick`, `Day`, etc) worked.
Some performance numbers are below - `Tick` based offsets should be unchanged, a subset of offsets (`DateOffset`, `YearEnd/Begin`, `MonthEnd/Begin`, `QuarterEnd/Begin`, `Week`, `BusinessDay`) will be sped up significantly; and other offsets will still fallback to a slow path, but now will raise a performance warning.
### Master
```
In [1]: idx = pd.date_range(start='20140101', freq='T', periods=100000)
In [2]: %timeit idx + pd.offsets.Day()
100 loops, best of 3: 5.72 ms per loop
In [3]: %timeit idx + pd.Timedelta(1, 'D')
100 loops, best of 3: 5.67 ms per loop
In [4]: %timeit idx + pd.offsets.DateOffset(years=1, months=2, days=1)
1 loops, best of 3: 1.79 s per loop
In [5]: %timeit idx + pd.offsets.WeekOfMonth(week=2, weekday=4)
1 loops, best of 3: 22.8 s per loop
```
### PR
```
In [3]: %timeit idx + pd.offsets.Day()
100 loops, best of 3: 5.8 ms per loop
In [4]: %timeit idx + pd.Timedelta(1, 'D')
100 loops, best of 3: 5.73 ms per loop
In [5]: %timeit idx + pd.offsets.DateOffset(years=1, months=2, days=1)
10 loops, best of 3: 86.9 ms per loop
In [6]: %timeit idx + pd.offsets.WeekOfMonth(week=2, weekday=4)
1 loops, best of 3: 23.4 s per loop
c:\users\chris\documents\python-dev\pandas\pandas\tseries\index.py:695: PerformanceWarning: Non-vectorized DateOffset being applied to Series or DatetimeIndex PerformanceWarning)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10744 | 2015-08-04T04:31:26Z | 2015-08-13T13:07:42Z | 2015-08-13T13:07:42Z | 2015-08-19T00:50:58Z |
TST: test_packers.TestMsgpack checks for minimum structure and extra … | diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 33b7cc79083db..b71fd32a29e1e 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -532,14 +532,30 @@ class TestMsgpack():
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
def setUp(self):
- from pandas.io.tests.generate_legacy_storage_files import create_msgpack_data
+ from pandas.io.tests.generate_legacy_storage_files import (
+ create_msgpack_data, create_data)
self.data = create_msgpack_data()
+ self.all_data = create_data()
self.path = u('__%s__.msgpack' % tm.rands(10))
+ self.minimum_structure = {'series': ['float', 'int', 'mixed', 'ts', 'mi', 'dup'],
+ 'frame': ['float', 'int', 'mixed', 'mi'],
+ 'panel': ['float'],
+ 'index': ['int', 'date', 'period'],
+ 'mi': ['reg2']}
+
+ def check_min_structure(self, data):
+ for typ, v in self.minimum_structure.items():
+ assert typ in data, '"{0}" not found in unpacked data'.format(typ)
+ for kind in v:
+ assert kind in data[typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
def compare(self, vf):
data = read_msgpack(vf)
+ self.check_min_structure(data)
for typ, dv in data.items():
+ assert typ in self.all_data, 'unpacked data contains extra key "{0}"'.format(typ)
for dt, result in dv.items():
+ assert dt in self.all_data[typ], 'data["{0}"] contains extra key "{1}"'.format(typ, dt)
try:
expected = self.data[typ][dt]
except KeyError:
| …keys
closes #10732
I wasn't able to reproduce the odd behavior. Let's see if this passes on Travis.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10743 | 2015-08-04T03:38:57Z | 2015-08-04T11:47:19Z | 2015-08-04T11:47:19Z | 2015-08-04T11:47:20Z |
ENH: Fixed DF.apply for functions returning a dict, #8735 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9049d8de550d0..b6b55a92e774f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -218,6 +218,8 @@ Other enhancements
- Support pickling of ``Period`` objects (:issue:`10439`)
+- ``DataFrame.apply`` will return a Series of dicts if the passed function returns a dict and ``reduce=True`` (:issue:`8735`).
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3e908bf9d579b..29d8b036f322f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3921,13 +3921,16 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
- try:
-
- # the is the fast-path
- values = self.values
- dummy = Series(NA, index=self._get_axis(axis),
- dtype=values.dtype)
+ # the is the fast-path
+ values = self.values
+ # Create a dummy Series from an empty array
+ # Unlike filling with NA, this works for any dtype
+ index = self._get_axis(axis)
+ empty_arr = np.empty(len(index), dtype=values.dtype)
+ dummy = Series(empty_arr, index=self._get_axis(axis),
+ dtype=values.dtype)
+ try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx
index 09f8e0ab42924..eb736e4569009 100644
--- a/pandas/src/reduce.pyx
+++ b/pandas/src/reduce.pyx
@@ -133,7 +133,7 @@ cdef class Reducer:
else:
res = self.f(chunk)
- if hasattr(res,'values'):
+ if hasattr(res,'values') and isinstance(res.values, np.ndarray):
res = res.values
if i == 0:
result = _get_result_array(res,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 16143fa612c48..6d65ab470be1f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11255,6 +11255,25 @@ def test_apply_multi_index(self):
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
tm.assertIsInstance(res.index, MultiIndex)
+ def test_apply_dict(self):
+
+ # GH 8735
+ A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
+ A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
+ dict([(0, 'bar'), (1, 'eggs')])])
+ B = DataFrame([[0, 1], [2, 3]])
+ B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
+ fn = lambda x: x.to_dict()
+
+ for df, dicts in [(A, A_dicts), (B, B_dicts)]:
+ reduce_true = df.apply(fn, reduce=True)
+ reduce_false = df.apply(fn, reduce=False)
+ reduce_none = df.apply(fn, reduce=None)
+
+ assert_series_equal(reduce_true, dicts)
+ assert_frame_equal(reduce_false, df)
+ assert_series_equal(reduce_none, dicts)
+
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
assert_frame_equal(applied, self.frame * 2)
| closes #8735
Previously, when the function argument to DataFrame.apply returned a dict, the reduction code would mistake its "values" property for the values of a Pandas Series, and return a Series of "values" instance methods. The new check ensures that the "values" property is an np.ndarray.
Previous behavior:
```
In [1]: A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
In [2]: A.apply(lambda c: c.to_dict(), reduce=True)
Out[2]:
0 <built-in method values of dict object at 0x7f...
1 <built-in method values of dict object at 0x7f...
dtype: object
```
New behavior:
```
In [1]: A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
In [2]: A.apply(lambda c: c.to_dict(), reduce=True)
Out[2]:
0 {0: u'foo', 1: u'spam'}
1 {0: u'bar', 1: u'eggs'}
dtype: object
```
If reduce=False, the result is a DataFrame (this did not change):
```
In [3]: A.apply(lambda c: c.to_dict(), reduce=False)
Out[3]:
0 1
0 foo bar
1 spam eggs
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10740 | 2015-08-03T20:05:56Z | 2015-08-28T18:17:41Z | null | 2015-08-28T18:17:41Z |
BUG: Fix dtypes order when ordering is different from original file in pandas.io.stata.read_stata | diff --git a/doc/source/api.rst b/doc/source/api.rst
index a1284a3ff7bc9..1cbe55ddbacb6 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -904,6 +904,8 @@ Reshaping, sorting, transposing
DataFrame.sort
DataFrame.sort_index
DataFrame.sortlevel
+ DataFrame.nlargest
+ DataFrame.nsmallest
DataFrame.swaplevel
DataFrame.stack
DataFrame.unstack
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 524f57953d5b8..58374fabaec32 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1497,6 +1497,20 @@ faster than sorting the entire Series and calling ``head(n)`` on the result.
s.nsmallest(3)
s.nlargest(3)
+.. versionadded:: 0.17.0
+
+``DataFrame`` also has the ``nlargest`` and ``nsmallest`` methods.
+
+.. ipython:: python
+
+ df = DataFrame({'a': [-2, -1, 1, 10, 8, 11, -1],
+ 'b': list('abdceff'),
+ 'c': [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0]})
+ df.nlargest(3, 'a')
+ df.nlargest(5, ['a', 'c'])
+ df.nsmallest(3, 'a')
+ df.nsmallest(5, ['a', 'c'])
+
.. _basics.multi-index_sorting:
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 31fee45eb0266..aaa39dd383e2e 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -267,11 +267,11 @@ Optional Dependencies
installation.
* Google's `python-gflags <http://code.google.com/p/python-gflags/>`__
and `google-api-python-client <http://github.com/google/google-api-python-client>`__
- * Needed for :mod:`~pandas.io.gbq`
+ * Needed for :mod:`~pandas.io.gbq`
* `setuptools <https://pypi.python.org/pypi/setuptools/>`__
- * Needed for :mod:`~pandas.io.gbq` (specifically, it utilizes `pkg_resources`)
+ * Needed for :mod:`~pandas.io.gbq` (specifically, it utilizes `pkg_resources`)
* `httplib2 <http://pypi.python.org/pypi/httplib2>`__
- * Needed for :mod:`~pandas.io.gbq`
+ * Needed for :mod:`~pandas.io.gbq`
* One of the following combinations of libraries is needed to use the
top-level :func:`~pandas.io.html.read_html` function:
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 85a7f80696331..38a8d4d05b807 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3610,7 +3610,7 @@ below and the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/c
If you want to manage your own connections you can pass one of those instead:
-.. ipython:: python
+.. code-block:: python
with engine.connect() as conn, conn.begin():
data = pd.read_sql_table('data', conn)
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1b5a4586e59e7..b00b1d2baaac3 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -208,21 +208,13 @@ Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time):
:okexcept:
# this is the default, raise when unparseable
- to_datetime(['2009-07-31', 'asd'], errors='raise')
+ to_datetime(['2009/07/31', 'asd'], errors='raise')
# return the original input when unparseable
- to_datetime(['2009-07-31', 'asd'], errors='ignore')
+ to_datetime(['2009/07/31', 'asd'], errors='ignore')
# return NaT for input when unparseable
- to_datetime(['2009-07-31', 'asd'], errors='coerce')
-
-
-Take care, ``to_datetime`` may not act as you expect on mixed data:
-
-.. ipython:: python
- :okexcept:
-
- to_datetime([1, '1'])
+ to_datetime(['2009/07/31', 'asd'], errors='coerce')
Epoch Timestamps
~~~~~~~~~~~~~~~~
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 51912b5d6b106..4378d182b3128 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1649,6 +1649,7 @@ values, the resulting grid has two columns and two rows. A histogram is
displayed for each cell of the grid.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1680,6 +1681,7 @@ Example below is the same as previous except the plot is set to kernel density
estimation. A ``seaborn`` example is included beneath.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1706,6 +1708,7 @@ The plot below shows that it is possible to have two or more plots for the same
data displayed on the same Trellis grid cell.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1745,6 +1748,7 @@ Below is a similar plot but with 2D kernel density estimation plot superimposed,
followed by a ``seaborn`` equivalent:
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1774,6 +1778,7 @@ only uses 'sex' attribute. If the second grouping attribute is not specified,
the plots will be arranged in a column.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1792,6 +1797,7 @@ the plots will be arranged in a column.
If the first grouping attribute is not specified the plots will be arranged in a row.
.. ipython:: python
+ :okwarning:
plt.figure()
@@ -1816,6 +1822,7 @@ scale objects to specify these mappings. The list of scale classes is
given below with initialization arguments for quick reference.
.. ipython:: python
+ :okwarning:
plt.figure()
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9a9054fcf0489..b52b004cac1dc 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -13,13 +13,13 @@ users upgrade to this version.
Highlights include:
- - Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
- - The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
- previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
- - The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
- if they are all ``NaN``, see :ref:`here <whatsnew_0170.api_breaking.hdf_dropna>`
- - Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>`
- - Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
+- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
+- The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
+ previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
+- The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
+ if they are all ``NaN``, see :ref:`here <whatsnew_0170.api_breaking.hdf_dropna>`
+- Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>`
+- Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating.
@@ -32,6 +32,7 @@ Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsne
New features
~~~~~~~~~~~~
+- ``DataFrame`` has the ``nlargest`` and ``nsmallest`` methods (:issue:`10393`)
- SQL io functions now accept a SQLAlchemy connectable. (:issue:`7877`)
- Enable writing complex values to HDF stores when using table format (:issue:`10447`)
- Enable reading gzip compressed files via URL, either by explicitly setting the compression parameter or by inferring from the presence of the HTTP Content-Encoding header in the response (:issue:`8685`)
@@ -448,6 +449,7 @@ from ``7``.
.. ipython:: python
:suppress:
+
pd.set_option('display.precision', 6)
@@ -457,6 +459,8 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- Line and kde plot with ``subplots=True`` now uses default colors, not all black. Specify ``color='k'`` to draw all lines in black (:issue:`9894`)
+- Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a
+Series with a ``CategoricalIndex`` (:issue:`10704`)
- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
@@ -479,9 +483,9 @@ Other API Changes
- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`)
- ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`)
- =============================== ==============================================================
+ =============================== ===============================================================
Behavior Methods
- =============================== ==============================================================
+ =============================== ===============================================================
``return np.nan`` ``weekday``, ``isoweekday``
``return NaT`` ``date``, ``now``, ``replace``, ``to_datetime``, ``today``
``return np.datetime64('NaT')`` ``to_datetime64`` (unchanged)
@@ -544,6 +548,8 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
+- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`)
- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
@@ -606,3 +612,5 @@ Bug Fixes
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
+- Bug in ``read_stata`` when reading a file with a different order set in ``columns`` (:issue:`10739`)
+- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c958a70b43089..b0c7ff43bc7d8 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -36,7 +36,7 @@ def match(to_match, values, na_sentinel=-1):
values = np.array(values, dtype='O')
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
- result = _hashtable_algo(f, values.dtype)
+ result = _hashtable_algo(f, values.dtype, np.int64)
if na_sentinel != -1:
@@ -66,7 +66,7 @@ def unique(values):
return _hashtable_algo(f, values.dtype)
-def _hashtable_algo(f, dtype):
+def _hashtable_algo(f, dtype, return_dtype=None):
"""
f(HashTable, type_caster) -> result
"""
@@ -74,6 +74,12 @@ def _hashtable_algo(f, dtype):
return f(htable.Float64HashTable, com._ensure_float64)
elif com.is_integer_dtype(dtype):
return f(htable.Int64HashTable, com._ensure_int64)
+ elif com.is_datetime64_dtype(dtype):
+ return_dtype = return_dtype or 'M8[ns]'
+ return f(htable.Int64HashTable, com._ensure_int64).view(return_dtype)
+ elif com.is_timedelta64_dtype(dtype):
+ return_dtype = return_dtype or 'm8[ns]'
+ return f(htable.Int64HashTable, com._ensure_int64).view(return_dtype)
else:
return f(htable.PyObjectHashTable, com._ensure_object)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 1604705ff824a..b0d564caa5826 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1027,6 +1027,7 @@ def value_counts(self, dropna=True):
"""
import pandas.hashtable as htable
from pandas.core.series import Series
+ from pandas.core.index import CategoricalIndex
cat = self.dropna() if dropna else self
keys, counts = htable.value_count_int64(com._ensure_int64(cat._codes))
@@ -1036,10 +1037,12 @@ def value_counts(self, dropna=True):
if not dropna and -1 in keys:
ix = np.append(ix, -1)
result = result.reindex(ix, fill_value=0)
- result.index = (np.append(cat.categories, np.nan)
+ index = (np.append(cat.categories, np.nan)
if not dropna and -1 in keys
else cat.categories)
+ result.index = CategoricalIndex(index, self.categories, self.ordered)
+
return result
def get_values(self):
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 4115788e4dd90..a18d0cfa6f195 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1037,7 +1037,7 @@ def _column_header():
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
- if self.fmt.has_index_names:
+ if self.fmt.has_index_names and self.fmt.index:
row = [
x if x is not None else '' for x in self.frame.index.names
] + [''] * min(len(self.columns), self.max_cols)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index deda8294d139a..d8948bc82fe61 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3127,6 +3127,79 @@ def sortlevel(self, level=0, axis=0, ascending=True,
else:
return self._constructor(new_data).__finalize__(self)
+ def _nsorted(self, columns, n, method, take_last):
+ if not com.is_list_like(columns):
+ columns = [columns]
+ columns = list(columns)
+ ser = getattr(self[columns[0]], method)(n, take_last=take_last)
+ ascending = dict(nlargest=False, nsmallest=True)[method]
+ return self.loc[ser.index].sort(columns, ascending=ascending,
+ kind='mergesort')
+
+ def nlargest(self, n, columns, take_last=False):
+ """Get the rows of a DataFrame sorted by the `n` largest
+ values of `columns`.
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ n : int
+ Number of items to retrieve
+ columns : list or str
+ Column name or names to order by
+ take_last : bool, optional
+ Where there are duplicate values, take the last duplicate
+
+ Returns
+ -------
+ DataFrame
+
+ Examples
+ --------
+ >>> df = DataFrame({'a': [1, 10, 8, 11, -1],
+ ... 'b': list('abdce'),
+ ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
+ >>> df.nlargest(3, 'a')
+ a b c
+ 3 11 c 3
+ 1 10 b 2
+ 2 8 d NaN
+ """
+ return self._nsorted(columns, n, 'nlargest', take_last)
+
+ def nsmallest(self, n, columns, take_last=False):
+ """Get the rows of a DataFrame sorted by the `n` smallest
+ values of `columns`.
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ n : int
+ Number of items to retrieve
+ columns : list or str
+ Column name or names to order by
+ take_last : bool, optional
+ Where there are duplicate values, take the last duplicate
+
+ Returns
+ -------
+ DataFrame
+
+ Examples
+ --------
+ >>> df = DataFrame({'a': [1, 10, 8, 11, -1],
+ ... 'b': list('abdce'),
+ ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
+ >>> df.nsmallest(3, 'a')
+ a b c
+ 4 -1 e 4
+ 0 1 a 1
+ 2 8 d NaN
+ """
+ return self._nsorted(columns, n, 'nsmallest', take_last)
+
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index e73fa207152c1..7837fb60da9d6 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -193,7 +193,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
See Also
--------
- match : analagous, but stricter, relying on re.match instead of re.search
+ match : analogous, but stricter, relying on re.match instead of re.search
"""
if regex:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 0266e2beeca40..5afbc2671e3a7 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1614,14 +1614,12 @@ def _do_select_columns(self, data, columns):
typlist = []
fmtlist = []
lbllist = []
- matched = set()
- for i, col in enumerate(data.columns):
- if col in column_set:
- matched.update([col])
- dtyplist.append(self.dtyplist[i])
- typlist.append(self.typlist[i])
- fmtlist.append(self.fmtlist[i])
- lbllist.append(self.lbllist[i])
+ for col in columns:
+ i = data.columns.get_loc(col)
+ dtyplist.append(self.dtyplist[i])
+ typlist.append(self.typlist[i])
+ fmtlist.append(self.fmtlist[i])
+ lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 33b7cc79083db..b71fd32a29e1e 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -532,14 +532,30 @@ class TestMsgpack():
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
def setUp(self):
- from pandas.io.tests.generate_legacy_storage_files import create_msgpack_data
+ from pandas.io.tests.generate_legacy_storage_files import (
+ create_msgpack_data, create_data)
self.data = create_msgpack_data()
+ self.all_data = create_data()
self.path = u('__%s__.msgpack' % tm.rands(10))
+ self.minimum_structure = {'series': ['float', 'int', 'mixed', 'ts', 'mi', 'dup'],
+ 'frame': ['float', 'int', 'mixed', 'mi'],
+ 'panel': ['float'],
+ 'index': ['int', 'date', 'period'],
+ 'mi': ['reg2']}
+
+ def check_min_structure(self, data):
+ for typ, v in self.minimum_structure.items():
+ assert typ in data, '"{0}" not found in unpacked data'.format(typ)
+ for kind in v:
+ assert kind in data[typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
def compare(self, vf):
data = read_msgpack(vf)
+ self.check_min_structure(data)
for typ, dv in data.items():
+ assert typ in self.all_data, 'unpacked data contains extra key "{0}"'.format(typ)
for dt, result in dv.items():
+ assert dt in self.all_data[typ], 'data["{0}"] contains extra key "{1}"'.format(typ, dt)
try:
expected = self.data[typ][dt]
except KeyError:
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index aa9f27d1515d3..627ce4383f7be 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -780,8 +780,14 @@ def test_drop_column(self):
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
-
tm.assert_frame_equal(expected, dropped)
+
+ columns = ['int_', 'long_', 'byte_']
+ expected = expected[columns]
+ reordered = read_stata(self.dta15_117, convert_dates=True,
+ columns=columns)
+ tm.assert_frame_equal(expected, reordered)
+
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 30dcd8631f13a..6164b1b4906de 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -4,7 +4,7 @@
import numpy as np
from numpy.random import RandomState
-from pandas.core.api import Series, Categorical
+from pandas.core.api import Series, Categorical, CategoricalIndex
import pandas as pd
import pandas.core.algorithms as algos
@@ -235,6 +235,50 @@ def test_on_index_object(self):
tm.assert_almost_equal(result, expected)
+ def test_datetime64_dtype_array_returned(self):
+ # GH 9431
+ expected = np.array(['2015-01-03T00:00:00.000000000+0000',
+ '2015-01-01T00:00:00.000000000+0000'], dtype='M8[ns]')
+
+ dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
+ '2015-01-01T00:00:00.000000000+0000',
+ '2015-01-01T00:00:00.000000000+0000'])
+ result = algos.unique(dt_index)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ s = pd.Series(dt_index)
+ result = algos.unique(s)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ arr = s.values
+ result = algos.unique(arr)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+
+ def test_timedelta64_dtype_array_returned(self):
+ # GH 9431
+ expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
+
+ td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
+ result = algos.unique(td_index)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ s = pd.Series(td_index)
+ result = algos.unique(s)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ arr = s.values
+ result = algos.unique(arr)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+
+
class TestValueCounts(tm.TestCase):
_multiprocess_can_split_ = True
@@ -246,9 +290,15 @@ def test_value_counts(self):
factor = cut(arr, 4)
tm.assertIsInstance(factor, Categorical)
-
result = algos.value_counts(factor)
- expected = algos.value_counts(np.asarray(factor))
+ cats = ['(-1.194, -0.535]',
+ '(-0.535, 0.121]',
+ '(0.121, 0.777]',
+ '(0.777, 1.433]'
+ ]
+ expected_index = CategoricalIndex(cats, cats, ordered=True)
+ expected = Series([1, 1, 1, 1],
+ index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
@@ -288,6 +338,57 @@ def test_value_counts_nat(self):
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
+ def test_categorical(self):
+ s = Series(pd.Categorical(list('aaabbc')))
+ result = s.value_counts()
+ expected = pd.Series([3, 2, 1], index=pd.CategoricalIndex(['a', 'b', 'c']))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # preserve order?
+ s = s.cat.as_ordered()
+ result = s.value_counts()
+ expected.index = expected.index.as_ordered()
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ def test_categorical_nans(self):
+ s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
+ s.iloc[1] = np.nan
+ result = s.value_counts()
+ expected = pd.Series([4, 3, 2],
+ index=pd.CategoricalIndex(['a', 'b', 'c'],
+ categories=['a', 'b', 'c']))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+ result = s.value_counts(dropna=False)
+ expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
+ ['a', 'b', 'c', np.nan]))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # out of order
+ s = Series(pd.Categorical(list('aaaaabbbcc'),
+ ordered=True, categories=['b', 'a', 'c']))
+ s.iloc[1] = np.nan
+ result = s.value_counts()
+ expected = pd.Series([4, 3, 2],
+ index=pd.CategoricalIndex(['a', 'b', 'c'],
+ categories=['b', 'a', 'c'],
+ ordered=True))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = s.value_counts(dropna=False)
+ expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
+ ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ def test_categorical_zeroes(self):
+ # keep the `d` category with 0
+ s = Series(pd.Categorical(list('bbbaac'), categories=list('abcd'),
+ ordered=True))
+ result = s.value_counts()
+ expected = Series([3, 2, 1, 0], index=pd.Categorical(
+ ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+
def test_dropna(self):
# https://github.com/pydata/pandas/issues/9443#issuecomment-73719328
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index a85fd52ed6eb3..a065d03d4ad72 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -458,7 +458,8 @@ def test_describe(self):
desc = cat.describe()
expected = DataFrame.from_dict(dict(counts=[1, 2, 1],
freqs=[1/4., 2/4., 1/4.],
- categories=[1,2,np.nan]
+ categories=Categorical([1,2,np.nan],
+ [1, 2])
)
).set_index('categories')
tm.assert_frame_equal(desc, expected)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index b94f4046630e4..388df526e05f5 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -632,6 +632,10 @@ def test_to_html_multiindex_index_false(self):
</table>"""
self.assertEqual(result, expected)
+ df.index = Index(df.index.values, name='idx')
+ result = df.to_html(index=False)
+ self.assertEqual(result, expected)
+
def test_to_html_multiindex_sparsify_false_multi_sparse(self):
with option_context('display.multi_sparse', False):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
@@ -1922,15 +1926,195 @@ def test_to_html_index(self):
'C': ['one', 'two', np.NaN]},
columns=['A', 'B', 'C'],
index=index)
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th>foo</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>baz</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+
+ expected_without_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
result = df.to_html(index=False)
for i in index:
self.assertNotIn(i, result)
+ self.assertEqual(result, expected_without_index)
+ df.index = Index(['foo', 'bar', 'baz'], name='idx')
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>idx</th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th>foo</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>baz</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+ self.assertEqual(df.to_html(index=False), expected_without_index)
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
+
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th rowspan="2" valign="top">foo</th>\n'
+ ' <th>car</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bike</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <th>car</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
self.assertNotIn(i, result)
+ # must be the same result as normal index
+ self.assertEqual(result, expected_without_index)
+
+ df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>idx1</th>\n'
+ ' <th>idx2</th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th rowspan="2" valign="top">foo</th>\n'
+ ' <th>car</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bike</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <th>car</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+ self.assertEqual(df.to_html(index=False), expected_without_index)
def test_repr_html(self):
self.frame._repr_html_()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3b93465c1efe9..77ef5fecf22c9 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -14609,6 +14609,41 @@ def test_dataframe_metadata(self):
self.assertEqual(df._metadata, unpickled._metadata)
self.assertEqual(df.testattr, unpickled.testattr)
+ def test_nlargest(self):
+ # GH10393
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10])})
+ result = df.nlargest(5, 'a')
+ expected = df.sort('a', ascending=False).head(5)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nlargest_multiple_columns(self):
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10]),
+ 'c': np.random.permutation(10).astype('float64')})
+ result = df.nlargest(5, ['a', 'b'])
+ expected = df.sort(['a', 'b'], ascending=False).head(5)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nsmallest(self):
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10])})
+ result = df.nsmallest(5, 'a')
+ expected = df.sort('a').head(5)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nsmallest_multiple_columns(self):
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10]),
+ 'c': np.random.permutation(10).astype('float64')})
+ result = df.nsmallest(5, ['a', 'c'])
+ expected = df.sort(['a', 'c']).head(5)
+ tm.assert_frame_equal(result, expected)
+
def test_to_panel_expanddim(self):
# GH 9762
| This fixes http://stackoverflow.com/questions/31783392/pandas-adding-columns-to-filter-will-mess-with-data-structure
The dtypes of the DataFrame returned by `read_stata` have the same order as the original stata file, even if `columns` are specified with a different order.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10739 | 2015-08-03T19:25:04Z | 2015-08-06T12:17:29Z | null | 2015-08-06T12:17:42Z |
amend sample to return copy and align weight axis | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 71feddc1a8b7c..f45ed9eeeaeee 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -628,6 +628,8 @@ Bug Fixes
- Bug in ``stack`` when index or columns are not unique. (:issue:`10417`)
- Bug in setting a Panel when an axis has a multi-index (:issue:`10360`)
- Bug in ``USFederalHolidayCalendar`` where ``USMemorialDay`` and ``USMartinLutherKingJr`` were incorrect (:issue:`10278` and :issue:`9760` )
+- Bug in ``.sample()`` where returned object, if set, gives unnecessary ``SettingWithCopyWarning`` (:issue:`10738`)
+- Bug in ``.sample()`` where weights passed as Series were not aligned along axis before being treated positionally, potentially causing problems if weight indices were not aligned with sampled object. (:issue:`10738`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 22f3e3016c019..0e8afbdafb866 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2004,9 +2004,14 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
+ If passed a Series, will align with target object on index. Index
+ values in weights not found in sampled object will be ignored and
+ index values in sampled object not in weights will be assigned
+ weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
- Weights must be same length as axis being sampled.
+ Unless weights are a Series, weights must be same length as axis
+ being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
@@ -2019,7 +2024,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
Returns
-------
- Same type as caller.
+ A new object of same type as caller.
"""
if axis is None:
@@ -2034,6 +2039,10 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
# Check weights for compliance
if weights is not None:
+ # If a series, align with frame
+ if isinstance(weights, pd.Series):
+ weights = weights.reindex(self.axes[axis])
+
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, string_types):
if isinstance(self, pd.DataFrame):
@@ -2063,7 +2072,10 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
# Renormalize if don't sum to 1
if weights.sum() != 1:
- weights = weights / weights.sum()
+ if weights.sum() != 0:
+ weights = weights / weights.sum()
+ else:
+ raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
@@ -2082,7 +2094,8 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
raise ValueError("A negative number of rows requested. Please provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
- return self.take(locs, axis=axis)
+ return self.take(locs, axis=axis, is_copy=False)
+
_shared_docs['pipe'] = ("""
Apply func(self, \*args, \*\*kwargs)
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index f434992e9fcd8..c1f6045c61d54 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -374,7 +374,7 @@ def test_sample(self):
self._compare(o.sample(frac=0.7,random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
-
+
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
@@ -415,6 +415,10 @@ def test_sample(self):
bad_weights = [0.5]*11
o.sample(n=3, weights=bad_weights)
+ with tm.assertRaises(ValueError):
+ bad_weight_series = Series([0,0,0.2])
+ o.sample(n=4, weights=bad_weight_series)
+
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1]*10
@@ -431,6 +435,16 @@ def test_sample(self):
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
+ # All zeros raises errors
+ zero_weights = [0]*10
+ with tm.assertRaises(ValueError):
+ o.sample(n=3, weights=zero_weights)
+
+ # All missing weights
+ nan_weights = [np.nan]*10
+ with tm.assertRaises(ValueError):
+ o.sample(n=3, weights=nan_weights)
+
# A few dataframe test with degenerate weights.
easy_weight_list = [0]*10
@@ -496,7 +510,6 @@ def test_sample(self):
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
-
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
@@ -527,6 +540,26 @@ def test_sample(self):
assert_panel_equal(p.sample(n=3, random_state=42), p.sample(n=3, axis=1, random_state=42))
assert_frame_equal(df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42))
+ # Test that function aligns weights with frame
+ df = DataFrame({'col1':[5,6,7], 'col2':['a','b','c'], }, index = [9,5,3])
+ s = Series([1,0,0], index=[3,5,9])
+ assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
+
+ # Weights have index values to be dropped because not in
+ # sampled DataFrame
+ s2 = Series([0.001,0,10000], index=[3,5,10])
+ assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
+
+ # Weights have empty values to be filed with zeros
+ s3 = Series([0.01,0], index=[3,5])
+ assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
+
+ # No overlap in weight and sampled DataFrame indices
+ s4 = Series([1,0], index=[1,2])
+ with tm.assertRaises(ValueError):
+ df.sample(1, weights=s4)
+
+
def test_size_compat(self):
# GH8846
# size property should be defined
| Two bug fixes:
- Makes sure to return copies, not views.
- makes sure if a separate series is passed as weights that the axis are aligned before arguments get converted into numpy arrays and handled positionally
closes #10736
| https://api.github.com/repos/pandas-dev/pandas/pulls/10738 | 2015-08-03T19:21:25Z | 2015-08-19T20:52:33Z | 2015-08-19T20:52:33Z | 2015-08-19T20:52:38Z |
DOC: to_datetime outdated example | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1b5a4586e59e7..b00b1d2baaac3 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -208,21 +208,13 @@ Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time):
:okexcept:
# this is the default, raise when unparseable
- to_datetime(['2009-07-31', 'asd'], errors='raise')
+ to_datetime(['2009/07/31', 'asd'], errors='raise')
# return the original input when unparseable
- to_datetime(['2009-07-31', 'asd'], errors='ignore')
+ to_datetime(['2009/07/31', 'asd'], errors='ignore')
# return NaT for input when unparseable
- to_datetime(['2009-07-31', 'asd'], errors='coerce')
-
-
-Take care, ``to_datetime`` may not act as you expect on mixed data:
-
-.. ipython:: python
- :okexcept:
-
- to_datetime([1, '1'])
+ to_datetime(['2009/07/31', 'asd'], errors='coerce')
Epoch Timestamps
~~~~~~~~~~~~~~~~
| With the new default of `errors='raise'`, the following section in the docs is not fully correctly phrased anymore: http://pandas-docs.github.io/pandas-docs-travis/timeseries.html#invalid-data.
So I just removed that example of `to_datetime([1, '1'])` (as it now raises, which is less unexpected, and the new example already shows the three options).
| https://api.github.com/repos/pandas-dev/pandas/pulls/10733 | 2015-08-03T12:58:13Z | 2015-08-05T08:12:32Z | 2015-08-05T08:12:32Z | 2015-08-05T08:12:35Z |
TST: better testing for io/data | diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index 7072031b09098..96bac2c45340b 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -291,13 +291,11 @@ def setUpClass(cls):
# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
- today = datetime.today()
- cls.year = today.year
- cls.month = today.month + 1
- if cls.month > 12:
- cls.year = cls.year + 1
- cls.month = 1
- cls.expiry = datetime(cls.year, cls.month, 1)
+ d = (Timestamp.today() + pd.offsets.MonthBegin(1)).normalize()
+ cls.year = d.year
+ cls.month = d.month
+ cls.expiry = d
+ cls.expiry2 = d + pd.offsets.MonthBegin(1)
cls.dirpath = tm.get_data_path()
cls.html1 = os.path.join(cls.dirpath, 'yahoo_options1.html')
cls.html2 = os.path.join(cls.dirpath, 'yahoo_options2.html')
@@ -325,7 +323,7 @@ def test_get_options_data(self):
def test_get_near_stock_price(self):
try:
options = self.aapl.get_near_stock_price(call=True, put=True,
- expiry=self.expiry)
+ expiry=[self.expiry,self.expiry2])
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(options) > 1)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10731 | 2015-08-03T11:08:13Z | 2015-08-03T12:09:19Z | 2015-08-03T12:09:19Z | 2015-08-03T12:09:19Z | |
API: CategoricalIndex for value_counts | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 16c6c639a489e..87a3042061b16 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -457,6 +457,8 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- Line and kde plot with ``subplots=True`` now uses default colors, not all black. Specify ``color='k'`` to draw all lines in black (:issue:`9894`)
+- Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a
+Series with a ``CategoricalIndex`` (:issue:`10704`)
- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 1604705ff824a..b0d564caa5826 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1027,6 +1027,7 @@ def value_counts(self, dropna=True):
"""
import pandas.hashtable as htable
from pandas.core.series import Series
+ from pandas.core.index import CategoricalIndex
cat = self.dropna() if dropna else self
keys, counts = htable.value_count_int64(com._ensure_int64(cat._codes))
@@ -1036,10 +1037,12 @@ def value_counts(self, dropna=True):
if not dropna and -1 in keys:
ix = np.append(ix, -1)
result = result.reindex(ix, fill_value=0)
- result.index = (np.append(cat.categories, np.nan)
+ index = (np.append(cat.categories, np.nan)
if not dropna and -1 in keys
else cat.categories)
+ result.index = CategoricalIndex(index, self.categories, self.ordered)
+
return result
def get_values(self):
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index cf72f0e433634..6164b1b4906de 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -4,7 +4,7 @@
import numpy as np
from numpy.random import RandomState
-from pandas.core.api import Series, Categorical
+from pandas.core.api import Series, Categorical, CategoricalIndex
import pandas as pd
import pandas.core.algorithms as algos
@@ -290,9 +290,15 @@ def test_value_counts(self):
factor = cut(arr, 4)
tm.assertIsInstance(factor, Categorical)
-
result = algos.value_counts(factor)
- expected = algos.value_counts(np.asarray(factor))
+ cats = ['(-1.194, -0.535]',
+ '(-0.535, 0.121]',
+ '(0.121, 0.777]',
+ '(0.777, 1.433]'
+ ]
+ expected_index = CategoricalIndex(cats, cats, ordered=True)
+ expected = Series([1, 1, 1, 1],
+ index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
@@ -332,6 +338,57 @@ def test_value_counts_nat(self):
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
+ def test_categorical(self):
+ s = Series(pd.Categorical(list('aaabbc')))
+ result = s.value_counts()
+ expected = pd.Series([3, 2, 1], index=pd.CategoricalIndex(['a', 'b', 'c']))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # preserve order?
+ s = s.cat.as_ordered()
+ result = s.value_counts()
+ expected.index = expected.index.as_ordered()
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ def test_categorical_nans(self):
+ s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
+ s.iloc[1] = np.nan
+ result = s.value_counts()
+ expected = pd.Series([4, 3, 2],
+ index=pd.CategoricalIndex(['a', 'b', 'c'],
+ categories=['a', 'b', 'c']))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+ result = s.value_counts(dropna=False)
+ expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
+ ['a', 'b', 'c', np.nan]))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # out of order
+ s = Series(pd.Categorical(list('aaaaabbbcc'),
+ ordered=True, categories=['b', 'a', 'c']))
+ s.iloc[1] = np.nan
+ result = s.value_counts()
+ expected = pd.Series([4, 3, 2],
+ index=pd.CategoricalIndex(['a', 'b', 'c'],
+ categories=['b', 'a', 'c'],
+ ordered=True))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = s.value_counts(dropna=False)
+ expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
+ ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ def test_categorical_zeroes(self):
+ # keep the `d` category with 0
+ s = Series(pd.Categorical(list('bbbaac'), categories=list('abcd'),
+ ordered=True))
+ result = s.value_counts()
+ expected = Series([3, 2, 1, 0], index=pd.Categorical(
+ ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+
def test_dropna(self):
# https://github.com/pydata/pandas/issues/9443#issuecomment-73719328
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index a85fd52ed6eb3..a065d03d4ad72 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -458,7 +458,8 @@ def test_describe(self):
desc = cat.describe()
expected = DataFrame.from_dict(dict(counts=[1, 2, 1],
freqs=[1/4., 2/4., 1/4.],
- categories=[1,2,np.nan]
+ categories=Categorical([1,2,np.nan],
+ [1, 2])
)
).set_index('categories')
tm.assert_frame_equal(desc, expected)
| Closes https://github.com/pydata/pandas/issues/10704
Changes `Categorical.value_counts` to return
a Series with a CategoricalIndex. Previously
the Series and an Index.
Just some random observations on the convenience of working with categoricals:
The code here could be simplified if
1. `Categorical.categories` was always a `Categoricalndex`
2. We were consistent about whether `NaN` is allowed in `.categories`
I thought that 1. would be a good idea, but I'm going back on it now. I can see how it'd be useful for `Categorical.categories` to be a more specific type like DatetimeIndex. For 2, I'd personally I'll always keep `NaN` out of my categories, but I could be missing a use case where it's needed.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10729 | 2015-08-03T02:26:59Z | 2015-08-04T17:38:52Z | 2015-08-04T17:38:52Z | 2015-08-18T12:44:38Z |
DOC: limit error traceback to one line for expected exceptions (GH10715) | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 850f59c2713eb..1888345e1055c 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -661,18 +661,14 @@ values NOT in the categories, similarly to how you can reindex ANY pandas index.
Reshaping and Comparision operations on a ``CategoricalIndex`` must have the same categories
or a ``TypeError`` will be raised.
- .. code-block:: python
-
- In [9]: df3 = pd.DataFrame({'A' : np.arange(6),
- 'B' : pd.Series(list('aabbca')).astype('category')})
-
- In [11]: df3 = df3.set_index('B')
-
- In [11]: df3.index
- Out[11]: CategoricalIndex([u'a', u'a', u'b', u'b', u'c', u'a'], categories=[u'a', u'b', u'c'], ordered=False, name=u'B', dtype='category')
+ .. ipython:: python
+ :okexcept:
- In [12]: pd.concat([df2, df3]
- TypeError: categories must match existing categories when appending
+ df3 = pd.DataFrame({'A' : np.arange(6),
+ 'B' : pd.Series(list('aabbca')).astype('category')})
+ df3 = df3.set_index('B')
+ df3.index
+ pd.concat([df2, df3]
.. _indexing.float64index:
@@ -738,20 +734,18 @@ In float indexes, slicing using floats is allowed
In non-float indexes, slicing using floats will raise a ``TypeError``
-.. code-block:: python
-
- In [1]: pd.Series(range(5))[3.5]
- TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index)
+.. ipython:: python
+ :okexcept:
- In [1]: pd.Series(range(5))[3.5:4.5]
- TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index)
+ pd.Series(range(5))[3.5]
+ pd.Series(range(5))[3.5:4.5]
Using a scalar float indexer will be deprecated in a future version, but is allowed for now.
-.. code-block:: python
+.. ipython:: python
+ :okwarning:
- In [3]: pd.Series(range(5))[3.0]
- Out[3]: 3
+ pd.Series(range(5))[3.0]
Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat
irregular timedelta-like indexing scheme, but the data is recorded as floats. This could for
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 71d16a40f0215..315e7627bda59 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -352,13 +352,11 @@ objects of the same length:
Trying to compare ``Index`` or ``Series`` objects of different lengths will
raise a ValueError:
-.. code-block:: python
-
- In [55]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar'])
- ValueError: Series lengths must match to compare
+.. ipython:: python
+ :okexcept:
- In [56]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo'])
- ValueError: Series lengths must match to compare
+ pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar'])
+ pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo'])
Note that this is different from the numpy behavior where a comparison can
be broadcast:
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 5a62e7dccea34..847044c4745f9 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -143,10 +143,10 @@ label:
If a label is not contained, an exception is raised:
-.. code-block:: python
+.. ipython:: python
+ :okexcept:
- >>> s['f']
- KeyError: 'f'
+ s['f']
Using the ``get`` method, a missing label will return None or specified default:
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 38629ee7baaea..a49a4745f7200 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -293,10 +293,10 @@ Selection By Label
dfl = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('20130101',periods=5))
dfl
- .. code-block:: python
+ .. ipython:: python
+ :okexcept:
- In [4]: dfl.loc[2:3]
- TypeError: cannot do slice indexing on <class 'pandas.tseries.index.DatetimeIndex'> with these indexers [2] of <type 'int'>
+ dfl.loc[2:3]
String likes in slicing *can* be convertible to the type of the index and lead to natural slicing.
@@ -475,13 +475,11 @@ A single indexer that is out of bounds will raise an ``IndexError``.
A list of indexers where any element is out of bounds will raise an
``IndexError``
-.. code-block:: python
+.. ipython:: python
+ :okexcept:
dfl.iloc[[4,5,6]]
- IndexError: positional indexers are out-of-bounds
-
dfl.iloc[:,4]
- IndexError: single positional indexer is out-of-bounds
.. _indexing.basics.partial_setting:
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 26871a11473de..834b4b642c393 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -57,11 +57,7 @@ The following will **not work** because it matches multiple option names, e.g.
.. ipython:: python
:okexcept:
- try:
- pd.get_option("column")
- except KeyError as e:
- print(e)
-
+ pd.get_option("column")
**Note:** Using this form of shorthand may cause your code to break if new options with similar names are added in future versions.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 6f30ff3f51ad5..a2067b9a37d55 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -205,9 +205,9 @@ Invalid Data
Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time):
.. ipython:: python
- :okexcept:
# this is the default, raise when unparseable
+ @okexcept
to_datetime(['2009/07/31', 'asd'], errors='raise')
# return the original input when unparseable
@@ -656,7 +656,7 @@ apply the offset to each element.
rng + DateOffset(months=2)
s + DateOffset(months=2)
s - DateOffset(months=2)
-
+
If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``,
``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be
used exactly like a ``Timedelta`` - see the
@@ -670,7 +670,7 @@ used exactly like a ``Timedelta`` - see the
td + Minute(15)
Note that some offsets (such as ``BQuarterEnd``) do not have a
-vectorized implementation. They can still be used but may
+vectorized implementation. They can still be used but may
calculate signficantly slower and will raise a ``PerformanceWarning``
.. ipython:: python
@@ -1702,13 +1702,13 @@ the top example will fail as it contains ambiguous times and the bottom will
infer the right offset.
.. ipython:: python
- :okexcept:
rng_hourly = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
'11/06/2011 01:00', '11/06/2011 02:00',
'11/06/2011 03:00'])
# This will fail as there are ambiguous times
+ @okexcept
rng_hourly.tz_localize('US/Eastern')
rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
rng_hourly_eastern.tolist()
diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py
index 3f9be95609874..08fc53e43cc88 100644
--- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py
+++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py
@@ -461,10 +461,6 @@ def process_input(self, data, input_prompt, lineno):
self.cout.seek(0)
output = self.cout.read()
- if not is_suppress and not is_semicolon:
- ret.append(output)
- elif is_semicolon: # get spacing right
- ret.append('')
# context information
filename = self.state.document.current_source
@@ -494,6 +490,16 @@ def process_input(self, data, input_prompt, lineno):
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
+ # if :okexcept: has been specified, display shorter traceback
+ if is_okexcept and "Traceback" in output:
+ traceback = output.split('\n\n')
+ output = traceback[-1]
+
+ if not is_suppress and not is_semicolon:
+ ret.append(output)
+ elif is_semicolon: # get spacing right
+ ret.append('')
+
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
| Closes #10715
This gives you an output like
```
In [4]: s = pd.Series([1,3,5,np.nan,6,8])
In [5]: s.iloc[11]
---------------------------------------------------------------------------
IndexError: single positional indexer is out-of-bounds
```
for input
```
.. ipython:: python
:okexcept:
s = pd.Series([1,3,5,np.nan,6,8])
s.iloc[10]
```
I think we would always want such a truncated traceback in the docs in cases that exceptions are expected, so I don't think it is needed to make it an extra new option.
I only should check for cases where the actual message spans multiple lines (now it is hardcoded to give me the last line of the error traceback)
@jreback with this we don't have to use the `code-block` s in eg whatsnew for the expect behaviour of `to_datetime`
| https://api.github.com/repos/pandas-dev/pandas/pulls/10727 | 2015-08-02T21:58:32Z | 2015-08-21T07:33:44Z | 2015-08-21T07:33:44Z | 2015-09-06T08:52:00Z |
API/WIP: .sorted | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 6b188deb9eb42..2f4fd860f270a 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -434,9 +434,8 @@ Reshaping, sorting
:toctree: generated/
Series.argsort
- Series.order
Series.reorder_levels
- Series.sort
+ Series.sort_values
Series.sort_index
Series.sortlevel
Series.swaplevel
@@ -908,7 +907,7 @@ Reshaping, sorting, transposing
DataFrame.pivot
DataFrame.reorder_levels
- DataFrame.sort
+ DataFrame.sort_values
DataFrame.sort_index
DataFrame.sortlevel
DataFrame.nlargest
@@ -1293,7 +1292,6 @@ Modifying and Computations
Index.insert
Index.min
Index.max
- Index.order
Index.reindex
Index.repeat
Index.take
@@ -1319,8 +1317,7 @@ Sorting
:toctree: generated/
Index.argsort
- Index.order
- Index.sort
+ Index.sort_values
Time-specific operations
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 624e10b431de5..1209cb9934e82 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1418,29 +1418,48 @@ description.
.. _basics.sorting:
-Sorting by index and value
---------------------------
+Sorting
+-------
+
+.. warning::
+
+ The sorting API is substantially changed in 0.17.0, see :ref:`here <whatsnew_0170.api_breaking.sorting>` for these changes.
+ In particular, all sorting methods now return a new object by default, and **DO NOT** operate in-place (except by passing ``inplace=True``).
There are two obvious kinds of sorting that you may be interested in: sorting
-by label and sorting by actual values. The primary method for sorting axis
-labels (indexes) across data structures is the :meth:`~DataFrame.sort_index` method.
+by label and sorting by actual values.
+
+By Index
+~~~~~~~~
+
+The primary method for sorting axis
+labels (indexes) are the ``Series.sort_index()`` and the ``DataFrame.sort_index()`` methods.
.. ipython:: python
unsorted_df = df.reindex(index=['a', 'd', 'c', 'b'],
columns=['three', 'two', 'one'])
+
+ # DataFrame
unsorted_df.sort_index()
unsorted_df.sort_index(ascending=False)
unsorted_df.sort_index(axis=1)
-:meth:`DataFrame.sort_index` can accept an optional ``by`` argument for ``axis=0``
+ # Series
+ unsorted_df['three'].sort_index()
+
+By Values
+~~~~~~~~~
+
+The :meth:`Series.sort_values` and :meth:`DataFrame.sort_values` are the entry points for **value** sorting (that is the values in a column or row).
+:meth:`DataFrame.sort_values` can accept an optional ``by`` argument for ``axis=0``
which will use an arbitrary vector or a column name of the DataFrame to
determine the sort order:
.. ipython:: python
df1 = pd.DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]})
- df1.sort_index(by='two')
+ df1.sort_values(by='two')
The ``by`` argument can take a list of column names, e.g.:
@@ -1448,9 +1467,7 @@ The ``by`` argument can take a list of column names, e.g.:
df1[['one', 'two', 'three']].sort_index(by=['one','two'])
-Series has the method :meth:`~Series.order` (analogous to `R's order function
-<http://stat.ethz.ch/R-manual/R-patched/library/base/html/order.html>`__) which
-sorts by value, with special treatment of NA values via the ``na_position``
+These methods have special treatment of NA values via the ``na_position``
argument:
.. ipython:: python
@@ -1459,11 +1476,11 @@ argument:
s.order()
s.order(na_position='first')
-.. note::
- :meth:`Series.sort` sorts a Series by value in-place. This is to provide
- compatibility with NumPy methods which expect the ``ndarray.sort``
- behavior. :meth:`Series.order` returns a copy of the sorted data.
+.. _basics.searchsorted:
+
+searchsorted
+~~~~~~~~~~~~
Series has the :meth:`~Series.searchsorted` method, which works similar to
:meth:`numpy.ndarray.searchsorted`.
@@ -1493,7 +1510,7 @@ faster than sorting the entire Series and calling ``head(n)`` on the result.
s = pd.Series(np.random.permutation(10))
s
- s.order()
+ s.sort_values()
s.nsmallest(3)
s.nlargest(3)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 7e69a8044a305..bba1db0e25349 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -14,6 +14,7 @@ users upgrade to this version.
Highlights include:
- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
+- The sorting API has been revamped to remove some long-time inconsistencies, see :ref:`here <whatsnew_0170.api_breaking.sorting>`
- The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
- The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
@@ -187,6 +188,65 @@ Other enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. _whatsnew_0170.api_breaking.sorting:
+
+Changes to sorting API
+^^^^^^^^^^^^^^^^^^^^^^
+
+The sorting API has had some longtime inconsistencies. (:issue:`9816`,:issue:`8239`).
+
+Here is a summary of the **prior** to 0.17.0 API
+
+- ``Series.sort`` is **INPLACE** while ``DataFrame.sort`` returns a new object.
+- ``Series.order`` returned a new object
+- It was possible to use ``Series/DataFrame.sort_index`` to sort by **values** by passing the ``by`` keyword.
+- ``Series/DataFrame.sortlevel`` worked only on a ``MultiIndex`` for sorting by index.
+
+To address these issues, we have revamped the API:
+
+- We have introduced a new method, :meth:`DataFrame.sort_values`, which is the merger of ``DataFrame.sort()``, ``Series.sort()``,
+ and ``Series.order``, to handle sorting of **values**.
+- The existing method ``Series.sort()`` has been deprecated and will be removed in a
+ future version of pandas.
+- The ``by`` argument of ``DataFrame.sort_index()`` has been deprecated and will be removed in a future version of pandas.
+- The methods ``DataFrame.sort()``, ``Series.order()``, will not be recommended to use and will carry a deprecation warning
+ in the doc-string.
+- The existing method ``.sort_index()`` will gain the ``level`` keyword to enable level sorting.
+
+We now have two distinct and non-overlapping methods of sorting. A ``*`` marks items that
+will show a ``FutureWarning``.
+
+To sort by the **values**:
+
+================================= ====================================
+Previous Replacement
+================================= ====================================
+\*``Series.order()`` ``Series.sort_values()``
+\*``Series.sort()`` ``Series.sort_values(inplace=True)``
+\*``DataFrame.sort(columns=...)`` ``DataFrame.sort_values(by=...)``
+================================= ====================================
+
+To sort by the **index**:
+
+================================= ====================================
+Previous Equivalent
+================================= ====================================
+``Series.sort_index()`` ``Series.sort_index()``
+``Series.sortlevel(level=...)`` ``Series.sort_index(level=...``)
+``DataFrame.sort_index()`` ``DataFrame.sort_index()``
+``DataFrame.sortlevel(level=...)`` ``DataFrame.sort_index(level=...)``
+\*``DataFrame.sort()`` ``DataFrame.sort_index()``
+================================== ====================================
+
+We have also deprecated and changed similar methods in two Series-like classes, ``Index`` and ``Categorical``.
+
+================================== ====================================
+Previous Replacement
+================================== ====================================
+\*``Index.order()`` ``Index.sort_values()``
+\*``Categorical.order()`` ``Categorical.sort_values``
+================================== ====================================
+
.. _whatsnew_0170.api_breaking.to_datetime:
Changes to to_datetime and to_timedelta
@@ -570,7 +630,7 @@ Removal of prior version deprecations/changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`)
-
+- Removal of ``na_last`` parameters from ``Series.order()`` and ``Series.sort()``, in favor of ``na_position``, xref (:issue:`5231`)
.. _whatsnew_0170.performance:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index b0c7ff43bc7d8..4bcb24b684ed1 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -262,9 +262,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
result.index = bins[:-1]
if sort:
- result.sort()
- if not ascending:
- result = result[::-1]
+ result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(values.size)
@@ -497,7 +495,7 @@ def select_n_slow(dropped, n, take_last, method):
reverse_it = take_last or method == 'nlargest'
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
- return dropped[slc].order(ascending=ascending).head(n)
+ return dropped[slc].sort_values(ascending=ascending).head(n)
_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index c9e30ea31dab8..0e61170471dcc 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1083,7 +1083,7 @@ def argsort(self, ascending=True, **kwargs):
result = result[::-1]
return result
- def order(self, inplace=False, ascending=True, na_position='last'):
+ def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Category by category value returning a new Categorical by default.
Only ordered Categoricals can be sorted!
@@ -1092,10 +1092,10 @@ def order(self, inplace=False, ascending=True, na_position='last'):
Parameters
----------
- ascending : boolean, default True
- Sort ascending. Passing False sorts descending
inplace : boolean, default False
Do operation in place.
+ ascending : boolean, default True
+ Sort ascending. Passing False sorts descending
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
@@ -1139,6 +1139,37 @@ def order(self, inplace=False, ascending=True, na_position='last'):
return Categorical(values=codes,categories=self.categories, ordered=self.ordered,
fastpath=True)
+ def order(self, inplace=False, ascending=True, na_position='last'):
+ """
+ DEPRECATED: use :meth:`Categorical.sort_values`
+
+ Sorts the Category by category value returning a new Categorical by default.
+
+ Only ordered Categoricals can be sorted!
+
+ Categorical.sort is the equivalent but sorts the Categorical inplace.
+
+ Parameters
+ ----------
+ inplace : boolean, default False
+ Do operation in place.
+ ascending : boolean, default True
+ Sort ascending. Passing False sorts descending
+ na_position : {'first', 'last'} (optional, default='last')
+ 'first' puts NaNs at the beginning
+ 'last' puts NaNs at the end
+
+ Returns
+ -------
+ y : Category or None
+
+ See Also
+ --------
+ Category.sort
+ """
+ warn("order is deprecated, use sort_values(...)",
+ FutureWarning, stacklevel=2)
+ return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position)
def sort(self, inplace=True, ascending=True, na_position='last'):
""" Sorts the Category inplace by category value.
@@ -1163,10 +1194,10 @@ def sort(self, inplace=True, ascending=True, na_position='last'):
See Also
--------
- Category.order
+ Category.sort_values
"""
- return self.order(inplace=inplace, ascending=ascending,
- na_position=na_position)
+ return self.sort_values(inplace=inplace, ascending=ascending,
+ na_position=na_position)
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
diff --git a/pandas/core/common.py b/pandas/core/common.py
index aaa341240f538..53cd5ca9aa78b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2155,6 +2155,9 @@ def _mut_exclusive(**kwargs):
return val2
+def _not_none(*args):
+ return (arg for arg in args if arg is not None)
+
def _any_none(*args):
for arg in args:
if arg is None:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 062cbe579785c..2bec83d7b094d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -23,7 +23,7 @@
import numpy as np
import numpy.ma as ma
-from pandas.core.common import (isnull, notnull, PandasError, _try_sort,
+from pandas.core.common import (isnull, notnull, PandasError, _try_sort, _not_none,
_default_index, _maybe_upcast, is_sequence,
_infer_dtype_from_scalar, _values_from_object,
is_list_like, _maybe_box_datetimelike,
@@ -2949,9 +2949,71 @@ def f(vals):
#----------------------------------------------------------------------
# Sorting
+ @Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
+ def sort_values(self, by, axis=0, ascending=True, inplace=False,
+ kind='quicksort', na_position='last'):
+
+ axis = self._get_axis_number(axis)
+ labels = self._get_axis(axis)
+
+ if axis != 0:
+ raise ValueError('When sorting by column, axis must be 0 '
+ '(rows)')
+ if not isinstance(by, list):
+ by = [by]
+ if com.is_sequence(ascending) and len(by) != len(ascending):
+ raise ValueError('Length of ascending (%d) != length of by'
+ ' (%d)' % (len(ascending), len(by)))
+ if len(by) > 1:
+ from pandas.core.groupby import _lexsort_indexer
+
+ def trans(v):
+ if com.needs_i8_conversion(v):
+ return v.view('i8')
+ return v
+ keys = []
+ for x in by:
+ k = self[x].values
+ if k.ndim == 2:
+ raise ValueError('Cannot sort by duplicate column %s' % str(x))
+ keys.append(trans(k))
+ indexer = _lexsort_indexer(keys, orders=ascending,
+ na_position=na_position)
+ indexer = com._ensure_platform_int(indexer)
+ else:
+ from pandas.core.groupby import _nargsort
+
+ by = by[0]
+ k = self[by].values
+ if k.ndim == 2:
+
+ # try to be helpful
+ if isinstance(self.columns, MultiIndex):
+ raise ValueError('Cannot sort by column %s in a multi-index'
+ ' you need to explicity provide all the levels'
+ % str(by))
+
+ raise ValueError('Cannot sort by duplicate column %s'
+ % str(by))
+ if isinstance(ascending, (tuple, list)):
+ ascending = ascending[0]
+
+ indexer = _nargsort(k, kind=kind, ascending=ascending,
+ na_position=na_position)
+
+ new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis),
+ convert=False, verify=False)
+
+ if inplace:
+ return self._update_inplace(new_data)
+ else:
+ return self._constructor(new_data).__finalize__(self)
+
def sort(self, columns=None, axis=0, ascending=True,
inplace=False, kind='quicksort', na_position='last'):
"""
+ DEPRECATED: use :meth:`DataFrame.sort_values`
+
Sort DataFrame either by labels (along either axis) or by the values in
column(s)
@@ -2982,93 +3044,41 @@ def sort(self, columns=None, axis=0, ascending=True,
-------
sorted : DataFrame
"""
- return self.sort_index(by=columns, axis=axis, ascending=ascending,
- inplace=inplace, kind=kind, na_position=na_position)
- def sort_index(self, axis=0, by=None, ascending=True, inplace=False,
- kind='quicksort', na_position='last'):
- """
- Sort DataFrame either by labels (along either axis) or by the values in
- a column
+ if columns is None:
+ warnings.warn("sort(....) is deprecated, use sort_index(.....)",
+ FutureWarning, stacklevel=2)
+ return self.sort_index(axis=axis, ascending=ascending, inplace=inplace)
- Parameters
- ----------
- axis : {0 or 'index', 1 or 'columns'}, default 0
- Sort index/rows versus columns
- by : object
- Column name(s) in frame. Accepts a column name or a list
- for a nested sort. A tuple will be interpreted as the
- levels of a multi-index.
- ascending : boolean or list, default True
- Sort ascending vs. descending. Specify list for multiple sort
- orders
- inplace : boolean, default False
- Sort the DataFrame without creating a new instance
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
- This option is only applied when sorting on a single column or label.
+ warnings.warn("sort(columns=....) is deprecated, use sort_values(by=.....)",
+ FutureWarning, stacklevel=2)
+ return self.sort_values(by=columns, axis=axis, ascending=ascending,
+ inplace=inplace, kind=kind, na_position=na_position)
- Examples
- --------
- >>> result = df.sort_index(by=['A', 'B'], ascending=[True, False])
+ @Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
+ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
+ kind='quicksort', na_position='last', sort_remaining=True, by=None):
- Returns
- -------
- sorted : DataFrame
- """
+ # 10726
+ if by is not None:
+ warnings.warn("by argument to sort_index is deprecated, pls use .sort_values(by=...)",
+ FutureWarning, stacklevel=2)
+ if level is not None:
+ raise ValueError("unable to simultaneously sort by and level")
+ return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
- from pandas.core.groupby import _lexsort_indexer, _nargsort
- axis = self._get_axis_number(axis)
- if axis not in [0, 1]: # pragma: no cover
- raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
+ axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
- if by is not None:
- if axis != 0:
- raise ValueError('When sorting by column, axis must be 0 '
- '(rows)')
- if not isinstance(by, list):
- by = [by]
- if com.is_sequence(ascending) and len(by) != len(ascending):
- raise ValueError('Length of ascending (%d) != length of by'
- ' (%d)' % (len(ascending), len(by)))
- if len(by) > 1:
- def trans(v):
- if com.needs_i8_conversion(v):
- return v.view('i8')
- return v
- keys = []
- for x in by:
- k = self[x].values
- if k.ndim == 2:
- raise ValueError('Cannot sort by duplicate column %s' % str(x))
- keys.append(trans(k))
- indexer = _lexsort_indexer(keys, orders=ascending,
- na_position=na_position)
- indexer = com._ensure_platform_int(indexer)
- else:
- by = by[0]
- k = self[by].values
- if k.ndim == 2:
-
- # try to be helpful
- if isinstance(self.columns, MultiIndex):
- raise ValueError('Cannot sort by column %s in a multi-index'
- ' you need to explicity provide all the levels'
- % str(by))
-
- raise ValueError('Cannot sort by duplicate column %s'
- % str(by))
- if isinstance(ascending, (tuple, list)):
- ascending = ascending[0]
+ # sort by the index
+ if level is not None:
- indexer = _nargsort(k, kind=kind, ascending=ascending,
- na_position=na_position)
+ new_axis, indexer = labels.sortlevel(level, ascending=ascending,
+ sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
+ from pandas.core.groupby import _lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
@@ -3077,13 +3087,13 @@ def trans(v):
indexer = _lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
- indexer = com._ensure_platform_int(indexer)
else:
+ from pandas.core.groupby import _nargsort
+
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
- bm_axis = self._get_block_manager_axis(axis)
- new_data = self._data.take(indexer, axis=bm_axis,
+ new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
@@ -3111,30 +3121,15 @@ def sortlevel(self, level=0, axis=0, ascending=True,
Returns
-------
sorted : DataFrame
- """
- axis = self._get_axis_number(axis)
- the_axis = self._get_axis(axis)
- if not isinstance(the_axis, MultiIndex):
- raise TypeError('can only sort by level with a hierarchical index')
- new_axis, indexer = the_axis.sortlevel(level, ascending=ascending,
- sort_remaining=sort_remaining)
-
- if self._is_mixed_type and not inplace:
- ax = 'index' if axis == 0 else 'columns'
+ See Also
+ --------
+ DataFrame.sort_index(level=...)
- if new_axis.is_unique:
- return self.reindex(**{ax: new_axis})
- else:
- return self.take(indexer, axis=axis, convert=False)
+ """
+ return self.sort_index(level=level, axis=axis, ascending=ascending,
+ inplace=inplace, sort_remaining=sort_remaining)
- bm_axis = self._get_block_manager_axis(axis)
- new_data = self._data.take(indexer, axis=bm_axis,
- convert=False, verify=False)
- if inplace:
- return self._update_inplace(new_data)
- else:
- return self._constructor(new_data).__finalize__(self)
def _nsorted(self, columns, n, method, take_last):
if not com.is_list_like(columns):
@@ -3142,8 +3137,8 @@ def _nsorted(self, columns, n, method, take_last):
columns = list(columns)
ser = getattr(self[columns[0]], method)(n, take_last=take_last)
ascending = dict(nlargest=False, nsmallest=True)[method]
- return self.loc[ser.index].sort(columns, ascending=ascending,
- kind='mergesort')
+ return self.loc[ser.index].sort_values(columns, ascending=ascending,
+ kind='mergesort')
def nlargest(self, n, columns, take_last=False):
"""Get the rows of a DataFrame sorted by the `n` largest
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2fc288de438b3..1cd7046fa678e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -922,7 +922,7 @@ def to_hdf(self, path_or_buf, key, **kwargs):
in the store wherever possible
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
- dropna : boolean, default False.
+ dropna : boolean, default False.
If true, ALL nan rows will not be written to store.
"""
@@ -1683,25 +1683,74 @@ def add_suffix(self, suffix):
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data).__finalize__(self)
- def sort_index(self, axis=0, ascending=True):
+ _shared_docs['sort_values'] = """
+ Sort by the values along either axis
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ by : string name or list of names which refer to the axis items
+ axis : %(axes)s to direct sorting
+ ascending : bool or list of bool
+ Sort ascending vs. descending. Specify list for multiple sort orders.
+ If this is a list of bools, must match the length of the by
+ inplace : bool
+ if True, perform operation in-place
+ kind : {`quicksort`, `mergesort`, `heapsort`}
+ Choice of sorting algorithm. See also ndarray.np.sort for more information.
+ `mergesort` is the only stable algorithm. For DataFrames, this option is
+ only applied when sorting on a single column or label.
+ na_position : {'first', 'last'}
+ `first` puts NaNs at the beginning, `last` puts NaNs at the end
+
+ Returns
+ -------
+ sorted_obj : %(klass)s
"""
+ def sort_values(self, by, axis=0, ascending=True, inplace=False,
+ kind='quicksort', na_position='last'):
+ raise AbstractMethodError(self)
+
+ _shared_docs['sort_index'] = """
Sort object by labels (along an axis)
Parameters
----------
- axis : {0, 1}
- Sort index/rows versus columns
+ axis : %(axes)s to direct sorting
+ level : int or level name or list of ints or list of level names
+ if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
+ inplace : bool
+ if True, perform operation in-place
+ kind : {`quicksort`, `mergesort`, `heapsort`}
+ Choice of sorting algorithm. See also ndarray.np.sort for more information.
+ `mergesort` is the only stable algorithm. For DataFrames, this option is
+ only applied when sorting on a single column or label.
+ na_position : {'first', 'last'}
+ `first` puts NaNs at the beginning, `last` puts NaNs at the end
+ sort_remaining : bool
+ if true and sorting by level and index is multilevel, sort by other levels
+ too (in order) after sorting by specified level
Returns
-------
- sorted_obj : type of caller
+ sorted_obj : %(klass)s
"""
+
+ @Appender(_shared_docs['sort_index'] % dict(axes="axes", klass="NDFrame"))
+ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
+ kind='quicksort', na_position='last', sort_remaining=True):
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
+ if level is not None:
+ raise NotImplementedError("level is not implemented")
+ if inplace:
+ raise NotImplementedError("inplace is not implemented")
+
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
@@ -2910,7 +2959,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
use the actual numerical values of the index
* 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all
wrappers around the scipy interpolation methods of similar
- names. These use the actual numerical values of the index. See
+ names. These use the actual numerical values of the index. See
the scipy documentation for more on their behavior:
http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 12ad8a590c304..9f7221df149bc 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1332,7 +1332,7 @@ def asof_locs(self, where, mask):
return result
- def order(self, return_indexer=False, ascending=True):
+ def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
@@ -1347,8 +1347,38 @@ def order(self, return_indexer=False, ascending=True):
else:
return sorted_index
+ def order(self, return_indexer=False, ascending=True):
+ """
+ Return sorted copy of Index
+
+ DEPRECATED: use :meth:`Index.sort_values`
+ """
+ warnings.warn("order is deprecated, use sort_values(...)",
+ FutureWarning, stacklevel=2)
+ return self.sort_values(return_indexer=return_indexer, ascending=ascending)
+
def sort(self, *args, **kwargs):
- raise TypeError('Cannot sort an %r object' % self.__class__.__name__)
+ raise TypeError("cannot sort an Index object in-place, use sort_values instead")
+
+ def sortlevel(self, level=None, ascending=True, sort_remaining=None):
+ """
+
+    For internal compatibility with the Index API
+
+ Sort the Index. This is for compat with MultiIndex
+
+ Parameters
+ ----------
+ ascending : boolean, default True
+ False to sort in descending order
+
+    level, sort_remaining are compat parameters
+
+ Returns
+ -------
+ sorted_index : Index
+ """
+ return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
@@ -4864,6 +4894,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
+ Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level.
Returns
@@ -4872,30 +4903,41 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
from pandas.core.groupby import _indexer_from_factorized
- labels = list(self.labels)
- shape = list(self.levshape)
-
if isinstance(level, (compat.string_types, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
+ sortorder = None
+
+ # we have a directed ordering via ascending
+ if isinstance(ascending, list):
+ if not len(level) == len(ascending):
+ raise ValueError("level must have same length as ascending")
- # partition labels and shape
- primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
- primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
+ from pandas.core.groupby import _lexsort_indexer
+ indexer = _lexsort_indexer(self.labels, orders=ascending)
- if sort_remaining:
- primary += primary + tuple(labels)
- primshp += primshp + tuple(shape)
- sortorder = None
+ # level ordering
else:
- sortorder = level[0]
- indexer = _indexer_from_factorized(primary,
- primshp,
- compress=False)
+ labels = list(self.labels)
+ shape = list(self.levshape)
- if not ascending:
- indexer = indexer[::-1]
+ # partition labels and shape
+ primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
+ primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
+
+ if sort_remaining:
+ primary += primary + tuple(labels)
+ primshp += primshp + tuple(shape)
+ else:
+ sortorder = level[0]
+
+ indexer = _indexer_from_factorized(primary,
+ primshp,
+ compress=False)
+
+ if not ascending:
+ indexer = indexer[::-1]
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 87fde996aaa67..d5a7f770850bf 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1423,8 +1423,7 @@ def searchsorted(self, v, side='left', sorter=None):
See Also
--------
- Series.sort
- Series.order
+ Series.sort_values
numpy.searchsorted
Notes
@@ -1602,38 +1601,150 @@ def update(self, other):
#----------------------------------------------------------------------
# Reindexing, sorting
- def sort_index(self, ascending=True):
- """
- Sort object by labels (along an axis)
+ @Appender(generic._shared_docs['sort_values'] % _shared_doc_kwargs)
+ def sort_values(self, axis=0, ascending=True, inplace=False,
+ kind='quicksort', na_position='last'):
- Parameters
- ----------
- ascending : boolean or list, default True
- Sort ascending vs. descending. Specify list for multiple sort
- orders
+ axis = self._get_axis_number(axis)
- Examples
- --------
- >>> result1 = s.sort_index(ascending=False)
- >>> result2 = s.sort_index(ascending=[1, 0])
+ # GH 5856/5853
+ if inplace and self._is_cached:
+ raise ValueError("This Series is a view of some other array, to "
+ "sort in-place you must create a copy")
- Returns
- -------
- sorted_obj : Series
- """
+ def _try_kind_sort(arr):
+ # easier to ask forgiveness than permission
+ try:
+ # if kind==mergesort, it can fail for object dtype
+ return arr.argsort(kind=kind)
+ except TypeError:
+ # stable sort not available for object dtype
+ # uses the argsort default quicksort
+ return arr.argsort(kind='quicksort')
+
+ arr = self.values
+ sortedIdx = np.empty(len(self), dtype=np.int32)
+
+ bad = isnull(arr)
+
+ good = ~bad
+ idx = np.arange(len(self))
+
+ argsorted = _try_kind_sort(arr[good])
+
+ if not ascending:
+ argsorted = argsorted[::-1]
+
+ if na_position == 'last':
+ n = good.sum()
+ sortedIdx[:n] = idx[good][argsorted]
+ sortedIdx[n:] = idx[bad]
+ elif na_position == 'first':
+ n = bad.sum()
+ sortedIdx[n:] = idx[good][argsorted]
+ sortedIdx[:n] = idx[bad]
+ else:
+ raise ValueError('invalid na_position: {!r}'.format(na_position))
+
+ result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
+
+ if inplace:
+ self._update_inplace(result)
+ else:
+ return result.__finalize__(self)
+
+ @Appender(generic._shared_docs['sort_index'] % _shared_doc_kwargs)
+ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
+ sort_remaining=True):
+
+ axis = self._get_axis_number(axis)
index = self.index
- if isinstance(index, MultiIndex):
+ if level is not None:
+ new_index, indexer = index.sortlevel(level, ascending=ascending,
+ sort_remaining=sort_remaining)
+ elif isinstance(index, MultiIndex):
from pandas.core.groupby import _lexsort_indexer
indexer = _lexsort_indexer(index.labels, orders=ascending)
indexer = com._ensure_platform_int(indexer)
- new_labels = index.take(indexer)
+ new_index = index.take(indexer)
else:
- new_labels, indexer = index.order(return_indexer=True,
- ascending=ascending)
+ new_index, indexer = index.sort_values(return_indexer=True,
+ ascending=ascending)
new_values = self.values.take(indexer)
return self._constructor(new_values,
- index=new_labels).__finalize__(self)
+ index=new_index).__finalize__(self)
+
+ def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True):
+ """
+ DEPRECATED: use :meth:`Series.sort_values(inplace=True)` for INPLACE sorting
+
+ Sort values and index labels by value. This is an inplace sort by default.
+ Series.order is the equivalent but returns a new Series.
+
+ Parameters
+ ----------
+ axis : int (can only be zero)
+ ascending : boolean, default True
+ Sort ascending. Passing False sorts descending
+ kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
+ Choice of sorting algorithm. See np.sort for more
+ information. 'mergesort' is the only stable algorithm
+ na_position : {'first', 'last'} (optional, default='last')
+ 'first' puts NaNs at the beginning
+ 'last' puts NaNs at the end
+ inplace : boolean, default True
+ Do operation in place.
+
+ See Also
+ --------
+ Series.sort_values
+ """
+        warnings.warn("sort is deprecated, use sort_values(inplace=True) for INPLACE sorting",
+ FutureWarning, stacklevel=2)
+
+ return self.sort_values(ascending=ascending,
+ kind=kind,
+ na_position=na_position,
+ inplace=inplace)
+
+ def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False):
+ """
+ DEPRECATED: use :meth:`Series.sort_values`
+
+ Sorts Series object, by value, maintaining index-value link.
+ This will return a new Series by default. Series.sort is the equivalent but as an inplace method.
+
+ Parameters
+ ----------
+ na_last : boolean (optional, default=True) (DEPRECATED; use na_position)
+ Put NaN's at beginning or end
+ ascending : boolean, default True
+ Sort ascending. Passing False sorts descending
+ kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
+ Choice of sorting algorithm. See np.sort for more
+ information. 'mergesort' is the only stable algorithm
+ na_position : {'first', 'last'} (optional, default='last')
+ 'first' puts NaNs at the beginning
+ 'last' puts NaNs at the end
+ inplace : boolean, default False
+ Do operation in place.
+
+ Returns
+ -------
+ y : Series
+
+ See Also
+ --------
+ Series.sort_values
+ """
+ warnings.warn("order is deprecated, use sort_values(...)",
+ FutureWarning, stacklevel=2)
+
+ return self.sort_values(ascending=ascending,
+ kind=kind,
+ na_position=na_position,
+ inplace=inplace)
def argsort(self, axis=0, kind='quicksort', order=None):
"""
@@ -1701,114 +1812,6 @@ def rank(self, method='average', na_option='keep', ascending=True,
ascending=ascending, pct=pct)
return self._constructor(ranks, index=self.index).__finalize__(self)
- def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True):
- """
- Sort values and index labels by value. This is an inplace sort by default.
- Series.order is the equivalent but returns a new Series.
-
- Parameters
- ----------
- axis : int (can only be zero)
- ascending : boolean, default True
- Sort ascending. Passing False sorts descending
- kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
- Choice of sorting algorithm. See np.sort for more
- information. 'mergesort' is the only stable algorithm
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
- inplace : boolean, default True
- Do operation in place.
-
- See Also
- --------
- Series.order
- """
- return self.order(ascending=ascending,
- kind=kind,
- na_position=na_position,
- inplace=inplace)
-
- def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False):
- """
- Sorts Series object, by value, maintaining index-value link.
- This will return a new Series by default. Series.sort is the equivalent but as an inplace method.
-
- Parameters
- ----------
- na_last : boolean (optional, default=True) (DEPRECATED; use na_position)
- Put NaN's at beginning or end
- ascending : boolean, default True
- Sort ascending. Passing False sorts descending
- kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
- Choice of sorting algorithm. See np.sort for more
- information. 'mergesort' is the only stable algorithm
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
- inplace : boolean, default False
- Do operation in place.
-
- Returns
- -------
- y : Series
-
- See Also
- --------
- Series.sort
- """
-
- # GH 5856/5853
- if inplace and self._is_cached:
- raise ValueError("This Series is a view of some other array, to "
- "sort in-place you must create a copy")
-
- if na_last is not None:
- warnings.warn(("na_last is deprecated. Please use na_position instead"),
- FutureWarning)
- na_position = 'last' if na_last else 'first'
-
- def _try_kind_sort(arr):
- # easier to ask forgiveness than permission
- try:
- # if kind==mergesort, it can fail for object dtype
- return arr.argsort(kind=kind)
- except TypeError:
- # stable sort not available for object dtype
- # uses the argsort default quicksort
- return arr.argsort(kind='quicksort')
-
- arr = self.values
- sortedIdx = np.empty(len(self), dtype=np.int32)
-
- bad = isnull(arr)
-
- good = ~bad
- idx = np.arange(len(self))
-
- argsorted = _try_kind_sort(arr[good])
-
- if not ascending:
- argsorted = argsorted[::-1]
-
- if na_position == 'last':
- n = good.sum()
- sortedIdx[:n] = idx[good][argsorted]
- sortedIdx[n:] = idx[bad]
- elif na_position == 'first':
- n = bad.sum()
- sortedIdx[n:] = idx[good][argsorted]
- sortedIdx[:n] = idx[bad]
- else:
- raise ValueError('invalid na_position: {!r}'.format(na_position))
-
- result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
-
- if inplace:
- self._update_inplace(result)
- else:
- return result.__finalize__(self)
-
def nlargest(self, n=5, take_last=False):
"""Return the largest `n` elements.
@@ -1826,7 +1829,7 @@ def nlargest(self, n=5, take_last=False):
Notes
-----
- Faster than ``.order(ascending=False).head(n)`` for small `n` relative
+ Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative
to the size of the ``Series`` object.
See Also
@@ -1859,7 +1862,7 @@ def nsmallest(self, n=5, take_last=False):
Notes
-----
- Faster than ``.order().head(n)`` for small `n` relative to
+ Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
See Also
@@ -1889,15 +1892,13 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
Returns
-------
sorted : Series
- """
- if not isinstance(self.index, MultiIndex):
- raise TypeError('can only sort by level with a hierarchical index')
- new_index, indexer = self.index.sortlevel(level, ascending=ascending,
- sort_remaining=sort_remaining)
- new_values = self.values.take(indexer)
- return self._constructor(new_values,
- index=new_index).__finalize__(self)
+ See Also
+ --------
+ Series.sort_index(level=...)
+
+ """
+ return self.sort_index(level=level, ascending=ascending, sort_remaining=sort_remaining)
def swaplevel(self, i, j, copy=True):
"""
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 2c9ffe6b74536..8ef6363f836ae 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3973,7 +3973,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
values = self.selection.select_coords()
# delete the rows in reverse order
- l = Series(values).order()
+ l = Series(values).sort_values()
ln = len(l)
if ln:
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index 66c2bbde0b3f8..c577286ceca9a 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -137,9 +137,9 @@ def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None):
if sort is not None:
- df = df.sort(sort)
+ df = df.sort_values(sort)
else:
- df = df.sort()
+ df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
@@ -162,9 +162,9 @@ def _check_orient(df, orient, dtype=None, numpy=False,
raise
if sort is not None and sort in unser.columns:
- unser = unser.sort(sort)
+ unser = unser.sort_values(sort)
else:
- unser = unser.sort()
+ unser = unser.sort_index()
if dtype is False:
check_dtype=False
@@ -188,7 +188,7 @@ def _check_orient(df, orient, dtype=None, numpy=False,
unser.columns = [str(i) for i in unser.columns]
if sort is None:
- unser = unser.sort()
+ unser = unser.sort_index()
assert_almost_equal(df.values, unser.values)
else:
if convert_axes:
@@ -752,4 +752,4 @@ def my_handler_raises(obj):
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
- '--pdb-failure', '-s'], exit=False)
\ No newline at end of file
+ '--pdb-failure', '-s'], exit=False)
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index 43e1c5c89dd5e..974d06fb68137 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -1246,16 +1246,13 @@ def testDataFrameNumpyLabelled(self):
tm.assert_numpy_array_equal(df.index, outp.index)
def testSeries(self):
- s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
- s.sort()
+ s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15]).sort_values()
# column indexed
- outp = Series(ujson.decode(ujson.encode(s)))
- outp.sort()
+ outp = Series(ujson.decode(ujson.encode(s))).sort_values()
self.assertTrue((s == outp).values.all())
- outp = Series(ujson.decode(ujson.encode(s), numpy=True))
- outp.sort()
+ outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
self.assertTrue((s == outp).values.all())
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
@@ -1281,17 +1278,14 @@ def testSeries(self):
outp = Series(ujson.decode(ujson.encode(s, orient="values")))
self.assertTrue((s == outp).values.all())
- outp = Series(ujson.decode(ujson.encode(s, orient="index")))
- outp.sort()
+ outp = Series(ujson.decode(ujson.encode(s, orient="index"))).sort_values()
self.assertTrue((s == outp).values.all())
- outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True))
- outp.sort()
+ outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True)).sort_values()
self.assertTrue((s == outp).values.all())
def testSeriesNested(self):
- s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
- s.sort()
+ s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15]).sort_values()
nested = {'s1': s, 's2': s.copy()}
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 5b934bad38bd3..a9e93f909406b 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -906,8 +906,8 @@ def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
- parsed_115 = parsed_115.sort("srh")
- parsed_117 = parsed_117.sort("srh")
+ parsed_115 = parsed_115.sort_values("srh")
+ parsed_117 = parsed_117.sort_values("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index c9e4285d8b684..39eb72c0c3aa2 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -510,10 +510,8 @@ def test_value_counts_inferred(self):
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
- hist = s.value_counts(sort=False)
- hist.sort()
- expected = Series([3, 1, 4, 2], index=list('acbd'))
- expected.sort()
+ hist = s.value_counts(sort=False).sort_values()
+ expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
@@ -653,7 +651,7 @@ def test_factorize(self):
# sort by value, and create duplicates
if isinstance(o, Series):
- o.sort()
+ o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 680b370cbca41..19713984c9d7a 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1045,22 +1045,22 @@ def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a","b","b","a"], ordered=False)
- cat.order()
+ cat.sort_values()
cat.sort()
cat = Categorical(["a","c","b","d"], ordered=True)
- # order
- res = cat.order()
+ # sort_values
+ res = cat.sort_values()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True)
- res = cat.order()
+ res = cat.sort_values()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
- res = cat.order(ascending=False)
+ res = cat.sort_values(ascending=False)
exp = np.array(["d","c","b","a"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
@@ -1249,7 +1249,7 @@ def setUp(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
- df = df.sort(columns=['value'], ascending=True)
+ df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
self.cat = df
@@ -1665,7 +1665,7 @@ def test_assignment_to_dataframe(self):
df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')})
labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
- df = df.sort(columns=['value'], ascending=True)
+ df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
@@ -2548,25 +2548,29 @@ def test_count(self):
def test_sort(self):
- cat = Series(Categorical(["a","b","b","a"], ordered=False))
+ c = Categorical(["a","b","b","a"], ordered=False)
+ cat = Series(c)
+
+ # 9816 deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ c.order()
# sort in the categories order
expected = Series(Categorical(["a","a","b","b"], ordered=False),index=[0,3,1,2])
- result = cat.order()
+ result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a","c","b","d"], ordered=True))
-
- res = cat.order()
+ res = cat.sort_values()
exp = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True))
- res = cat.order()
+ res = cat.sort_values()
exp = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(res.__array__(), exp)
- res = cat.order(ascending=False)
+ res = cat.sort_values(ascending=False)
exp = np.array(["d","c","b","a"])
self.assert_numpy_array_equal(res.__array__(), exp)
@@ -2576,19 +2580,19 @@ def test_sort(self):
df = DataFrame({"unsort":raw_cat1,"sort":raw_cat2, "string":s, "values":[1,2,3,4]})
# Cats must be sorted in a dataframe
- res = df.sort(columns=["string"], ascending=False)
+ res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
- res = df.sort(columns=["sort"], ascending=False)
- exp = df.sort(columns=["string"], ascending=True)
+ res = df.sort_values(by=["sort"], ascending=False)
+ exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
- df.sort(columns=["unsort"], ascending=False)
+ df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
@@ -2597,18 +2601,18 @@ def test_sort(self):
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
- result = df.sort(columns=['grade'])
+ result = df.sort_values(by=['grade'])
expected = df.iloc[[1,2,5,0,3,4]]
tm.assert_frame_equal(result,expected)
# multi
- result = df.sort(columns=['grade', 'id'])
+ result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2,1,5,4,3,0]]
tm.assert_frame_equal(result,expected)
# reverse
cat = Categorical(["a","c","c","b","d"], ordered=True)
- res = cat.order(ascending=False)
+ res = cat.sort_values(ascending=False)
exp_val = np.array(["d","c", "c", "b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
@@ -2617,28 +2621,28 @@ def test_sort(self):
# some NaN positions
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
- res = cat.order(ascending=False, na_position='last')
+ res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a", np.nan],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
- res = cat.order(ascending=False, na_position='first')
+ res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
- res = cat.order(ascending=False, na_position='first')
+ res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
- res = cat.order(ascending=False, na_position='last')
+ res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a",np.nan],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8c836ae564e28..95426e8648a10 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -670,8 +670,8 @@ def test_setitem_cast(self):
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10,'event'] = 'foo'
- result = df.get_dtype_counts().order()
- expected = Series({'float64' : 3, 'object' : 1 }).order()
+ result = df.get_dtype_counts().sort_values()
+ expected = Series({'float64' : 3, 'object' : 1 }).sort_values()
assert_series_equal(result, expected)
def test_setitem_boolean_column(self):
@@ -1096,8 +1096,7 @@ def test_setitem_fancy_mixed_2d(self):
assert_frame_equal(df, expected)
def test_ix_align(self):
- b = Series(randn(10), name=0)
- b.sort()
+ b = Series(randn(10), name=0).sort_values()
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
@@ -4158,25 +4157,19 @@ def test_timedeltas(self):
df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),
B = Series([ timedelta(days=i) for i in range(3) ])))
- result = df.get_dtype_counts()
- expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 })
- result.sort()
- expected.sort()
+ result = df.get_dtype_counts().sort_values()
+ expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 }).sort_values()
assert_series_equal(result, expected)
df['C'] = df['A'] + df['B']
- expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 })
- result = df.get_dtype_counts()
- result.sort()
- expected.sort()
+ expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 }).sort_values()
+ result = df.get_dtype_counts().sort_values()
assert_series_equal(result, expected)
# mixed int types
df['D'] = 1
- expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 })
- result = df.get_dtype_counts()
- result.sort()
- expected.sort()
+ expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 }).sort_values()
+ result = df.get_dtype_counts().sort_values()
assert_series_equal(result, expected)
def test_operators_timedelta64(self):
@@ -7464,10 +7457,11 @@ def test_as_matrix_duplicates(self):
def test_ftypes(self):
frame = self.mixed_float
- expected = Series(dict(A = 'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense'))
- expected.sort()
- result = frame.ftypes
- result.sort()
+ expected = Series(dict(A = 'float32:dense',
+ B = 'float32:dense',
+ C = 'float16:dense',
+ D = 'float64:dense')).sort_values()
+ result = frame.ftypes.sort_values()
assert_series_equal(result,expected)
def test_values(self):
@@ -8389,13 +8383,13 @@ def test_fillna(self):
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A","B","C"], columns = [1,2,3,4,5])
- result = df.get_dtype_counts().order()
+ result = df.get_dtype_counts().sort_values()
expected = Series({ 'object' : 5 })
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A","B","C"], columns = [1,2,3,4,5])
- result = result.get_dtype_counts().order()
+ result = result.get_dtype_counts().sort_values()
expected = Series({ 'int64' : 5 })
assert_series_equal(result, expected)
@@ -11197,13 +11191,60 @@ def test_reorder_levels(self):
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
+ def test_sort_values(self):
+
+ # API for 9816
+
+ # sort_index
+ frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
+ columns=['A', 'B', 'C', 'D'])
+
+ # 9816 deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ frame.sort(columns='A')
+ with tm.assert_produces_warning(FutureWarning):
+ frame.sort()
+
+ unordered = frame.ix[[3, 2, 4, 1]]
+ expected = unordered.sort_index()
+
+ result = unordered.sort_index(axis=0)
+ assert_frame_equal(result, expected)
+
+ unordered = frame.ix[:, [2, 1, 3, 0]]
+ expected = unordered.sort_index(axis=1)
+
+ result = unordered.sort_index(axis=1)
+ assert_frame_equal(result, expected)
+ assert_frame_equal(result, expected)
+
+ # sortlevel
+ mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
+ df = DataFrame([[1, 2], [3, 4]], mi)
+
+ result = df.sort_index(level='A', sort_remaining=False)
+ expected = df.sortlevel('A', sort_remaining=False)
+ assert_frame_equal(result, expected)
+
+ df = df.T
+ result = df.sort_index(level='A', axis=1, sort_remaining=False)
+ expected = df.sortlevel('A', axis=1, sort_remaining=False)
+ assert_frame_equal(result, expected)
+
+ # MI sort, but no by
+ mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
+ df = DataFrame([[1, 2], [3, 4]], mi)
+ result = df.sort_index(sort_remaining=False)
+ expected = df.sort_index()
+ assert_frame_equal(result, expected)
+
def test_sort_index(self):
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
- sorted_df = unordered.sort_index()
+ sorted_df = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(sorted_df, expected)
@@ -11222,46 +11263,42 @@ def test_sort_index(self):
assert_frame_equal(sorted_df, expected)
# by column
- sorted_df = frame.sort_index(by='A')
+ sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
- sorted_df = frame.sort_index(by='A', ascending=False)
+ sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
- sorted_df = frame.sort(columns='A', ascending=False)
+ sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
- sorted_df = frame.sort(columns=['A'], ascending=[False])
+ sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# check for now
- sorted_df = frame.sort(columns='A')
+ sorted_df = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected[::-1])
- expected = frame.sort_index(by='A')
+ expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
-
- sorted_df = frame.sort(columns=['A', 'B'], ascending=False)
- expected = frame.sort_index(by=['A', 'B'], ascending=False)
- assert_frame_equal(sorted_df, expected)
-
- sorted_df = frame.sort(columns=['A', 'B'])
+ expected = frame.sort_values(by=['A', 'B'], ascending=False)
+ sorted_df = frame.sort_values(by=['A', 'B'])
assert_frame_equal(sorted_df, expected[::-1])
- self.assertRaises(ValueError, frame.sort_index, axis=2, inplace=True)
+ self.assertRaises(ValueError, lambda : frame.sort_values(by=['A','B'], axis=2, inplace=True))
msg = 'When sorting by column, axis must be 0'
with assertRaisesRegexp(ValueError, msg):
- frame.sort_index(by='A', axis=1)
+ frame.sort_values(by='A', axis=1)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with assertRaisesRegexp(ValueError, msg):
- frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5)
+ frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_index_categorical_index(self):
@@ -11287,14 +11324,14 @@ def test_sort_nan(self):
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
- sorted_df = df.sort(['A'], na_position='first')
+ sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
- sorted_df = df.sort(['A'], na_position='first', ascending=False)
+ sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
# na_position='last', order
@@ -11302,7 +11339,7 @@ def test_sort_nan(self):
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
- sorted_df = df.sort(['A','B'])
+ sorted_df = df.sort_values(['A','B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
@@ -11310,7 +11347,7 @@ def test_sort_nan(self):
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
- sorted_df = df.sort(['A','B'], na_position='first')
+ sorted_df = df.sort_values(['A','B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
@@ -11318,7 +11355,7 @@ def test_sort_nan(self):
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
- sorted_df = df.sort(['A','B'], ascending=[1,0], na_position='first')
+ sorted_df = df.sort_values(['A','B'], ascending=[1,0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
@@ -11326,7 +11363,7 @@ def test_sort_nan(self):
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
- sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='last')
+ sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
@@ -11335,28 +11372,28 @@ def test_sort_nan(self):
index = [1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
- sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last')
+ sorted_df = df.sort_index(kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index = [1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
- sorted_df = df.sort(na_position='first')
+ sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index = [nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
- sorted_df = df.sort(kind='quicksort', ascending=False)
+ sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index = [6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
- sorted_df = df.sort(kind='quicksort', ascending=False, na_position='first')
+ sorted_df = df.sort_index(kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index = [nan, 6, 5, 4, 3, 2, 1])
@@ -11366,8 +11403,8 @@ def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
- sorted_df = df.sort_index(by='sort_col', kind='mergesort',
- ascending=False)
+ sorted_df = df.sort_values(by='sort_col', kind='mergesort',
+ ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
@@ -11379,16 +11416,16 @@ def test_stable_descending_multicolumn_sort(self):
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
- sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='first',
- kind='mergesort')
+ sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='first',
+ kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
- sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first',
- kind='mergesort')
+ sorted_df = df.sort_values(['A','B'], ascending=[0,0], na_position='first',
+ kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
@@ -11400,18 +11437,27 @@ def test_sort_index_multicolumn(self):
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
- result = frame.sort_index(by=['A', 'B'])
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ frame.sort_index(by=['A', 'B'])
+ result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
- result = frame.sort_index(by=['A', 'B'], ascending=False)
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ frame.sort_index(by=['A', 'B'], ascending=False)
+ result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
- result = frame.sort_index(by=['B', 'A'])
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ frame.sort_index(by=['B', 'A'])
+ result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
@@ -11458,7 +11504,10 @@ def test_sort_index_different_sortorder(self):
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
- result = df.sort_index(by=['A', 'B'], ascending=[1, 0])
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ df.sort_index(by=['A', 'B'], ascending=[1, 0])
+ result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
@@ -11480,41 +11529,70 @@ def test_sort_inplace(self):
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
- sorted_df.sort(columns='A', inplace=True)
- expected = frame.sort_index(by='A')
+ sorted_df.sort_values(by='A', inplace=True)
+ expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
- sorted_df.sort(columns='A', ascending=False, inplace=True)
- expected = frame.sort_index(by='A', ascending=False)
+ sorted_df.sort_values(by='A', ascending=False, inplace=True)
+ expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
- sorted_df.sort(columns=['A', 'B'], ascending=False, inplace=True)
- expected = frame.sort_index(by=['A', 'B'], ascending=False)
+ sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
+ expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_index_duplicates(self):
+
+ ### with 9816, these are all translated to .sort_values
+
df = DataFrame([lrange(5,9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
- df.sort_index(by='a')
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ df.sort_index(by='a')
+ with assertRaisesRegexp(ValueError, 'duplicate'):
+ df.sort_values(by='a')
+
+ with assertRaisesRegexp(ValueError, 'duplicate'):
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ df.sort_index(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
- df.sort_index(by=['a'])
+ df.sort_values(by=['a'])
+
+ with assertRaisesRegexp(ValueError, 'duplicate'):
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ # multi-column 'by' is separate codepath
+ df.sort_index(by=['a', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# multi-column 'by' is separate codepath
- df.sort_index(by=['a', 'b'])
+ df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))
with assertRaisesRegexp(ValueError, 'levels'):
- df.sort_index(by='a')
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ df.sort_index(by='a')
+ with assertRaisesRegexp(ValueError, 'levels'):
+ df.sort_values(by='a')
# convert tuples to a list of tuples
- expected = df.sort_index(by=[('a',1)])
- result = df.sort_index(by=('a',1))
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ df.sort_index(by=[('a',1)])
+ expected = df.sort_values(by=[('a',1)])
+
+ # use .sort_values #9816
+ with tm.assert_produces_warning(FutureWarning):
+ df.sort_index(by=('a',1))
+ result = df.sort_values(by=('a',1))
assert_frame_equal(result, expected)
def test_sortlevel(self):
@@ -11540,21 +11618,21 @@ def test_sort_datetimes(self):
df['C'] = 2.
df['A1'] = 3.
- df1 = df.sort(columns='A')
- df2 = df.sort(columns=['A'])
+ df1 = df.sort_values(by='A')
+ df2 = df.sort_values(by=['A'])
assert_frame_equal(df1,df2)
- df1 = df.sort(columns='B')
- df2 = df.sort(columns=['B'])
+ df1 = df.sort_values(by='B')
+ df2 = df.sort_values(by=['B'])
assert_frame_equal(df1,df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with assertRaisesRegexp(ValueError, "This Series is a view"):
- s.sort()
+ s.sort_values(inplace=True)
cp = s.copy()
- cp.sort() # it works!
+ cp.sort_values() # it works!
def test_combine_first(self):
# disjoint
@@ -13632,7 +13710,7 @@ def test_construction_with_mixed(self):
df = DataFrame(data)
# check dtypes
- result = df.get_dtype_counts().order()
+ result = df.get_dtype_counts().sort_values()
expected = Series({ 'datetime64[ns]' : 3 })
# mixed-type frames
@@ -13640,11 +13718,11 @@ def test_construction_with_mixed(self):
self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')
self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')
- result = self.mixed_frame.get_dtype_counts().order()
+ result = self.mixed_frame.get_dtype_counts().sort_values()
expected = Series({ 'float64' : 4,
'object' : 1,
'datetime64[ns]' : 1,
- 'timedelta64[ns]' : 1}).order()
+ 'timedelta64[ns]' : 1}).sort_values()
assert_series_equal(result,expected)
def test_construction_with_conversions(self):
@@ -14311,7 +14389,7 @@ def _check_f(base, f):
_check_f(data.copy(), f)
# sort
- f = lambda x: x.sort('b', inplace=True)
+ f = lambda x: x.sort_values('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
@@ -14824,7 +14902,7 @@ def test_nlargest(self):
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nlargest(5, 'a')
- expected = df.sort('a', ascending=False).head(5)
+ expected = df.sort_values('a', ascending=False).head(5)
tm.assert_frame_equal(result, expected)
def test_nlargest_multiple_columns(self):
@@ -14833,7 +14911,7 @@ def test_nlargest_multiple_columns(self):
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nlargest(5, ['a', 'b'])
- expected = df.sort(['a', 'b'], ascending=False).head(5)
+ expected = df.sort_values(['a', 'b'], ascending=False).head(5)
tm.assert_frame_equal(result, expected)
def test_nsmallest(self):
@@ -14841,7 +14919,7 @@ def test_nsmallest(self):
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nsmallest(5, 'a')
- expected = df.sort('a').head(5)
+ expected = df.sort_values('a').head(5)
tm.assert_frame_equal(result, expected)
def test_nsmallest_multiple_columns(self):
@@ -14850,7 +14928,7 @@ def test_nsmallest_multiple_columns(self):
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nsmallest(5, ['a', 'c'])
- expected = df.sort(['a', 'c']).head(5)
+ expected = df.sort_values(['a', 'c']).head(5)
tm.assert_frame_equal(result, expected)
def test_to_panel_expanddim(self):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index feb3c10a729ae..a306b2887571c 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -533,10 +533,8 @@ def max_value(group):
return group.ix[group['value'].idxmax()]
applied = df.groupby('A').apply(max_value)
- result = applied.get_dtype_counts()
- result.sort()
- expected = Series({ 'object' : 2, 'float64' : 2, 'int64' : 1 })
- expected.sort()
+ result = applied.get_dtype_counts().sort_values()
+ expected = Series({ 'object' : 2, 'float64' : 2, 'int64' : 1 }).sort_values()
assert_series_equal(result,expected)
def test_groupby_return_type(self):
@@ -2197,11 +2195,11 @@ def test_apply_frame_to_series(self):
def test_apply_frame_concat_series(self):
def trans(group):
- return group.groupby('B')['C'].sum().order()[:2]
+ return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
- return grouped.sum().order()[:2]
+ return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
@@ -2223,7 +2221,7 @@ def test_apply_multikey_corner(self):
lambda x: x.month])
def f(group):
- return group.sort('A')[-5:]
+ return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
@@ -2284,7 +2282,7 @@ def test_apply_no_name_column_conflict(self):
# it works! #2605
grouped = df.groupby(['name', 'name2'])
- grouped.apply(lambda x: x.sort('value'))
+ grouped.apply(lambda x: x.sort_values('value',inplace=True))
def test_groupby_series_indexed_differently(self):
s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
@@ -3164,21 +3162,21 @@ def test_skip_group_keys(self):
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
- result = grouped.apply(lambda x: x.sort_index(by='A')[:3])
+ result = grouped.apply(lambda x: x.sort_values(by='A')[:3])
pieces = []
for key, group in grouped:
- pieces.append(group.sort_index(by='A')[:3])
+ pieces.append(group.sort_values(by='A')[:3])
expected = concat(pieces)
assert_frame_equal(result, expected)
grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
- result = grouped.apply(lambda x: x.order()[:3])
+ result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = []
for key, group in grouped:
- pieces.append(group.order()[:3])
+ pieces.append(group.sort_values()[:3])
expected = concat(pieces)
assert_series_equal(result, expected, check_names=False)
@@ -3924,7 +3922,7 @@ def test_groupby_with_timegrouper(self):
]})
# GH 6908 change target column's order
- df_reordered = df_original.sort(columns='Quantity')
+ df_reordered = df_original.sort_values(by='Quantity')
for df in [df_original, df_reordered]:
df = df.set_index(['Date'])
@@ -3962,7 +3960,7 @@ def test_groupby_with_timegrouper_methods(self):
DT.datetime(2013,12,2,14,0),
]})
- df_sorted = df_original.sort(columns='Quantity', ascending=False)
+ df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
df = df.set_index('Date', drop=False)
@@ -3995,7 +3993,7 @@ def test_timegrouper_with_reg_groups(self):
DT.datetime(2013,12,2,14,0),
]}).set_index('Date')
- df_sorted = df_original.sort(columns='Quantity', ascending=False)
+ df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
@@ -4037,7 +4035,7 @@ def test_timegrouper_with_reg_groups(self):
DT.datetime(2013,10,2,14,0),
]}).set_index('Date')
- df_sorted = df_original.sort(columns='Quantity', ascending=False)
+ df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
@@ -4146,7 +4144,7 @@ def test_timegrouper_get_group(self):
'Date' : [datetime(2013,9,1,13,0), datetime(2013,9,1,13,5),
datetime(2013,10,1,20,0), datetime(2013,10,3,10,0),
datetime(2013,12,2,12,0), datetime(2013,9,2,14,0),]})
- df_reordered = df_original.sort(columns='Quantity')
+ df_reordered = df_original.sort_values(by='Quantity')
# single grouping
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
@@ -4174,7 +4172,7 @@ def test_timegrouper_get_group(self):
# with index
df_original = df_original.set_index('Date')
- df_reordered = df_original.sort(columns='Quantity')
+ df_reordered = df_original.sort_values(by='Quantity')
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
@@ -4369,7 +4367,7 @@ def test_filter_against_workaround(self):
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
- assert_series_equal(new_way.order(), old_way.order())
+ assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100*Series(np.random.random(1000))
@@ -4378,7 +4376,7 @@ def test_filter_against_workaround(self):
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
- assert_series_equal(new_way.order(), old_way.order())
+ assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 9a3576a8fd846..c2e6cda500dab 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -232,6 +232,12 @@ def test_sort(self):
for ind in self.indices.values():
self.assertRaises(TypeError, ind.sort)
+ def test_order(self):
+ for ind in self.indices.values():
+ # 9816 deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ ind.order()
+
def test_mutability(self):
for ind in self.indices.values():
if not len(ind):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 2c0bfcd9b905d..a613115054385 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1698,7 +1698,7 @@ def loop(mi, df, keys):
for frame in a, b:
for i in range(5): # lexsort depth
- df = frame.copy() if i == 0 else frame.sort(columns=cols[:i])
+ df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
@@ -2958,7 +2958,7 @@ def test_non_unique_loc(self):
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,2)]))
# monotonic are ok
- df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]).sort(axis=0)
+ df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A' : [2,4,5,6], 'B' : [4, 6,7,8]}, index = [1,1,2,3])
assert_frame_equal(result,expected)
@@ -3866,10 +3866,9 @@ def f():
self.assertRaises(com.SettingWithCopyError, f)
df = DataFrame(np.random.randn(10,4))
- s = df.iloc[:,0]
- s = s.order()
- assert_series_equal(s,df.iloc[:,0].order())
- assert_series_equal(s,df[0].order())
+ s = df.iloc[:,0].sort_values()
+ assert_series_equal(s,df.iloc[:,0].sort_values())
+ assert_series_equal(s,df[0].sort_values())
# false positives GH6025
df = DataFrame ({'column1':['a', 'a', 'a'], 'column2': [4,8,9] })
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index be7ed6c1b268f..1bce047f3bf96 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -746,14 +746,11 @@ def test_getitem_partial_column_select(self):
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
- assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
- with assertRaisesRegexp(TypeError, 'hierarchical index'):
- self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEqual(a_sorted.index.names, self.frame.index.names)
@@ -935,7 +932,7 @@ def test_stack(self):
# columns unsorted
unstacked = self.ymd.unstack()
- unstacked = unstacked.sort(axis=1, ascending=False)
+ unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 36a8600e51725..06b14d0f0b609 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -9,6 +9,7 @@
from itertools import product, starmap
from distutils.version import LooseVersion
import warnings
+import random
import nose
@@ -2234,7 +2235,7 @@ def test_ix_setitem_corner(self):
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
- ordered = self.series.order()
+ ordered = self.series.sort_values()
# setting
copy = self.series.copy()
@@ -4869,43 +4870,45 @@ def test_drop_duplicates(self):
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
- def test_sort(self):
+ def test_sort_values(self):
+
ts = self.ts.copy()
- ts.sort()
- self.assert_numpy_array_equal(ts, self.ts.order())
- self.assert_numpy_array_equal(ts.index, self.ts.order().index)
+ # 9816 deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ ts.sort()
+
+ self.assert_numpy_array_equal(ts, self.ts.sort_values())
+ self.assert_numpy_array_equal(ts.index, self.ts.sort_values().index)
- ts.sort(ascending=False)
- self.assert_numpy_array_equal(ts, self.ts.order(ascending=False))
+ ts.sort_values(ascending=False, inplace=True)
+ self.assert_numpy_array_equal(ts, self.ts.sort_values(ascending=False))
self.assert_numpy_array_equal(ts.index,
- self.ts.order(ascending=False).index)
+ self.ts.sort_values(ascending=False).index)
# GH 5856/5853
- # Series.sort operating on a view
+ # Series.sort_values operating on a view
df = DataFrame(np.random.randn(10,4))
s = df.iloc[:,0]
def f():
- s.sort()
+ s.sort_values(inplace=True)
self.assertRaises(ValueError, f)
# test order/sort inplace
# GH6859
ts1 = self.ts.copy()
- ts1.sort(ascending=False)
+ ts1.sort_values(ascending=False, inplace=True)
ts2 = self.ts.copy()
- ts2.order(ascending=False,inplace=True)
+ ts2.sort_values(ascending=False, inplace=True)
assert_series_equal(ts1,ts2)
ts1 = self.ts.copy()
- ts1 = ts1.sort(ascending=False,inplace=False)
+ ts1 = ts1.sort_values(ascending=False, inplace=False)
ts2 = self.ts.copy()
- ts2 = ts.order(ascending=False)
+ ts2 = ts.sort_values(ascending=False)
assert_series_equal(ts1,ts2)
def test_sort_index(self):
- import random
-
rindex = list(self.ts.index)
random.shuffle(rindex)
@@ -4918,29 +4921,65 @@ def test_sort_index(self):
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
+ def test_sort_API(self):
+
+ # API for 9816
+
+ # sortlevel
+ mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
+ s = Series([1, 2], mi)
+ backwards = s.iloc[[1, 0]]
+
+ res = s.sort_index(level='A')
+ assert_series_equal(backwards, res)
+
+ # sort_index
+ rindex = list(self.ts.index)
+ random.shuffle(rindex)
+
+ random_order = self.ts.reindex(rindex)
+ sorted_series = random_order.sort_index(level=0)
+ assert_series_equal(sorted_series, self.ts)
+
+ # compat on axis
+ sorted_series = random_order.sort_index(axis=0)
+ assert_series_equal(sorted_series, self.ts)
+
+ self.assertRaises(ValueError, lambda : random_order.sort_values(axis=1))
+
+ sorted_series = random_order.sort_index(level=0, axis=0)
+ assert_series_equal(sorted_series, self.ts)
+
+ self.assertRaises(ValueError, lambda : random_order.sort_index(level=0, axis=1))
+
def test_order(self):
+
+ # 9816 deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ self.ts.order()
+
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
- result = ts.order()
+ result = ts.sort_values()
self.assertTrue(np.isnan(result[-5:]).all())
self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))
- result = ts.order(na_position='first')
+ result = ts.sort_values(na_position='first')
self.assertTrue(np.isnan(result[:5]).all())
self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
- ser.order()
+ ser.sort_values()
# ascending=False
- ordered = ts.order(ascending=False)
+ ordered = ts.sort_values(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
- ordered = ts.order(ascending=False, na_position='first')
+ ordered = ts.sort_values(ascending=False, na_position='first')
assert_almost_equal(expected, ordered.valid().values)
def test_nsmallest_nlargest(self):
@@ -4996,8 +5035,8 @@ def test_nsmallest_nlargest(self):
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
- assert_series_equal(s.nsmallest(len(s)), s.order())
- assert_series_equal(s.nsmallest(len(s) + 1), s.order())
+ assert_series_equal(s.nsmallest(len(s)), s.sort_values())
+ assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
@@ -7399,7 +7438,7 @@ def test_repeat(self):
def test_unique_data_ownership(self):
# it works! #1807
- Series(Series(["a", "c", "b"]).unique()).sort()
+ Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_datetime_timedelta_quantiles(self):
# covers #9694
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 727852ced25b0..0641de22d0d6a 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -151,7 +151,7 @@ def map(self, f):
except Exception:
return _algos.arrmap_object(self.asobject.values, f)
- def order(self, return_indexer=False, ascending=True):
+ def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
| closes #9816
closes #8239
```
Changes to sorting API
^^^^^^^^^^^^^^^^^^^^^^
The sorting API has had some longtime inconsistencies. (:issue:`9816`,:issue:`8239`).
Here is a summary of the **prior** to 0.17.0 API
- ``Series.sort`` is **INPLACE** while ``DataFrame.sort`` returns a new object.
- ``Series.order`` returned a new object
- It was possible to use ``Series/DataFrame.sort_index`` to sort by **values** by passing the ``by`` keyword.
- ``Series/DataFrame.sortlevel`` worked only on a ``MultiIndex`` for sorting by index.
To address these issues, we have revamped the API:
- We have introduced a new method, ``.sort_values()``, which is the merger of ``DataFrame.sort()``, ``Series.sort()``,
and ``Series.order``, to handle sorting of **values**.
- The existing method ``Series.sort()`` has been deprecated and will be removed in a
future version of pandas.
- The ``by`` argument of ``DataFrame.sort_index()`` has been deprecated and will be removed in a future version of pandas.
- The methods ``DataFrame.sort()``, ``Series.order()``, will not be recommended to use and will carry a deprecation warning
in the doc-string.
- The existing method ``.sort_index()`` will gain the ``level`` keyword to enable level sorting.
We now have two distinct and non-overlapping methods of sorting. A ``*`` marks items that
will show a ``PendingDeprecationWarning`` (normally suppressed by python), and a ``+`` marks items that
will show a ``FutureWarning``.
To sort by the **values**:
================================= ====================================
Previous Replacement
================================= ====================================
*``Series.order()`` ``Series.sort_values()``
+``Series.sort()`` ``Series.sort_values(inplace=True)``
``DataFrame.sort(columns=...)`` ``DataFrame.sort_values(by=...)``
================================= ====================================
To sort by the **index**:
================================= ====================================
Previous Equivalent
================================= ====================================
``Series.sort_index()`` ``Series.sort_index()``
``Series.sortlevel(level=...)`` ``Series.sort_index(level=...``)
``DataFrame.sort_index()`` ``DataFrame.sort_index()``
``DataFrame.sortlevel(level=...)`` ``DataFrame.sort_index(level=...)``
*``DataFrame.sort()`` ``DataFrame.sort_index()``
================================== ====================================
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10726 | 2015-08-02T17:00:55Z | 2015-08-21T14:02:20Z | 2015-08-21T14:02:20Z | 2015-08-22T20:23:48Z |
BUG: pd.unique should respect datetime64 and timedelta64 dtypes (GH9431) | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9a9054fcf0489..16c6c639a489e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -606,3 +606,4 @@ Bug Fixes
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
+- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c958a70b43089..b0c7ff43bc7d8 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -36,7 +36,7 @@ def match(to_match, values, na_sentinel=-1):
values = np.array(values, dtype='O')
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
- result = _hashtable_algo(f, values.dtype)
+ result = _hashtable_algo(f, values.dtype, np.int64)
if na_sentinel != -1:
@@ -66,7 +66,7 @@ def unique(values):
return _hashtable_algo(f, values.dtype)
-def _hashtable_algo(f, dtype):
+def _hashtable_algo(f, dtype, return_dtype=None):
"""
f(HashTable, type_caster) -> result
"""
@@ -74,6 +74,12 @@ def _hashtable_algo(f, dtype):
return f(htable.Float64HashTable, com._ensure_float64)
elif com.is_integer_dtype(dtype):
return f(htable.Int64HashTable, com._ensure_int64)
+ elif com.is_datetime64_dtype(dtype):
+ return_dtype = return_dtype or 'M8[ns]'
+ return f(htable.Int64HashTable, com._ensure_int64).view(return_dtype)
+ elif com.is_timedelta64_dtype(dtype):
+ return_dtype = return_dtype or 'm8[ns]'
+ return f(htable.Int64HashTable, com._ensure_int64).view(return_dtype)
else:
return f(htable.PyObjectHashTable, com._ensure_object)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 30dcd8631f13a..cf72f0e433634 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -235,6 +235,50 @@ def test_on_index_object(self):
tm.assert_almost_equal(result, expected)
+ def test_datetime64_dtype_array_returned(self):
+ # GH 9431
+ expected = np.array(['2015-01-03T00:00:00.000000000+0000',
+ '2015-01-01T00:00:00.000000000+0000'], dtype='M8[ns]')
+
+ dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
+ '2015-01-01T00:00:00.000000000+0000',
+ '2015-01-01T00:00:00.000000000+0000'])
+ result = algos.unique(dt_index)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ s = pd.Series(dt_index)
+ result = algos.unique(s)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ arr = s.values
+ result = algos.unique(arr)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+
+ def test_timedelta64_dtype_array_returned(self):
+ # GH 9431
+ expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
+
+ td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
+ result = algos.unique(td_index)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ s = pd.Series(td_index)
+ result = algos.unique(s)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+ arr = s.values
+ result = algos.unique(arr)
+ tm.assert_numpy_array_equal(result, expected)
+ self.assertEqual(result.dtype, expected.dtype)
+
+
+
class TestValueCounts(tm.TestCase):
_multiprocess_can_split_ = True
| To fix [GH9431](https://github.com/pydata/pandas/issues/9431).
Previously `pd.unique` would return an array of `object` dtype when passed a 1D array, Series or Index with a `datetime64` or `timedelta64` dtype. This PR should remedy that behaviour.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10724 | 2015-08-02T12:05:21Z | 2015-08-03T22:03:11Z | 2015-08-03T22:03:11Z | 2015-08-04T17:58:00Z |
BUG: concat of Series w/o names #10698 | diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index c62647010a131..5cb786d77cd1e 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -352,7 +352,24 @@ Passing ``ignore_index=True`` will drop all name references.
More concatenating with group keys
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Let's consider a variation on the first example presented:
+A fairly common use of the ``keys`` argument is to override the column names when creating a new DataFrame based on existing Series.
+Notice how the default behaviour consists on letting the resulting DataFrame inherits the parent Series' name, when these existed.
+
+.. ipython:: python
+
+ s3 = pd.Series([0, 1, 2, 3], name='foo')
+ s4 = pd.Series([0, 1, 2, 3])
+ s5 = pd.Series([0, 1, 4, 5])
+
+ pd.concat([s3, s4, s5], axis=1)
+
+Through the ``keys`` argument we can override the existing column names.
+
+.. ipython:: python
+
+ pd.concat([s3, s4, s5], axis=1, keys=['red','blue','yellow'])
+
+Let's consider now a variation on the very first example presented:
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e9d39e0441055..e9d7296dbeb0d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -226,6 +226,30 @@ Other enhancements
- ``DataFrame.apply`` will return a Series of dicts if the passed function returns a dict and ``reduce=True`` (:issue:`8735`).
+- ``concat`` will now use existing Series names if provided (:issue:`10698`).
+
+ .. ipython:: python
+
+ foo = pd.Series([1,2], name='foo')
+ bar = pd.Series([1,2])
+ baz = pd.Series([4,5])
+
+ Previous Behavior:
+
+ .. code-block:: python
+
+ In [1] pd.concat([foo, bar, baz], 1)
+ Out[1]:
+ 0 1 2
+ 0 1 1 4
+ 1 2 2 5
+
+ New Behavior:
+
+ .. ipython:: python
+
+ pd.concat([foo, bar, baz], 1)
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index a8b0d37b55bfe..0b5aad118e381 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -3,7 +3,7 @@
"""
import numpy as np
-from pandas.compat import range, long, lrange, lzip, zip, map, filter
+from pandas.compat import range, lrange, lzip, zip, map, filter
import pandas.compat as compat
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame, _merge_doc
@@ -15,7 +15,7 @@
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util.decorators import Appender, Substitution
-from pandas.core.common import ABCSeries
+from pandas.core.common import ABCSeries, isnull
import pandas.core.common as com
@@ -912,8 +912,14 @@ def get_result(self):
data = dict(zip(range(len(self.objs)), self.objs))
index, columns = self.new_axes
tmpdf = DataFrame(data, index=index)
- if columns is not None:
- tmpdf.columns = columns
+ # checks if the column variable already stores valid column names (because set via the 'key' argument
+ # in the 'concat' function call. If that's not the case, use the series names as column names
+ if columns.equals(Index(np.arange(len(self.objs)))) and not self.ignore_index:
+ columns = np.array([ data[i].name for i in range(len(data)) ], dtype='object')
+ indexer = isnull(columns)
+ if indexer.any():
+ columns[indexer] = np.arange(len(indexer[indexer]))
+ tmpdf.columns = columns
return tmpdf.__finalize__(self, method='concat')
# combine block managers
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index b7b7dd20a2045..bb359d386aae3 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1879,6 +1879,24 @@ def test_concat_dataframe_keys_bug(self):
self.assertEqual(list(result.columns), [('t1', 'value'),
('t2', 'value')])
+ def test_concat_series_partial_columns_names(self):
+ # GH10698
+ foo = pd.Series([1,2], name='foo')
+ bar = pd.Series([1,2])
+ baz = pd.Series([4,5])
+
+ result = pd.concat([foo, bar, baz], axis=1)
+ expected = DataFrame({'foo' : [1,2], 0 : [1,2], 1 : [4,5]}, columns=['foo',0,1])
+ tm.assert_frame_equal(result, expected)
+
+ result = pd.concat([foo, bar, baz], axis=1, keys=['red','blue','yellow'])
+ expected = DataFrame({'red' : [1,2], 'blue' : [1,2], 'yellow' : [4,5]}, columns=['red','blue','yellow'])
+ tm.assert_frame_equal(result, expected)
+
+ result = pd.concat([foo, bar, baz], axis=1, ignore_index=True)
+ expected = DataFrame({0 : [1,2], 1 : [1,2], 2 : [4,5]})
+ tm.assert_frame_equal(result, expected)
+
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
@@ -2412,7 +2430,7 @@ def test_concat_series_axis1(self):
s2.name = None
result = concat([s, s2], axis=1)
- self.assertTrue(np.array_equal(result.columns, lrange(2)))
+ self.assertTrue(np.array_equal(result.columns, Index(['A', 0], dtype='object')))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
| closes #10698
Let the result of 'concat' to inherit the parent Series' names. The Series' name (if present) will be used as the resulting DataFrame column name. When only one of the Series has a valid name, the resulting DataFrame will inherit the name only, and use a column name for the other columns the column index value.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10723 | 2015-08-02T11:42:29Z | 2015-09-02T11:54:14Z | 2015-09-02T11:54:14Z | 2015-09-02T11:54:22Z |
Clarify docstring: Series.sort_index() creates new instance (rather than sorting in-place) | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 506aa1a6eb51e..3c4ccb131ffff 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1582,7 +1582,8 @@ def update(self, other):
def sort_index(self, ascending=True):
"""
- Sort object by labels (along an axis)
+ Sort object by labels (along an axis). Creates a new instance (i.e.
+ does not sort in-place).
Parameters
----------
| https://api.github.com/repos/pandas-dev/pandas/pulls/10721 | 2015-08-01T20:42:22Z | 2015-08-01T21:00:21Z | null | 2015-08-01T21:14:03Z | |
DEPR: deprecate irow,icol,iget_value,iget in Series/DataFrame, #10711 | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 76e03ce70342f..2781ed170b889 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1558,7 +1558,6 @@ application to columns of a specific data type.
DataFrameGroupBy.hist
DataFrameGroupBy.idxmax
DataFrameGroupBy.idxmin
- DataFrameGroupBy.irow
DataFrameGroupBy.mad
DataFrameGroupBy.pct_change
DataFrameGroupBy.plot
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 618a2ae42c65f..9f58ee2f8b99b 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -121,18 +121,6 @@ the specification are assumed to be ``:``. (e.g. ``p.loc['a']`` is equiv to
DataFrame; ``df.loc[row_indexer,column_indexer]``
Panel; ``p.loc[item_indexer,major_indexer,minor_indexer]``
-Deprecations
-------------
-
-Beginning with version 0.11.0, it's recommended that you transition away from
-the following methods as they *may* be deprecated in future versions.
-
- - ``irow``
- - ``icol``
- - ``iget_value``
-
-See the section :ref:`Selection by Position <indexing.integer>` for substitutes.
-
.. _indexing.basics:
Basics
@@ -432,20 +420,14 @@ Select via integer list
df1.iloc[[1,3,5],[1,3]]
-For slicing rows explicitly (equiv to deprecated ``df.irow(slice(1,3))``).
-
.. ipython:: python
df1.iloc[1:3,:]
-For slicing columns explicitly (equiv to deprecated ``df.icol(slice(1,3))``).
-
.. ipython:: python
df1.iloc[:,1:3]
-For getting a scalar via integer position (equiv to deprecated ``df.get_value(1,1)``)
-
.. ipython:: python
# this is also equivalent to ``df1.iat[1,1]``
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index fe5e7371bddf6..bdbb984414182 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -434,6 +434,26 @@ Other API Changes
Deprecations
^^^^^^^^^^^^
+.. note:: These indexing function have been deprecated in the documentation since 0.11.0.
+
+- For ``Series`` the following indexing functions are deprecated (:issue:`10177`).
+ ===================== ==============================================================
+ Deprecated Function Replacement
+ ===================== ==============================================================
+ ``.irow(i)`` ``.iloc[i]`` or ``.iat[i]``
+ ``.iget(i)`` ``.iloc[i]`` or ``.iat[i]``
+ ``.iget_value(i)`` ``.iloc[i]`` or ``.iat[i]``
+ ===================== ==============================================================
+
+- For ``DataFrame`` the following indexing functions are deprecated (:issue:`10177`).
+ ===================== ==============================================================
+ Deprecated Function Replacement
+ ===================== ==============================================================
+ ``.irow(i)`` ``.iloc[i]``
+ ``.iget_value(i, j)`` ``.iloc[i, j]`` or ``.iat[i, j]``
+ ``.icol(j)`` ``.iloc[:, j]``
+ ===================== ==============================================================
+
.. _whatsnew_0170.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3770fc01462e8..b66bc97d8fa97 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -552,7 +552,7 @@ def iteritems(self):
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
- yield k, self.icol(i)
+ yield k, self._ixs(i,axis=1)
def iterrows(self):
"""
@@ -1697,9 +1697,20 @@ def set_value(self, index, col, value, takeable=False):
return self
def irow(self, i, copy=False):
+ """
+ DEPRECATED. Use ``.iloc[i]`` instead
+ """
+
+ warnings.warn("irow(i) is deprecated. Please use .iloc[i]",
+ FutureWarning, stacklevel=2)
return self._ixs(i, axis=0)
def icol(self, i):
+ """
+ DEPRECATED. Use ``.iloc[:, i]`` instead
+ """
+ warnings.warn("icol(i) is deprecated. Please use .iloc[:,i]",
+ FutureWarning, stacklevel=2)
return self._ixs(i, axis=1)
def _ixs(self, i, axis=0):
@@ -1773,6 +1784,11 @@ def _ixs(self, i, axis=0):
return result
def iget_value(self, i, j):
+ """
+ DEPRECATED. Use ``.iat[i, j]`` instead
+ """
+ warnings.warn("iget_value(i, j) is deprecated. Please use .iat[i, j]",
+ FutureWarning, stacklevel=2)
return self.iat[i, j]
def __getitem__(self, key):
@@ -3769,7 +3785,7 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
dtype = object if self._is_mixed_type else None
if axis == 0:
- series_gen = (self.icol(i) for i in range(len(self.columns)))
+ series_gen = (self._ixs(i,axis=1) for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
@@ -4900,11 +4916,11 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
if isinstance(data, DataFrame):
if columns is not None:
- arrays = [data.icol(i).values for i, col in enumerate(data.columns)
+ arrays = [data._ixs(i,axis=1).values for i, col in enumerate(data.columns)
if col in columns]
else:
columns = data.columns
- arrays = [data.icol(i).values for i in range(len(columns))]
+ arrays = [data._ixs(i,axis=1).values for i in range(len(columns))]
return arrays, columns
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 15c5429f81e88..d23cb39c15548 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -3,6 +3,7 @@
import numpy as np
import datetime
import collections
+import warnings
from pandas.compat import(
zip, builtins, range, long, lzip,
@@ -71,7 +72,7 @@
'fillna',
'mad',
'any', 'all',
- 'irow', 'take',
+ 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
@@ -170,7 +171,7 @@ class Grouper(object):
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object. For full specification of available frequencies, please see
- `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
+ `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
@@ -188,7 +189,7 @@ class Grouper(object):
Examples
--------
-
+
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
@@ -198,9 +199,9 @@ class Grouper(object):
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
- with a frequency of 60s
+ with a frequency of 60s
- >>> df.groupby(Grouper(level='date', freq='60s', axis=1))
+ >>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
@@ -711,6 +712,16 @@ def _iterate_slices(self):
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
+ def irow(self, i):
+ """
+ DEPRECATED. Use ``.nth(i)`` instead
+ """
+
+ # 10177
+ warnings.warn("irow(i) is deprecated. Please use .nth(i)",
+ FutureWarning, stacklevel=2)
+ return self.nth(i)
+
def mean(self):
"""
Compute mean of groups, excluding missing values
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 506aa1a6eb51e..47fdbbf777570 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -781,9 +781,30 @@ def reshape(self, *args, **kwargs):
return self.values.reshape(shape, **kwargs)
- iget_value = _ixs
- iget = _ixs
- irow = _ixs
+ def iget_value(self, i, axis=0):
+ """
+ DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead
+ """
+ warnings.warn("iget_value(i) is deprecated. Please use .iloc[i] or .iat[i]",
+ FutureWarning, stacklevel=2)
+ return self._ixs(i)
+
+ def iget(self, i, axis=0):
+ """
+ DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead
+ """
+
+ warnings.warn("iget(i) is deprecated. Please use .iloc[i] or .iat[i]",
+ FutureWarning, stacklevel=2)
+ return self._ixs(i)
+
+ def irow(self, i, axis=0):
+ """
+ DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead
+ """
+ warnings.warn("irow(i) is deprecated. Please use .iloc[i] or iat[i]",
+ FutureWarning, stacklevel=2)
+ return self._ixs(i)
def get_value(self, label, takeable=False):
"""
@@ -2323,7 +2344,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None,
sep=sep, parse_dates=parse_dates,
encoding=encoding,
infer_datetime_format=infer_datetime_format)
- result = df.icol(0)
+ result = df.iloc[:,0]
if header is None:
result.index.name = result.name = None
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index cadf008fb40fb..103f3992f950a 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -1240,8 +1240,10 @@ def test_getitem(self):
self.assertRaises(Exception, sdf.__getitem__, ['a', 'd'])
def test_icol(self):
+ # 10711 deprecated
+
# 2227
- result = self.frame.icol(0)
+ result = self.frame.iloc[:, 0]
self.assertTrue(isinstance(result, SparseSeries))
assert_sp_series_equal(result, self.frame['A'])
@@ -1249,7 +1251,7 @@ def test_icol(self):
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
self.assertEqual(type(iframe['A'].sp_index),
- type(iframe.icol(0).sp_index))
+ type(iframe.iloc[:, 0].sp_index))
def test_set_value(self):
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 41c487adc0d6e..a85fd52ed6eb3 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -2203,27 +2203,27 @@ def test_slicing_and_getting_ops(self):
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
- res_row = df.irow(2)
+ res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
- res_df = df.irow(slice(2,4))
+ res_df = df.iloc[slice(2,4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
- res_df = df.irow([2,3])
+ res_df = df.iloc[[2,3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
- res_col = df.icol(0)
+ res_col = df.iloc[:,0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
- res_df = df.icol(slice(0,2))
+ res_df = df.iloc[:,slice(0,2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
- res_df = df.icol([0,1])
+ res_df = df.iloc[:,[0,1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 1ca23c124e250..19fd45cdf6ad2 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -97,7 +97,7 @@ def run_arithmetic_test(self, df, other, assert_func, check_dtype=False,
def test_integer_arithmetic(self):
self.run_arithmetic_test(self.integer, self.integer,
assert_frame_equal)
- self.run_arithmetic_test(self.integer.icol(0), self.integer.icol(0),
+ self.run_arithmetic_test(self.integer.iloc[:,0], self.integer.iloc[:, 0],
assert_series_equal, check_dtype=True)
@nose.tools.nottest
@@ -182,7 +182,7 @@ def test_integer_arithmetic_frame(self):
self.run_frame(self.integer, self.integer)
def test_integer_arithmetic_series(self):
- self.run_series(self.integer.icol(0), self.integer.icol(0))
+ self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])
@slow
def test_integer_panel(self):
@@ -192,7 +192,7 @@ def test_float_arithemtic_frame(self):
self.run_frame(self.frame2, self.frame2)
def test_float_arithmetic_series(self):
- self.run_series(self.frame2.icol(0), self.frame2.icol(0))
+ self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])
@slow
def test_float_panel(self):
@@ -220,7 +220,7 @@ def test_mixed_panel(self):
def test_float_arithemtic(self):
self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal)
- self.run_arithmetic_test(self.frame.icol(0), self.frame.icol(0),
+ self.run_arithmetic_test(self.frame.iloc[:, 0], self.frame.iloc[:, 0],
assert_series_equal, check_dtype=True)
def test_mixed_arithmetic(self):
@@ -232,7 +232,7 @@ def test_mixed_arithmetic(self):
def test_integer_with_zeros(self):
self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal)
- self.run_arithmetic_test(self.integer.icol(0), self.integer.icol(0),
+ self.run_arithmetic_test(self.integer.iloc[:, 0], self.integer.iloc[:, 0],
assert_series_equal)
def test_invalid(self):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index b7175cb45687c..e42e72dcdda8f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1781,16 +1781,20 @@ def test_single_element_ix_dont_upcast(self):
def test_irow(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
- result = df.irow(1)
+ # 10711, deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ df.irow(1)
+
+ result = df.iloc[1]
exp = df.ix[2]
assert_series_equal(result, exp)
- result = df.irow(2)
+ result = df.iloc[2]
exp = df.ix[4]
assert_series_equal(result, exp)
# slice
- result = df.irow(slice(4, 8))
+ result = df.iloc[slice(4, 8)]
expected = df.ix[8:14]
assert_frame_equal(result, expected)
@@ -1804,23 +1808,28 @@ def f():
assert_series_equal(df[2], exp_col)
# list of integers
- result = df.irow([1, 2, 4, 6])
+ result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_icol(self):
+
df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
- result = df.icol(1)
+ # 10711, deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ df.icol(1)
+
+ result = df.iloc[:, 1]
exp = df.ix[:, 2]
assert_series_equal(result, exp)
- result = df.icol(2)
+ result = df.iloc[:, 2]
exp = df.ix[:, 4]
assert_series_equal(result, exp)
# slice
- result = df.icol(slice(4, 8))
+ result = df.iloc[:, slice(4, 8)]
expected = df.ix[:, 8:14]
assert_frame_equal(result, expected)
@@ -1832,21 +1841,23 @@ def f():
self.assertTrue((df[8] == 0).all())
# list of integers
- result = df.icol([1, 2, 4, 6])
+ result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_irow_icol_duplicates(self):
+ # 10711, deprecated
+
df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
index=list('aab'))
- result = df.irow(0)
+ result = df.iloc[0]
result2 = df.ix[0]
tm.assertIsInstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
- result = df.T.icol(0)
+ result = df.T.iloc[:, 0]
result2 = df.T.ix[:, 0]
tm.assertIsInstance(result, Series)
assert_almost_equal(result.values, df.values[0])
@@ -1856,34 +1867,39 @@ def test_irow_icol_duplicates(self):
df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
- rs = df.irow(0)
+ rs = df.iloc[0]
xp = df.ix[0]
assert_series_equal(rs, xp)
- rs = df.icol(0)
+ rs = df.iloc[:, 0]
xp = df.T.ix[0]
assert_series_equal(rs, xp)
- rs = df.icol([0])
+ rs = df.iloc[:, [0]]
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
# #2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
- result = df.icol([0])
+ result = df.iloc[:, [0]]
expected = df.take([0], axis=1)
assert_frame_equal(result, expected)
def test_icol_sparse_propegate_fill_value(self):
from pandas.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
- self.assertTrue(len(df['A'].sp_values) == len(df.icol(0).sp_values))
+ self.assertTrue(len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values))
def test_iget_value(self):
+ # 10711 deprecated
+
+ with tm.assert_produces_warning(FutureWarning):
+ self.frame.iget_value(0,0)
+
for i, row in enumerate(self.frame.index):
for j, col in enumerate(self.frame.columns):
- result = self.frame.iget_value(i, j)
- expected = self.frame.get_value(row, col)
+ result = self.frame.iat[i,j]
+ expected = self.frame.at[row, col]
assert_almost_equal(result, expected)
def test_nested_exception(self):
@@ -4755,7 +4771,7 @@ def test_from_records_sequencelike(self):
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
- tup.extend(b.irow(i).values)
+ tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
@@ -5621,9 +5637,9 @@ def test_arith_flex_frame(self):
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
- self.frame.add(self.frame.irow(0), fill_value=3)
+ self.frame.add(self.frame.iloc[0], fill_value=3)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
- self.frame.add(self.frame.irow(0), axis='index', fill_value=3)
+ self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_binary_ops_align(self):
@@ -6380,7 +6396,7 @@ def _to_uni(x):
# labeling them dupe.1,dupe.2, etc'. monkey patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
- delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]
+ delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl-1)]
ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
recons.index = ix
recons = recons.iloc[:,rnlvl-1:]
@@ -9409,7 +9425,7 @@ def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
- exp = df.irow(2)
+ exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c1addbca619cc..feb3c10a729ae 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -4884,7 +4884,7 @@ def test_groupby_whitelist(self):
'fillna',
'mad',
'any', 'all',
- 'irow', 'take',
+ 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
@@ -4905,7 +4905,7 @@ def test_groupby_whitelist(self):
'fillna',
'mad',
'any', 'all',
- 'irow', 'take',
+ 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
@@ -4930,6 +4930,20 @@ def test_groupby_whitelist(self):
'mad', 'std', 'var', 'sem']
AGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']
+ def test_groupby_whitelist_deprecations(self):
+ from string import ascii_lowercase
+ letters = np.array(list(ascii_lowercase))
+ N = 10
+ random_letters = letters.take(np.random.randint(0, 26, N))
+ df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
+ 'letters': Series(random_letters)})
+
+ # 10711 deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ df.groupby('letters').irow(0)
+ with tm.assert_produces_warning(FutureWarning):
+ df.groupby('letters').floats.irow(0)
+
def test_regression_whitelist_methods(self) :
# GH6944
@@ -5000,16 +5014,17 @@ def test_tab_completion(self):
grp = self.mframe.groupby(level='second')
results = set([v for v in dir(grp) if not v.startswith('_')])
expected = set(['A','B','C',
- 'agg','aggregate','apply','boxplot','filter','first','get_group',
- 'groups','hist','indices','last','max','mean','median',
- 'min','name','ngroups','nth','ohlc','plot', 'prod',
- 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'head',
- 'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail',
- 'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
- 'all', 'shift', 'skew', 'bfill', 'irow', 'ffill',
- 'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
- 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'
- ])
+ 'agg','aggregate','apply','boxplot','filter','first','get_group',
+ 'groups','hist','indices','last','max','mean','median',
+ 'min','name','ngroups','nth','ohlc','plot', 'prod',
+ 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'head',
+ 'irow',
+ 'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail',
+ 'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
+ 'all', 'shift', 'skew', 'bfill', 'ffill',
+ 'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
+ 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'
+ ])
self.assertEqual(results, expected)
def test_lexsort_indexer(self):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 59732aa705cdb..d0ccbee378df8 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -559,16 +559,17 @@ def test_iloc_getitem_slice_dups(self):
def test_iloc_getitem_multiindex(self):
- df = DataFrame(np.random.randn(3, 3),
+ arr = np.random.randn(3, 3)
+ df = DataFrame(arr,
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
rs = df.iloc[2]
- xp = df.irow(2)
+ xp = Series(arr[2],index=df.columns)
assert_series_equal(rs, xp)
rs = df.iloc[:,2]
- xp = df.icol(2)
+ xp = Series(arr[:, 2],index=df.index)
assert_series_equal(rs, xp)
rs = df.iloc[2,2]
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 4198bf87a4bae..a7ef49c41a011 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1993,7 +1993,7 @@ def test_indexing_ambiguity_bug_1678(self):
columns=columns)
result = frame.ix[:, 1]
- exp = frame.icol(1)
+ exp = frame.loc[:, ('Ohio', 'Red')]
tm.assertIsInstance(result, Series)
assert_series_equal(result, exp)
diff --git a/pandas/tests/test_rplot.py b/pandas/tests/test_rplot.py
index c58f17550a137..e79acfcbc58d8 100644
--- a/pandas/tests/test_rplot.py
+++ b/pandas/tests/test_rplot.py
@@ -160,7 +160,7 @@ def setUp(self):
def test_gradient(self):
for index in range(len(self.data)):
- row = self.data.irow(index)
+ row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
@@ -178,7 +178,7 @@ def setUp(self):
def test_gradient2(self):
for index in range(len(self.data)):
- row = self.data.irow(index)
+ row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
diff --git a/pandas/tools/rplot.py b/pandas/tools/rplot.py
index c3c71ab749536..5996fceff8877 100644
--- a/pandas/tools/rplot.py
+++ b/pandas/tools/rplot.py
@@ -363,7 +363,7 @@ def work(self, fig=None, ax=None):
else:
ax = fig.gca()
for index in range(len(self.data)):
- row = self.data.irow(index)
+ row = self.data.iloc[index]
x = row[self.aes['x']]
y = row[self.aes['y']]
size_scaler = self.aes['size']
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 95e41e43efd52..9ec336466266f 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -240,47 +240,47 @@ def test_resample_basic_from_daily(self):
self.assertEqual(len(result), 3)
self.assertTrue((result.index.dayofweek == [6, 6, 6]).all())
- self.assertEqual(result.irow(0), s['1/2/2005'])
- self.assertEqual(result.irow(1), s['1/9/2005'])
- self.assertEqual(result.irow(2), s.irow(-1))
+ self.assertEqual(result.iloc[0], s['1/2/2005'])
+ self.assertEqual(result.iloc[1], s['1/9/2005'])
+ self.assertEqual(result.iloc[2], s.iloc[-1])
result = s.resample('W-MON', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [0, 0]).all())
- self.assertEqual(result.irow(0), s['1/3/2005'])
- self.assertEqual(result.irow(1), s['1/10/2005'])
+ self.assertEqual(result.iloc[0], s['1/3/2005'])
+ self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [1, 1]).all())
- self.assertEqual(result.irow(0), s['1/4/2005'])
- self.assertEqual(result.irow(1), s['1/10/2005'])
+ self.assertEqual(result.iloc[0], s['1/4/2005'])
+ self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [2, 2]).all())
- self.assertEqual(result.irow(0), s['1/5/2005'])
- self.assertEqual(result.irow(1), s['1/10/2005'])
+ self.assertEqual(result.iloc[0], s['1/5/2005'])
+ self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [3, 3]).all())
- self.assertEqual(result.irow(0), s['1/6/2005'])
- self.assertEqual(result.irow(1), s['1/10/2005'])
+ self.assertEqual(result.iloc[0], s['1/6/2005'])
+ self.assertEqual(result.iloc[1], s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [4, 4]).all())
- self.assertEqual(result.irow(0), s['1/7/2005'])
- self.assertEqual(result.irow(1), s['1/10/2005'])
+ self.assertEqual(result.iloc[0], s['1/7/2005'])
+ self.assertEqual(result.iloc[1], s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEqual(len(result), 7)
self.assertTrue((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
- self.assertEqual(result.irow(0), s['1/2/2005'])
- self.assertEqual(result.irow(1), s['1/3/2005'])
- self.assertEqual(result.irow(5), s['1/9/2005'])
+ self.assertEqual(result.iloc[0], s['1/2/2005'])
+ self.assertEqual(result.iloc[1], s['1/3/2005'])
+ self.assertEqual(result.iloc[5], s['1/9/2005'])
self.assertEqual(result.index.name, 'index')
def test_resample_upsampling_picked_but_not_correct(self):
@@ -407,13 +407,13 @@ def test_resample_ohlc(self):
self.assertEqual(len(result), len(expect))
self.assertEqual(len(result.columns), 4)
- xs = result.irow(-2)
+ xs = result.iloc[-2]
self.assertEqual(xs['open'], s[-6])
self.assertEqual(xs['high'], s[-6:-1].max())
self.assertEqual(xs['low'], s[-6:-1].min())
self.assertEqual(xs['close'], s[-2])
- xs = result.irow(0)
+ xs = result.iloc[0]
self.assertEqual(xs['open'], s[0])
self.assertEqual(xs['high'], s[:5].max())
self.assertEqual(xs['low'], s[:5].min())
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index bcfeeded3abc9..5c7d459d3abc4 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -1401,7 +1401,7 @@ def test_partial_slice(self):
assert_series_equal(result, expected)
result = s['6 days, 23:11:12']
- self.assertEqual(result, s.irow(133))
+ self.assertEqual(result, s.iloc[133])
self.assertRaises(KeyError, s.__getitem__, '50 days')
@@ -1420,7 +1420,7 @@ def test_partial_slice_high_reso(self):
assert_series_equal(result, expected)
result = s['1 days, 10:11:12.001001']
- self.assertEqual(result, s.irow(1001))
+ self.assertEqual(result, s.iloc[1001])
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 26acbb2073ab8..29d065f2bb6dc 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -366,7 +366,7 @@ def test_series_box_timestamp(self):
s = Series(rng, index=rng)
tm.assertIsInstance(s[5], Timestamp)
- tm.assertIsInstance(s.iget_value(5), Timestamp)
+ tm.assertIsInstance(s.iat[5], Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
@@ -3854,7 +3854,7 @@ def test_partial_slice(self):
assert_series_equal(result, expected)
result = s['2005-1-1']
- self.assertEqual(result, s.irow(0))
+ self.assertEqual(result, s.iloc[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
@@ -4065,12 +4065,12 @@ def test_min_max_series(self):
'L': lvls})
result = df.TS.max()
- exp = Timestamp(df.TS.iget(-1))
+ exp = Timestamp(df.TS.iat[-1])
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
result = df.TS.min()
- exp = Timestamp(df.TS.iget(0))
+ exp = Timestamp(df.TS.iat[0])
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 54f4e70b36cc2..979ac007c7500 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -734,8 +734,8 @@ def assert_frame_equal(left, right, check_dtype=True,
else:
for i, col in enumerate(left.columns):
assert col in right
- lcol = left.icol(i)
- rcol = right.icol(i)
+ lcol = left.iloc[:, i]
+ rcol = right.iloc[:, i]
assert_series_equal(lcol, rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index d7ab014453f2e..be546b72f9465 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -425,7 +425,7 @@ def print_report(df,h_head=None,h_msg="",h_baseline=None,b_msg=""):
lfmt = ("{:%s}" % name_width)
lfmt += ("| {:%d.4f} " % (col_width-2))* len(df.columns)
lfmt += "|\n"
- s += lfmt.format(df.index[i],*list(df.irow(i).values))
+ s += lfmt.format(df.index[i],*list(df.iloc[i].values))
s+= ftr + "\n"
| closes #10711
| https://api.github.com/repos/pandas-dev/pandas/pulls/10719 | 2015-08-01T15:57:29Z | 2015-08-02T17:07:09Z | 2015-08-02T17:07:09Z | 2015-08-02T21:17:47Z |
BUG: Categorical doesn't show tzinfo properly | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 13d61957eea00..d7b16eda3495b 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -606,6 +606,9 @@ Bug Fixes
- Bug in ``read_stata`` when reading a file with a different order set in ``columns`` (:issue:`10757`)
+- Bug in ``Categorical`` may not representing properly when category contains ``tz`` or ``Period`` (:issue:`10713`)
+- Bug in ``Categorical.__iter__`` may not returning correct ``datetime`` and ``Period`` (:issue:`10713`)
+
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in ``read_msgpack`` where DataFrame to decode has duplicate column names (:issue:`9618`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index b0d564caa5826..c9e30ea31dab8 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -12,7 +12,7 @@
import pandas.core.common as com
from pandas.util.decorators import cache_readonly, deprecate_kwarg
-from pandas.core.common import (CategoricalDtype, ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
+from pandas.core.common import (CategoricalDtype, ABCSeries, ABCIndexClass, ABCCategoricalIndex,
isnull, notnull, is_dtype_equal,
is_categorical_dtype, is_integer_dtype, is_object_dtype,
_possibly_infer_to_datetimelike, get_dtype_kinds,
@@ -1053,15 +1053,12 @@ def get_values(self):
Returns
-------
values : numpy array
- A numpy array of the same dtype as categorical.categories.dtype or dtype string if
- periods
+ A numpy array of the same dtype as categorical.categories.dtype or
+ Index if datetime / periods
"""
-
- # if we are a period index, return a string repr
- if isinstance(self.categories, ABCPeriodIndex):
- return take_1d(np.array(self.categories.to_native_types(), dtype=object),
- self._codes)
-
+ # if we are a datetime and period index, return Index to keep metadata
+ if com.is_datetimelike(self.categories):
+ return self.categories.take(self._codes)
return np.array(self)
def check_for_ordered(self, op):
@@ -1308,7 +1305,7 @@ def __len__(self):
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
- return iter(np.array(self))
+ return iter(self.get_values())
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default footer) """
@@ -1328,7 +1325,7 @@ def _repr_categories(self):
max_categories = (10 if get_option("display.max_categories") == 0
else get_option("display.max_categories"))
from pandas.core import format as fmt
- category_strs = fmt.format_array(self.categories.get_values(), None)
+ category_strs = fmt.format_array(self.categories, None)
if len(category_strs) > max_categories:
num = max_categories // 2
head = category_strs[:num]
@@ -1343,8 +1340,9 @@ def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
- levheader = "Categories (%d, %s): " % (len(self.categories),
- self.categories.dtype)
+ dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype))
+
+ levheader = "Categories (%d, %s): " % (len(self.categories), dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if com.in_ipython_frontend():
@@ -1352,13 +1350,14 @@ def _repr_categories_info(self):
max_width = 0
levstring = ""
start = True
- cur_col_len = len(levheader)
+ cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
+ linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
- levstring += "\n" + (" "* len(levheader))
- cur_col_len = len(levheader)
- if not start:
+ levstring += linesep + (" " * (len(levheader) + 1))
+ cur_col_len = len(levheader) + 1 # header + a whitespace
+ elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
diff --git a/pandas/core/format.py b/pandas/core/format.py
index a18d0cfa6f195..4ec4375349764 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -207,7 +207,7 @@ def _get_formatted_index(self):
return fmt_index, have_header
def _get_formatted_values(self):
- return format_array(self.tr_series.get_values(), None,
+ return format_array(self.tr_series.values, None,
float_format=self.float_format,
na_rep=self.na_rep)
@@ -681,7 +681,7 @@ def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
- (frame.iloc[:, i]).get_values(),
+ frame.iloc[:, i].values,
formatter, float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space
)
@@ -1895,8 +1895,13 @@ def get_formatted_cells(self):
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right'):
- if com.is_float_dtype(values.dtype):
+
+ if com.is_categorical_dtype(values):
+ fmt_klass = CategoricalArrayFormatter
+ elif com.is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
+ elif com.is_period_arraylike(values):
+ fmt_klass = PeriodArrayFormatter
elif com.is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif com.is_datetime64_dtype(values.dtype):
@@ -1963,6 +1968,8 @@ def _format(x):
return '%s' % formatter(x)
vals = self.values
+ if isinstance(vals, Index):
+ vals = vals.values
is_float = lib.map_infer(vals, com.is_float) & notnull(vals)
leading_space = is_float.any()
@@ -2076,8 +2083,30 @@ def _format_strings(self):
values = values.asobject
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or _get_format_datetime64(is_dates_only, values, date_format=self.date_format))
- fmt_values = [ formatter(x) for x in self.values ]
+ fmt_values = [ formatter(x) for x in values ]
+
+ return fmt_values
+
+class PeriodArrayFormatter(IntArrayFormatter):
+
+ def _format_strings(self):
+ values = np.array(self.values.to_native_types(), dtype=object)
+ formatter = self.formatter or (lambda x: '%s' % x)
+ fmt_values = [formatter(x) for x in values]
+ return fmt_values
+
+
+class CategoricalArrayFormatter(GenericArrayFormatter):
+
+ def __init__(self, values, *args, **kwargs):
+ GenericArrayFormatter.__init__(self, values, *args, **kwargs)
+
+ def _format_strings(self):
+ fmt_values = format_array(self.values.get_values(), self.formatter,
+ float_format=self.float_format,
+ na_rep=self.na_rep, digits=self.digits,
+ space=self.space, justify=self.justify)
return fmt_values
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ce6c60df2fd94..5b57d602d7e41 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -276,6 +276,11 @@ def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
+ @cache_readonly
+ def dtype_str(self):
+ """ return the dtype str of the underlying data """
+ return str(self.dtype)
+
@property
def values(self):
""" return the underlying data as an ndarray """
@@ -2994,6 +2999,10 @@ def equals(self, other):
return False
+ @property
+ def _formatter_func(self):
+ return self.categories._formatter_func
+
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index a065d03d4ad72..680b370cbca41 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1736,6 +1736,582 @@ def test_repr(self):
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp,a.__unicode__())
+ def test_categorical_repr(self):
+ c = pd.Categorical([1, 2 ,3])
+ exp = """[1, 2, 3]
+Categories (3, int64): [1, 2, 3]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3])
+ exp = """[1, 2, 3, 1, 2, 3]
+Categories (3, int64): [1, 2, 3]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical([1, 2, 3, 4, 5] * 10)
+ exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
+Length: 50
+Categories (5, int64): [1, 2, 3, 4, 5]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(np.arange(20))
+ exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
+Length: 20
+Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_ordered(self):
+ c = pd.Categorical([1, 2 ,3], ordered=True)
+ exp = """[1, 2, 3]
+Categories (3, int64): [1 < 2 < 3]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3], ordered=True)
+ exp = """[1, 2, 3, 1, 2, 3]
+Categories (3, int64): [1 < 2 < 3]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
+ exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
+Length: 50
+Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(np.arange(20), ordered=True)
+ exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
+Length: 20
+Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_datetime(self):
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
+ c = pd.Categorical(idx)
+ exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
+ 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx)
+ exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
+ 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ c = pd.Categorical(idx)
+ exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
+ 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
+ 2011-01-01 13:00:00-05:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx)
+ exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
+ 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
+ 2011-01-01 13:00:00-05:00]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_datetime_ordered(self):
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
+ c = pd.Categorical(idx, ordered=True)
+ exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ c = pd.Categorical(idx, ordered=True)
+ exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_period(self):
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
+ c = pd.Categorical(idx)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
+ 2011-01-01 13:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
+ 2011-01-01 13:00]"""
+ self.assertEqual(repr(c), exp)
+
+ idx = pd.period_range('2011-01', freq='M', periods=5)
+ c = pd.Categorical(idx)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_period_ordered(self):
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
+ c = pd.Categorical(idx, ordered=True)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
+ 2011-01-01 13:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
+ 2011-01-01 13:00]"""
+ self.assertEqual(repr(c), exp)
+
+ idx = pd.period_range('2011-01', freq='M', periods=5)
+ c = pd.Categorical(idx, ordered=True)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_timedelta(self):
+ idx = pd.timedelta_range('1 days', periods=5)
+ c = pd.Categorical(idx)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
+ self.assertEqual(repr(c), exp)
+
+ idx = pd.timedelta_range('1 hours', periods=20)
+ c = pd.Categorical(idx)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 20
+Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
+ 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
+ 18 days 01:00:00, 19 days 01:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 40
+Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
+ 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
+ 18 days 01:00:00, 19 days 01:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_repr_timedelta_ordered(self):
+ idx = pd.timedelta_range('1 days', periods=5)
+ c = pd.Categorical(idx, ordered=True)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
+ self.assertEqual(repr(c), exp)
+
+ idx = pd.timedelta_range('1 hours', periods=20)
+ c = pd.Categorical(idx, ordered=True)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 20
+Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
+ 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
+ 18 days 01:00:00 < 19 days 01:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 40
+Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
+ 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
+ 18 days 01:00:00 < 19 days 01:00:00]"""
+ self.assertEqual(repr(c), exp)
+
+ def test_categorical_series_repr(self):
+ s = pd.Series(pd.Categorical([1, 2 ,3]))
+ exp = """0 1
+1 2
+2 3
+dtype: category
+Categories (3, int64): [1, 2, 3]"""
+ self.assertEqual(repr(s), exp)
+
+ s = pd.Series(pd.Categorical(np.arange(10)))
+ exp = """0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+dtype: category
+Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_ordered(self):
+ s = pd.Series(pd.Categorical([1, 2 ,3], ordered=True))
+ exp = """0 1
+1 2
+2 3
+dtype: category
+Categories (3, int64): [1 < 2 < 3]"""
+ self.assertEqual(repr(s), exp)
+
+ s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
+ exp = """0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+dtype: category
+Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_datetime(self):
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
+ s = pd.Series(pd.Categorical(idx))
+ exp = """0 2011-01-01 09:00:00
+1 2011-01-01 10:00:00
+2 2011-01-01 11:00:00
+3 2011-01-01 12:00:00
+4 2011-01-01 13:00:00
+dtype: category
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
+ 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
+ self.assertEqual(repr(s), exp)
+
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ s = pd.Series(pd.Categorical(idx))
+ exp = """0 2011-01-01 09:00:00-05:00
+1 2011-01-01 10:00:00-05:00
+2 2011-01-01 11:00:00-05:00
+3 2011-01-01 12:00:00-05:00
+4 2011-01-01 13:00:00-05:00
+dtype: category
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
+ 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
+ 2011-01-01 13:00:00-05:00]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_datetime_ordered(self):
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
+ s = pd.Series(pd.Categorical(idx, ordered=True))
+ exp = """0 2011-01-01 09:00:00
+1 2011-01-01 10:00:00
+2 2011-01-01 11:00:00
+3 2011-01-01 12:00:00
+4 2011-01-01 13:00:00
+dtype: category
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
+ self.assertEqual(repr(s), exp)
+
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ s = pd.Series(pd.Categorical(idx, ordered=True))
+ exp = """0 2011-01-01 09:00:00-05:00
+1 2011-01-01 10:00:00-05:00
+2 2011-01-01 11:00:00-05:00
+3 2011-01-01 12:00:00-05:00
+4 2011-01-01 13:00:00-05:00
+dtype: category
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_period(self):
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
+ s = pd.Series(pd.Categorical(idx))
+ exp = """0 2011-01-01 09:00
+1 2011-01-01 10:00
+2 2011-01-01 11:00
+3 2011-01-01 12:00
+4 2011-01-01 13:00
+dtype: category
+Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
+ 2011-01-01 13:00]"""
+ self.assertEqual(repr(s), exp)
+
+ idx = pd.period_range('2011-01', freq='M', periods=5)
+ s = pd.Series(pd.Categorical(idx))
+ exp = """0 2011-01
+1 2011-02
+2 2011-03
+3 2011-04
+4 2011-05
+dtype: category
+Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_period_ordered(self):
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
+ s = pd.Series(pd.Categorical(idx, ordered=True))
+ exp = """0 2011-01-01 09:00
+1 2011-01-01 10:00
+2 2011-01-01 11:00
+3 2011-01-01 12:00
+4 2011-01-01 13:00
+dtype: category
+Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
+ 2011-01-01 13:00]"""
+ self.assertEqual(repr(s), exp)
+
+ idx = pd.period_range('2011-01', freq='M', periods=5)
+ s = pd.Series(pd.Categorical(idx, ordered=True))
+ exp = """0 2011-01
+1 2011-02
+2 2011-03
+3 2011-04
+4 2011-05
+dtype: category
+Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_timedelta(self):
+ idx = pd.timedelta_range('1 days', periods=5)
+ s = pd.Series(pd.Categorical(idx))
+ exp = """0 1 days
+1 2 days
+2 3 days
+3 4 days
+4 5 days
+dtype: category
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
+ self.assertEqual(repr(s), exp)
+
+ idx = pd.timedelta_range('1 hours', periods=10)
+ s = pd.Series(pd.Categorical(idx))
+ exp = """0 0 days 01:00:00
+1 1 days 01:00:00
+2 2 days 01:00:00
+3 3 days 01:00:00
+4 4 days 01:00:00
+5 5 days 01:00:00
+6 6 days 01:00:00
+7 7 days 01:00:00
+8 8 days 01:00:00
+9 9 days 01:00:00
+dtype: category
+Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
+ 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
+ 8 days 01:00:00, 9 days 01:00:00]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_series_repr_timedelta_ordered(self):
+ idx = pd.timedelta_range('1 days', periods=5)
+ s = pd.Series(pd.Categorical(idx, ordered=True))
+ exp = """0 1 days
+1 2 days
+2 3 days
+3 4 days
+4 5 days
+dtype: category
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
+ self.assertEqual(repr(s), exp)
+
+ idx = pd.timedelta_range('1 hours', periods=10)
+ s = pd.Series(pd.Categorical(idx, ordered=True))
+ exp = """0 0 days 01:00:00
+1 1 days 01:00:00
+2 2 days 01:00:00
+3 3 days 01:00:00
+4 4 days 01:00:00
+5 5 days 01:00:00
+6 6 days 01:00:00
+7 7 days 01:00:00
+8 8 days 01:00:00
+9 9 days 01:00:00
+dtype: category
+Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
+ 3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
+ 8 days 01:00:00 < 9 days 01:00:00]"""
+ self.assertEqual(repr(s), exp)
+
+ def test_categorical_index_repr(self):
+ idx = pd.CategoricalIndex(pd.Categorical([1, 2 ,3]))
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
+ self.assertEqual(repr(idx), exp)
+
+ i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_ordered(self):
+ i = pd.CategoricalIndex(pd.Categorical([1, 2 ,3], ordered=True))
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_datetime(self):
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
+ '2011-01-01 11:00:00', '2011-01-01 12:00:00',
+ '2011-01-01 13:00:00'],
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
+ '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
+ '2011-01-01 13:00:00-05:00'],
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_datetime_ordered(self):
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
+ '2011-01-01 11:00:00', '2011-01-01 12:00:00',
+ '2011-01-01 13:00:00'],
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
+ '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
+ '2011-01-01 13:00:00-05:00'],
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
+ '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
+ '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
+ '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
+ '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_period(self):
+ # test all length
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
+ '2011-01-01 12:00', '2011-01-01 13:00'],
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
+ '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
+ '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
+ '2011-01-01 13:00'],
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.period_range('2011-01', freq='M', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_period_ordered(self):
+ idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
+ '2011-01-01 12:00', '2011-01-01 13:00'],
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.period_range('2011-01', freq='M', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_timedelta(self):
+ idx = pd.timedelta_range('1 days', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.timedelta_range('1 hours', periods=10)
+ i = pd.CategoricalIndex(pd.Categorical(idx))
+ exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
+ '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
+ '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
+ '9 days 01:00:00'],
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_index_repr_timedelta_ordered(self):
+ idx = pd.timedelta_range('1 days', periods=5)
+ i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ idx = pd.timedelta_range('1 hours', periods=10)
+ i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
+ '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
+ '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
+ '9 days 01:00:00'],
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
+ self.assertEqual(repr(i), exp)
+
+ def test_categorical_frame(self):
+ # normal DataFrame
+ dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
+ p = pd.period_range('2011-01', freq='M', periods=5)
+ df = pd.DataFrame({'dt': dt, 'p': p})
+ exp = """ dt p
+0 2011-01-01 09:00:00-05:00 2011-01
+1 2011-01-01 10:00:00-05:00 2011-02
+2 2011-01-01 11:00:00-05:00 2011-03
+3 2011-01-01 12:00:00-05:00 2011-04
+4 2011-01-01 13:00:00-05:00 2011-05"""
+
+ df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
+ self.assertEqual(repr(df), exp)
+
def test_info(self):
# make sure it works
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 2699e780f0edb..79ec18c521a00 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -132,6 +132,15 @@ def test_str(self):
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
+ def test_dtype_str(self):
+ for idx in self.indices.values():
+ dtype = idx.dtype_str
+ self.assertIsInstance(dtype, compat.string_types)
+ if isinstance(idx, PeriodIndex):
+ self.assertEqual(dtype, 'period')
+ else:
+ self.assertEqual(dtype, str(idx.dtype))
+
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 6413ce9cd5a03..f68073fd54025 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -21,6 +21,8 @@
_values_from_object, ABCSeries,
is_integer, is_float, is_object_dtype)
from pandas import compat
+from pandas.util.decorators import cache_readonly
+
from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
@@ -530,6 +532,11 @@ def shift(self, n):
values[mask] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
+ @cache_readonly
+ def dtype_str(self):
+ """ return the dtype str of the underlying data """
+ return self.inferred_type
+
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
| Closes #10713.
- [x] Check timedelta
- [x] Refactor not to import `DatetimeIndex` and `PeriodIndex` if possible.
- [x] Fix `categories` line break location
- [x] Check `Categorical` `Series` formatting
- [x] Check `Categorical` `DataFrame` formatting
- [x] Fix `CategoricalIndex` formatting
## CategoricalIndex problem (befor this PR)
```
import pandas as pd
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
idx = pd.Index(pd.Categorical(idx))
idx
# CategoricalIndex([359409, 359410, 359411, 359412, 359413], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')
```
## Format differences (after this PR)
I left ordered category representation of `CategoricalIndex` as below because it has `ordered` repr separately.
```
pd.Categorical([1, 2, 3], ordered=True)
# [1, 2, 3]
# Categories (3, int64): [1 < 2 < 3]
pd.Series(pd.Categorical([1, 2, 3], ordered=True))
#0 1
#1 2
#2 3
# dtype: category
# Categories (3, int64): [1 < 2 < 3]
pd.CategoricalIndex([1, 2, 3], ordered=True)
# CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10718 | 2015-08-01T14:48:50Z | 2015-08-08T19:43:55Z | 2015-08-08T19:43:55Z | 2015-08-08T19:44:00Z |
CLN: plotting cleanups for groupby plotting | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index de35894b017be..1f799c23c5396 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -774,7 +774,12 @@ class MPLPlot(object):
data :
"""
- _kind = 'base'
+
+ @property
+ def _kind(self):
+ """Specify kind str. Must be overridden in child class"""
+ raise NotImplementedError
+
_layout_type = 'vertical'
_default_rot = 0
orientation = None
@@ -938,7 +943,10 @@ def generate(self):
self._make_plot()
self._add_table()
self._make_legend()
- self._post_plot_logic()
+
+ for ax in self.axes:
+ self._post_plot_logic_common(ax, self.data)
+ self._post_plot_logic(ax, self.data)
self._adorn_subplots()
def _args_adjust(self):
@@ -1055,12 +1063,34 @@ def _add_table(self):
ax = self._get_ax(0)
table(ax, data)
- def _post_plot_logic(self):
+ def _post_plot_logic_common(self, ax, data):
+ """Common post process for each axes"""
+ labels = [com.pprint_thing(key) for key in data.index]
+ labels = dict(zip(range(len(data.index)), labels))
+
+ if self.orientation == 'vertical' or self.orientation is None:
+ if self._need_to_set_index:
+ xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
+ ax.set_xticklabels(xticklabels)
+ self._apply_axis_properties(ax.xaxis, rot=self.rot,
+ fontsize=self.fontsize)
+ self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
+ elif self.orientation == 'horizontal':
+ if self._need_to_set_index:
+ yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
+ ax.set_yticklabels(yticklabels)
+ self._apply_axis_properties(ax.yaxis, rot=self.rot,
+ fontsize=self.fontsize)
+ self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
+ else: # pragma no cover
+ raise ValueError
+
+ def _post_plot_logic(self, ax, data):
+ """Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
- to_adorn = self.axes
-
+ """Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
@@ -1069,7 +1099,7 @@ def _adorn_subplots(self):
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
- for ax in to_adorn:
+ for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
@@ -1090,25 +1120,6 @@ def _adorn_subplots(self):
else:
self.axes[0].set_title(self.title)
- labels = [com.pprint_thing(key) for key in self.data.index]
- labels = dict(zip(range(len(self.data.index)), labels))
-
- for ax in self.axes:
- if self.orientation == 'vertical' or self.orientation is None:
- if self._need_to_set_index:
- xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
- ax.set_xticklabels(xticklabels)
- self._apply_axis_properties(ax.xaxis, rot=self.rot,
- fontsize=self.fontsize)
- self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
- elif self.orientation == 'horizontal':
- if self._need_to_set_index:
- yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
- ax.set_yticklabels(yticklabels)
- self._apply_axis_properties(ax.yaxis, rot=self.rot,
- fontsize=self.fontsize)
- self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
-
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
@@ -1419,34 +1430,48 @@ def _get_axes_layout(self):
y_set.add(points[0][1])
return (len(y_set), len(x_set))
-class ScatterPlot(MPLPlot):
- _kind = 'scatter'
+
+class PlanePlot(MPLPlot):
+ """
+ Abstract class for plotting on plane, currently scatter and hexbin.
+ """
+
_layout_type = 'single'
- def __init__(self, data, x, y, c=None, **kwargs):
+ def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
- raise ValueError( 'scatter requires and x and y column')
+ raise ValueError(self._kind + ' requires and x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
- if com.is_integer(c) and not self.data.columns.holds_integer():
- c = self.data.columns[c]
self.x = x
self.y = y
- self.c = c
@property
def nseries(self):
return 1
+ def _post_plot_logic(self, ax, data):
+ x, y = self.x, self.y
+ ax.set_ylabel(com.pprint_thing(y))
+ ax.set_xlabel(com.pprint_thing(x))
+
+
+class ScatterPlot(PlanePlot):
+ _kind = 'scatter'
+
+ def __init__(self, data, x, y, c=None, **kwargs):
+ super(ScatterPlot, self).__init__(data, x, y, **kwargs)
+ if com.is_integer(c) and not self.data.columns.holds_integer():
+ c = self.data.columns[c]
+ self.c = c
+
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
- import matplotlib.pyplot as plt
-
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
@@ -1457,7 +1482,7 @@ def _make_plot(self):
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
- cmap = plt.cm.get_cmap(cmap)
+ cmap = self.plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
@@ -1491,46 +1516,22 @@ def _make_plot(self):
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
- def _post_plot_logic(self):
- ax = self.axes[0]
- x, y = self.x, self.y
- ax.set_ylabel(com.pprint_thing(y))
- ax.set_xlabel(com.pprint_thing(x))
-
-class HexBinPlot(MPLPlot):
+class HexBinPlot(PlanePlot):
_kind = 'hexbin'
- _layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
- MPLPlot.__init__(self, data, **kwargs)
-
- if x is None or y is None:
- raise ValueError('hexbin requires and x and y column')
- if com.is_integer(x) and not self.data.columns.holds_integer():
- x = self.data.columns[x]
- if com.is_integer(y) and not self.data.columns.holds_integer():
- y = self.data.columns[y]
-
+ super(HexBinPlot, self).__init__(data, x, y, **kwargs)
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
-
- self.x = x
- self.y = y
self.C = C
- @property
- def nseries(self):
- return 1
-
def _make_plot(self):
- import matplotlib.pyplot as plt
-
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
- cmap = plt.cm.get_cmap(cmap)
+ cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
@@ -1547,12 +1548,6 @@ def _make_plot(self):
def _make_legend(self):
pass
- def _post_plot_logic(self):
- ax = self.axes[0]
- x, y = self.x, self.y
- ax.set_ylabel(com.pprint_thing(y))
- ax.set_xlabel(com.pprint_thing(x))
-
class LinePlot(MPLPlot):
_kind = 'line'
@@ -1685,26 +1680,23 @@ def _update_stacker(cls, ax, stacking_id, values):
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
- def _post_plot_logic(self):
- df = self.data
-
+ def _post_plot_logic(self, ax, data):
condition = (not self._use_dynamic_x()
- and df.index.is_all_dates
+ and data.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
- for ax in self.axes:
- if condition:
- # irregular TS rotated 30 deg. by default
- # probably a better place to check / set this.
- if not self._rot_set:
- self.rot = 30
- format_date_labels(ax, rot=self.rot)
+ if condition:
+ # irregular TS rotated 30 deg. by default
+ # probably a better place to check / set this.
+ if not self._rot_set:
+ self.rot = 30
+ format_date_labels(ax, rot=self.rot)
- if index_name is not None and self.use_index:
- ax.set_xlabel(index_name)
+ if index_name is not None and self.use_index:
+ ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
@@ -1758,16 +1750,14 @@ def _add_legend_handle(self, handle, label, index=None):
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
- def _post_plot_logic(self):
- LinePlot._post_plot_logic(self)
+ def _post_plot_logic(self, ax, data):
+ LinePlot._post_plot_logic(self, ax, data)
if self.ylim is None:
- if (self.data >= 0).all().all():
- for ax in self.axes:
- ax.set_ylim(0, None)
- elif (self.data <= 0).all().all():
- for ax in self.axes:
- ax.set_ylim(None, 0)
+ if (data >= 0).all().all():
+ ax.set_ylim(0, None)
+ elif (data <= 0).all().all():
+ ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
@@ -1865,19 +1855,17 @@ def _make_plot(self):
start=start, label=label, log=self.log, **kwds)
self._add_legend_handle(rect, label, index=i)
- def _post_plot_logic(self):
- for ax in self.axes:
- if self.use_index:
- str_index = [com.pprint_thing(key) for key in self.data.index]
- else:
- str_index = [com.pprint_thing(key) for key in
- range(self.data.shape[0])]
- name = self._get_index_name()
+ def _post_plot_logic(self, ax, data):
+ if self.use_index:
+ str_index = [com.pprint_thing(key) for key in data.index]
+ else:
+ str_index = [com.pprint_thing(key) for key in range(data.shape[0])]
+ name = self._get_index_name()
- s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
- e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
+ s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
+ e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
- self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
+ self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
@@ -1975,13 +1963,11 @@ def _make_plot_keywords(self, kwds, y):
kwds['bins'] = self.bins
return kwds
- def _post_plot_logic(self):
+ def _post_plot_logic(self, ax, data):
if self.orientation == 'horizontal':
- for ax in self.axes:
- ax.set_xlabel('Frequency')
+ ax.set_xlabel('Frequency')
else:
- for ax in self.axes:
- ax.set_ylabel('Frequency')
+ ax.set_ylabel('Frequency')
@property
def orientation(self):
@@ -2038,9 +2024,8 @@ def _make_plot_keywords(self, kwds, y):
kwds['ind'] = self._get_ind(y)
return kwds
- def _post_plot_logic(self):
- for ax in self.axes:
- ax.set_ylabel('Density')
+ def _post_plot_logic(self, ax, data):
+ ax.set_ylabel('Density')
class PiePlot(MPLPlot):
@@ -2242,7 +2227,7 @@ def _set_ticklabels(self, ax, labels):
def _make_legend(self):
pass
- def _post_plot_logic(self):
+ def _post_plot_logic(self, ax, data):
pass
@property
| Related to #8018. Because it touches lots of codes, I've split some cleanups required for #8018 which does NOT changes current functionality.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10717 | 2015-08-01T10:52:31Z | 2015-08-31T13:53:59Z | 2015-08-31T13:53:59Z | 2015-08-31T13:54:03Z |
BUG: Series.align with MultiIndex may be inverted | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index de2261a79da47..3f5976903418f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -603,6 +603,8 @@ Bug Fixes
- Bug in line and kde plot cannot accept multiple colors when ``subplots=True`` (:issue:`9894`)
- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`)
+- Bug in left and right ``align`` of ``Series`` with ``MultiIndex`` may be inverted (:issue:`10665`)
+- Bug in left and right ``join`` of with ``MultiIndex`` may be inverted (:issue:`10741`)
- Bug in ``read_stata`` when reading a file with a different order set in ``columns`` (:issue:`10757`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ce6c60df2fd94..a2ad29221685a 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2156,6 +2156,8 @@ def _join_multi(self, other, how, return_indexers=True):
if self_is_mi:
self, other = other, self
flip_order = True
+ # flip if join method is right or left
+ how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 77ef5fecf22c9..cf63d167eeb81 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4749,6 +4749,33 @@ def test_join_str_datetime(self):
self.assertEqual(len(tst.columns), 3)
+ def test_join_multiindex_leftright(self):
+ # GH 10741
+ df1 = pd.DataFrame([['a', 'x', 0.471780], ['a','y', 0.774908],
+ ['a', 'z', 0.563634], ['b', 'x', -0.353756],
+ ['b', 'y', 0.368062], ['b', 'z', -1.721840],
+ ['c', 'x', 1], ['c', 'y', 2], ['c', 'z', 3]],
+ columns=['first', 'second', 'value1']).set_index(['first', 'second'])
+ df2 = pd.DataFrame([['a', 10], ['b', 20]], columns=['first', 'value2']).set_index(['first'])
+
+ exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
+ [-0.353756, 20], [0.368062, 20], [-1.721840, 20],
+ [1.000000, np.nan], [2.000000, np.nan], [3.000000, np.nan]],
+ index=df1.index, columns=['value1', 'value2'])
+
+ # these must be the same results (but columns are flipped)
+ tm.assert_frame_equal(df1.join(df2, how='left'), exp)
+ tm.assert_frame_equal(df2.join(df1, how='right'), exp[['value2', 'value1']])
+
+ exp_idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y', 'z']],
+ names=['first', 'second'])
+ exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
+ [-0.353756, 20], [0.368062, 20], [-1.721840, 20]],
+ index=exp_idx, columns=['value1', 'value2'])
+
+ tm.assert_frame_equal(df1.join(df2, how='right'), exp)
+ tm.assert_frame_equal(df2.join(df1, how='left'), exp[['value2', 'value1']])
+
def test_from_records_sequencelike(self):
df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
'A1': np.array(np.random.randn(6), dtype = np.float64),
@@ -9895,6 +9922,39 @@ def test_align_int_fill_bug(self):
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
+ def test_align_multiindex(self):
+ # GH 10665
+ # same test cases as test_align_multiindex in test_series.py
+
+ midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
+ names=('a', 'b', 'c'))
+ idx = pd.Index(range(2), name='b')
+ df1 = pd.DataFrame(np.arange(12), index=midx)
+ df2 = pd.DataFrame(np.arange(2), index=idx)
+
+ # these must be the same results (but flipped)
+ res1l, res1r = df1.align(df2, join='left')
+ res2l, res2r = df2.align(df1, join='right')
+
+ expl = df1
+ tm.assert_frame_equal(expl, res1l)
+ tm.assert_frame_equal(expl, res2r)
+ expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
+ tm.assert_frame_equal(expr, res1r)
+ tm.assert_frame_equal(expr, res2l)
+
+ res1l, res1r = df1.align(df2, join='right')
+ res2l, res2r = df2.align(df1, join='left')
+
+ exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
+ names=('a', 'b', 'c'))
+ expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
+ tm.assert_frame_equal(expl, res1l)
+ tm.assert_frame_equal(expl, res2r)
+ expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
+ tm.assert_frame_equal(expr, res1r)
+ tm.assert_frame_equal(expr, res2l)
+
def test_where(self):
default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 2699e780f0edb..948d43d8d7fd7 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -4589,6 +4589,37 @@ def test_join_self(self):
joined = res.join(res, how=kind)
self.assertIs(res, joined)
+ def test_join_multi(self):
+ # GH 10665
+ midx = pd.MultiIndex.from_product([np.arange(4), np.arange(4)], names=['a', 'b'])
+ idx = pd.Index([1, 2, 5], name='b')
+
+ # inner
+ jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
+ exp_idx = pd.MultiIndex.from_product([np.arange(4), [1, 2]], names=['a', 'b'])
+ exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14])
+ exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1])
+ self.assert_index_equal(jidx, exp_idx)
+ self.assert_numpy_array_equal(lidx, exp_lidx)
+ self.assert_numpy_array_equal(ridx, exp_ridx)
+ # flip
+ jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
+ self.assert_index_equal(jidx, exp_idx)
+ self.assert_numpy_array_equal(lidx, exp_lidx)
+ self.assert_numpy_array_equal(ridx, exp_ridx)
+
+ # keep MultiIndex
+ jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
+ exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1])
+ self.assert_index_equal(jidx, midx)
+ self.assertIsNone(lidx)
+ self.assert_numpy_array_equal(ridx, exp_ridx)
+ # flip
+ jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
+ self.assert_index_equal(jidx, midx)
+ self.assertIsNone(lidx)
+ self.assert_numpy_array_equal(ridx, exp_ridx)
+
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
tm.assertIsInstance(result, MultiIndex)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 66a38cd858846..fe76ec0f36a97 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -6288,6 +6288,38 @@ def test_align_sameindex(self):
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
+ def test_align_multiindex(self):
+ # GH 10665
+
+ midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
+ names=('a', 'b', 'c'))
+ idx = pd.Index(range(2), name='b')
+ s1 = pd.Series(np.arange(12), index=midx)
+ s2 = pd.Series(np.arange(2), index=idx)
+
+ # these must be the same results (but flipped)
+ res1l, res1r = s1.align(s2, join='left')
+ res2l, res2r = s2.align(s1, join='right')
+
+ expl = s1
+ tm.assert_series_equal(expl, res1l)
+ tm.assert_series_equal(expl, res2r)
+ expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
+ tm.assert_series_equal(expr, res1r)
+ tm.assert_series_equal(expr, res2l)
+
+ res1l, res1r = s1.align(s2, join='right')
+ res2l, res2r = s2.align(s1, join='left')
+
+ exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
+ names=('a', 'b', 'c'))
+ expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
+ tm.assert_series_equal(expl, res1l)
+ tm.assert_series_equal(expl, res2r)
+ expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
+ tm.assert_series_equal(expr, res1r)
+ tm.assert_series_equal(expr, res2l)
+
def test_reindex(self):
identity = self.series.reindex(self.series.index)
| Closes #10665. Closes #10741.
I'm adding some more tests for `Index.join`, and found a skeptic behavior.
For `right` join, all levels / labels of right side remains (OK).
```
import pandas as pd
midx = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]], names=['a', 'b'])
idx = pd.Index([1, 2 ,3], name='b')
idx.join(midx, how='right')
# MultiIndex(levels=[[0, 1], [0, 1, 2]],
# labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
# names=[u'a', u'b'])
idx.join(midx, how='right').values
# array([(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)], dtype=object)
```
For `left` join, all levels are remain but not used as levels. Thus, values only on left side is dropped (NG).
```
idx.join(midx, how='left')
# MultiIndex(levels=[[0, 1], [1, 2, 3]],
# labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
# names=[u'a', u'b'])
idx.join(midx, how='left').values
array([(0, 1), (0, 2), (1, 1), (1, 2)], dtype=object)
```
I understand the last result must be the following. If my understanding is correct, I'll include the fix to this PR.
```
idx.join(midx, how='left')
# MultiIndex(levels=[[0, 1], [1, 2, 3]],
# labels=[[0, 0, 1, 1], [0, 1, 2, 0, 1, 2]],
# names=[u'a', u'b'])
idx.join(midx, how='left').values
# array([(0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 3)], dtype=object)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10716 | 2015-08-01T01:56:20Z | 2015-08-18T10:51:57Z | 2015-08-18T10:51:57Z | 2015-08-18T11:22:55Z |
updating account info | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 6b91a3aa5b484..762656ba05bd6 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -83,7 +83,7 @@ for pandas data objects.
`Plotly <https://plot.ly/python>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-`Plotly’s <https://plot.ly/>`__ `Python API <https://plot.ly/python/>`__ enables interactive figures and web shareability. Maps, 2D, 3D, and live-streaming graphs are rendered with WebGL and `D3.js <http://d3js.org/>`__. The library supports plotting directly from a pandas DataFrame and cloud-based collaboration. Users of `matplotlib, ggplot for Python, and Seaborn <https://plot.ly/python/matplotlib-to-plotly-tutorial/>`__ can convert figures into interactive web-based plots. Plots can be drawn in `IPython Notebooks <https://plot.ly/ipython-notebooks/>`__ , edited with R or MATLAB, modified in a GUI, or embedded in apps and dashboards.
+`Plotly’s <https://plot.ly/>`__ `Python API <https://plot.ly/python/>`__ enables interactive figures and web shareability. Maps, 2D, 3D, and live-streaming graphs are rendered with WebGL and `D3.js <http://d3js.org/>`__. The library supports plotting directly from a pandas DataFrame and cloud-based collaboration. Users of `matplotlib, ggplot for Python, and Seaborn <https://plot.ly/python/matplotlib-to-plotly-tutorial/>`__ can convert figures into interactive web-based plots. Plots can be drawn in `IPython Notebooks <https://plot.ly/ipython-notebooks/>`__ , edited with R or MATLAB, modified in a GUI, or embedded in apps and dashboards. Plotly is free for unlimited sharing, and has `cloud <https://plot.ly/product/plans/>`__, `offline <https://plot.ly/python/offline/>`__, or `on-premise <https://plot.ly/product/enterprise/>`__ accounts for private use.
.. _ecosystem.ide:
| https://api.github.com/repos/pandas-dev/pandas/pulls/10714 | 2015-07-31T23:45:51Z | 2015-07-31T23:48:23Z | 2015-07-31T23:48:23Z | 2015-08-07T22:54:09Z | |
added axvlines_color parameter to parallel_coordinates | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 3ca56ecc00d36..f19da7d7ab5d8 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -25,6 +25,7 @@ Enhancements
objects for the ``filepath_or_buffer`` argument. (:issue:`11033`)
- ``DataFrame`` now uses the fields of a ``namedtuple`` as columns, if columns are not supplied (:issue:`11181`)
- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
+- Added ``axvlines_kwds`` to parallel coordinates plot (:issue: `10709`)
.. _whatsnew_0171.api:
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 98d6f5e8eb797..8098cc05ba616 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -638,7 +638,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
- axvlines=True, **kwds):
+ axvlines=True, axvlines_kwds={'linewidth':1,'color':'black'}, **kwds):
"""Parallel coordinates plotting.
Parameters
@@ -660,6 +660,8 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
+ axvlines_kwds: keywords, optional
+ Options to be passed to axvline method for vertical lines
kwds: keywords
Options to pass to matplotlib plotting method
@@ -726,7 +728,7 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
if axvlines:
for i in x:
- ax.axvline(i, linewidth=1, color='black')
+ ax.axvline(i, **axvlines_kwds)
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
| parallel_coordinates are in pandas.tools.plotting
'black' color was hard-coded for vertical lines, I suggest to make it an option
| https://api.github.com/repos/pandas-dev/pandas/pulls/10709 | 2015-07-31T05:04:05Z | 2015-11-13T17:13:47Z | null | 2015-11-13T17:13:48Z |
adding plotly to ecosystem | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 26ff9ec536c45..6b91a3aa5b484 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -80,6 +80,10 @@ The `Vincent <https://github.com/wrobstory/vincent>`__ project leverages `Vega <
(that in turn, leverages `d3 <http://d3js.org/>`__) to create plots . It has great support
for pandas data objects.
+`Plotly <https://plot.ly/python>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`Plotly’s <https://plot.ly/>`__ `Python API <https://plot.ly/python/>`__ enables interactive figures and web shareability. Maps, 2D, 3D, and live-streaming graphs are rendered with WebGL and `D3.js <http://d3js.org/>`__. The library supports plotting directly from a pandas DataFrame and cloud-based collaboration. Users of `matplotlib, ggplot for Python, and Seaborn <https://plot.ly/python/matplotlib-to-plotly-tutorial/>`__ can convert figures into interactive web-based plots. Plots can be drawn in `IPython Notebooks <https://plot.ly/ipython-notebooks/>`__ , edited with R or MATLAB, modified in a GUI, or embedded in apps and dashboards.
.. _ecosystem.ide:
| https://api.github.com/repos/pandas-dev/pandas/pulls/10706 | 2015-07-30T18:08:28Z | 2015-07-30T21:19:39Z | 2015-07-30T21:19:39Z | 2015-07-31T23:48:38Z | |
ENH: Index optional pivot | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index c97f143cb63c2..4cf4c51f75fbb 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -110,6 +110,8 @@ Other enhancements
- ``pd.merge`` will now allow duplicate column names if they are not merged upon (:issue:`10639`).
+- ``pd.pivot`` will now allow passing index as ``None`` (:issue:`3962`).
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 872e8efb0d4e9..3770fc01462e8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3460,8 +3460,9 @@ def pivot(self, index=None, columns=None, values=None):
Parameters
----------
- index : string or object
- Column name to use to make new frame's index
+ index : string or object, optional
+ Column name to use to make new frame's index. If None, uses
+ existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 99767ab199843..f782aa38bc965 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -319,11 +319,17 @@ def pivot(self, index=None, columns=None, values=None):
See DataFrame.pivot
"""
if values is None:
- indexed = self.set_index([index, columns])
+ cols = [columns] if index is None else [index, columns]
+ append = index is None
+ indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
+ if index is None:
+ index = self.index
+ else:
+ index = self[index]
indexed = Series(self[values].values,
- index=MultiIndex.from_arrays([self[index],
+ index=MultiIndex.from_arrays([index,
self[columns]]))
return indexed.unstack(columns)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 9ab004eb31a99..b7175cb45687c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9478,6 +9478,47 @@ def test_pivot_integer_bug(self):
repr(result)
self.assert_numpy_array_equal(result.columns, ['A', 'B'])
+ def test_pivot_index_none(self):
+ # gh-3962
+ data = {
+ 'index': ['A', 'B', 'C', 'C', 'B', 'A'],
+ 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+ 'values': [1., 2., 3., 3., 2., 1.]
+ }
+
+ frame = DataFrame(data).set_index('index')
+ result = frame.pivot(columns='columns', values='values')
+ expected = DataFrame({
+ 'One': {'A': 1., 'B': 2., 'C': 3.},
+ 'Two': {'A': 1., 'B': 2., 'C': 3.}
+ })
+
+ expected.index.name, expected.columns.name = 'index', 'columns'
+ assert_frame_equal(result, expected)
+
+ # omit values
+ result = frame.pivot(columns='columns')
+
+ expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
+ ('values', 'Two')],
+ names=[None, 'columns'])
+ expected.index.name = 'index'
+ assert_frame_equal(result, expected, check_names=False)
+ self.assertEqual(result.index.name, 'index',)
+ self.assertEqual(result.columns.names, (None, 'columns'))
+ expected.columns = expected.columns.droplevel(0)
+
+ data = {
+ 'index': range(7),
+ 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+ 'values': [1., 2., 3., 3., 2., 1.]
+ }
+
+ result = frame.pivot(columns='columns', values='values')
+
+ expected.columns.name = 'columns'
+ assert_frame_equal(result, expected)
+
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
| Closes #3962.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10705 | 2015-07-30T16:45:26Z | 2015-07-31T11:14:36Z | 2015-07-31T11:14:36Z | 2015-07-31T18:14:04Z |
BUG: Bug in Index construction with a mixed list of tuples #10697) | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index aa8241e3cc272..c97f143cb63c2 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -367,7 +367,7 @@ Bug Fixes
- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`)
-
+- Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ea9d1f38b92c1..ce6c60df2fd94 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -164,16 +164,21 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
cls._scalar_data_error(data)
else:
if tupleize_cols and isinstance(data, list) and data and isinstance(data[0], tuple):
- try:
- # must be orderable in py3
- if compat.PY3:
- sorted(data)
- return MultiIndex.from_tuples(
- data, names=name or kwargs.get('names'))
- except (TypeError, KeyError):
- # python2 - MultiIndex fails on mixed types
- pass
+ # we must be all tuples, otherwise don't construct
+ # 10697
+ if all( isinstance(e, tuple) for e in data ):
+
+ try:
+
+ # must be orderable in py3
+ if compat.PY3:
+ sorted(data)
+ return MultiIndex.from_tuples(
+ data, names=name or kwargs.get('names'))
+ except (TypeError, KeyError):
+ # python2 - MultiIndex fails on mixed types
+ pass
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 87e06bad7fbe1..2699e780f0edb 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -553,6 +553,15 @@ def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
+ def test_consruction_list_mixed_tuples(self):
+ # 10697
+ # if we are constructing from a mixed list of tuples, make sure that we
+ # are independent of the sorting order
+ idx1 = Index([('A',1),'B'])
+ self.assertIsInstance(idx1, Index) and self.assertNotInstance(idx1, MultiIndex)
+ idx2 = Index(['B',('A',1)])
+ self.assertIsInstance(idx2, Index) and self.assertNotInstance(idx2, MultiIndex)
+
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index af2373a72fb9d..8b1457e7fd490 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -2002,6 +2002,13 @@ def test_dups_index(self):
result = df.append(df)
assert_frame_equal(result, expected)
+ def test_with_mixed_tuples(self):
+ # 10697
+ # columns have mixed tuples, so handle properly
+ df1 = DataFrame({ u'A' : 'foo', (u'B',1) : 'bar' },index=range(2))
+ df2 = DataFrame({ u'B' : 'foo', (u'B',1) : 'bar' },index=range(2))
+ result = concat([df1,df2])
+
def test_join_dups(self):
# joining dups
| closes #10697
| https://api.github.com/repos/pandas-dev/pandas/pulls/10703 | 2015-07-30T12:25:15Z | 2015-07-30T14:36:19Z | 2015-07-30T14:36:19Z | 2015-07-30T14:36:19Z |
ENH: Add the moment function as DataFrame and Series method WITH namespacing | diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 586d507b27493..70b6bcb5c4de9 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -122,7 +122,7 @@
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
-the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
+the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
"""
@@ -399,7 +399,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
if center:
result = _center_window(result, window, axis)
-
+
return return_hook(result)
@@ -1036,3 +1036,125 @@ def expanding_apply(arg, func, min_periods=1, freq=None,
window = max(len(arg), min_periods) if min_periods else len(arg)
return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq,
args=args, kwargs=kwargs)
+
+#----------------------------------------------------------------------
+# Add all the methods to DataFrame and Series
+import sys
+thismodule = sys.modules[__name__]
+
+from pandas.core.base import AccessorProperty
+from functools import update_wrapper
+from pandas.core import common as com
+from pandas.core.base import PandasDelegate
+
+
+RollingMethods = type(
+ "RollingMethods",
+ (),
+ {
+ fct_name.replace("rolling_", ""): staticmethod(
+ getattr(thismodule, fct_name)
+ )
+ for fct_name in __all__
+ if fct_name.startswith("rolling_")
+ }
+)
+
+ExpandingMethods = type(
+ "ExpandingMethods",
+ (),
+ {
+ fct_name.replace("expanding_", ""): staticmethod(
+ getattr(thismodule, fct_name)
+ )
+ for fct_name in __all__
+ if fct_name.startswith("expanding_")
+ }
+)
+
+EwmMethods = type(
+ "EwmMethods",
+ (),
+ {
+ fct_name.replace("ewm", ""): staticmethod(
+ getattr(thismodule, fct_name)
+ )
+ for fct_name in __all__
+ if fct_name.startswith("ewm")
+ }
+)
+
+class MomentDelegator(PandasDelegate):
+ prefix = None
+ klass = None
+
+ def __init__(self, series, prefix, klass):
+ self.series = series
+ def _delegate_method(self, name, *args, **kwargs):
+ method = getattr(self.klass, name)
+ return method(self.series, *args, **kwargs)
+
+ @classmethod
+ def generate_make_moment_accessor(cls):
+ prefix = cls.prefix
+ klass = cls.klass
+ def moment_rolling_accessor(self):
+ check_dtype(self, prefix)
+ return cls(self, prefix, klass)
+ return moment_rolling_accessor
+
+class RollingDelegator(MomentDelegator):
+ prefix = "rolling_"
+ klass = RollingMethods
+
+RollingDelegator._add_delegate_accessors(
+ delegate=RollingMethods,
+ accessors=RollingMethods.__dict__.keys(),
+ typ='method'
+)
+
+class ExpandingDelegator(MomentDelegator):
+ prefix = "expanding_"
+ klass = ExpandingMethods
+
+ExpandingDelegator._add_delegate_accessors(
+ delegate=ExpandingMethods,
+ accessors=ExpandingMethods.__dict__.keys(),
+ typ='method'
+)
+
+class EwmDelegator(MomentDelegator):
+ prefix = "ewm"
+ klass = EwmMethods
+
+EwmDelegator._add_delegate_accessors(
+ delegate=EwmMethods,
+ accessors=EwmMethods.__dict__.keys(),
+ typ='method'
+)
+
+def check_dtype(self, name):
+ if isinstance(self, Series) \
+ and not (
+ com.is_float_dtype(self.dtype)
+ or
+ com.is_integer_dtype(self.dtype)
+ ):
+ raise AttributeError(
+ "Can only use .{} accessor with floats or int values".format(name)
+ )
+ if isinstance(self, DataFrame) \
+ and not True in [ # check whether there is one dtype float or integer
+ com.is_float_dtype(t) or com.is_integer_dtype(t)
+ for t in self.dtypes]:
+ raise AttributeError(
+ "Can only use .{}".format(name) +
+ " accessor if there exist at least one column of dtype floats or int"
+ )
+
+DataFrame.rolling = AccessorProperty(RollingDelegator, RollingDelegator.generate_make_moment_accessor())
+DataFrame.expanding = AccessorProperty(ExpandingDelegator, ExpandingDelegator.generate_make_moment_accessor())
+DataFrame.ewm = AccessorProperty(EwmDelegator, EwmDelegator.generate_make_moment_accessor())
+Series.rolling = AccessorProperty(RollingDelegator, RollingDelegator.generate_make_moment_accessor())
+Series.expanding = AccessorProperty(ExpandingDelegator, ExpandingDelegator.generate_make_moment_accessor())
+Series.ewm = AccessorProperty(EwmDelegator, EwmDelegator.generate_make_moment_accessor())
| This makes the usage of those methods more object oriented
Also, delete some trailing whitespaces.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10702 | 2015-07-30T10:47:07Z | 2015-11-15T16:29:49Z | null | 2015-11-15T16:29:49Z |
TST: fix usage of assert_produces_warning | diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index cc9ab977241f9..ca05eda20b0a8 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -464,7 +464,7 @@ def test_timestamp_and_label(self):
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
-
+
with StataReader(path) as reader:
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
@@ -475,10 +475,8 @@ def test_numeric_column_names(self):
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
- with warnings.catch_warnings(record=True) as w:
- tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
- # should produce a single warning
- tm.assert_equal(len(w), 1)
+ with tm.assert_produces_warning(InvalidColumnName):
+ original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
@@ -530,11 +528,8 @@ def test_large_value_conversion(self):
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
- with warnings.catch_warnings(record=True) as w:
- tm.assert_produces_warning(original.to_stata(path),
- PossiblePrecisionLoss)
- # should produce a single warning
- tm.assert_equal(len(w), 1)
+ with tm.assert_produces_warning(PossiblePrecisionLoss):
+ original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original.copy()
@@ -548,10 +543,8 @@ def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
- with warnings.catch_warnings(record=True) as w:
- tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
- InvalidColumnName)
- tm.assert_equal(len(w), 1)
+ with tm.assert_produces_warning(InvalidColumnName):
+ original.to_stata(path, {0: 'tc'})
written_and_read_again = self.read_dta(path)
modified = original.copy()
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 624fa11ac908a..3ba58d7208474 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -4103,9 +4103,12 @@ def test_slice_indexer(self):
def check_iloc_compat(s):
# invalid type for iloc (but works with a warning)
- self.assert_produces_warning(FutureWarning, lambda : s.iloc[6.0:8])
- self.assert_produces_warning(FutureWarning, lambda : s.iloc[6.0:8.0])
- self.assert_produces_warning(FutureWarning, lambda : s.iloc[6:8.0])
+ with self.assert_produces_warning(FutureWarning):
+ s.iloc[6.0:8]
+ with self.assert_produces_warning(FutureWarning):
+ s.iloc[6.0:8.0]
+ with self.assert_produces_warning(FutureWarning):
+ s.iloc[6:8.0]
def check_slicing_positional(index):
| @jreback I just notices some tests were not using `assert_produces_warning` as a context manager, therefore these would never fail (you could put whatever in there).
I changed it to using `with`, as I don't think there is way to use that function not as a context?
@bashtage I also changed some of your tests. I am not familiar with this code, but I think this should still be correct
| https://api.github.com/repos/pandas-dev/pandas/pulls/10701 | 2015-07-30T09:21:07Z | 2015-07-31T07:23:04Z | 2015-07-31T07:23:04Z | 2015-07-31T07:23:04Z |
Allow interpolate() to fill backwards as well as forwards | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 51293ca4240c6..96ae46621dca2 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -329,6 +329,10 @@ Interpolation
:meth:`~pandas.DataFrame.interpolate`, and :meth:`~pandas.Series.interpolate` have
revamped interpolation methods and functionality.
+.. versionadded:: 0.17.0
+
+ The ``limit_direction`` keyword argument was added.
+
Both Series and Dataframe objects have an ``interpolate`` method that, by default,
performs linear interpolation at missing datapoints.
@@ -448,17 +452,33 @@ at the new values.
.. _documentation: http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
.. _guide: http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
+Interpolation Limits
+^^^^^^^^^^^^^^^^^^^^
Like other pandas fill methods, ``interpolate`` accepts a ``limit`` keyword
-argument. Use this to limit the number of consecutive interpolations, keeping
-``NaN`` values for interpolations that are too far from the last valid
+argument. Use this argument to limit the number of consecutive interpolations,
+keeping ``NaN`` values for interpolations that are too far from the last valid
observation:
.. ipython:: python
- ser = pd.Series([1, 3, np.nan, np.nan, np.nan, 11])
+ ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13])
ser.interpolate(limit=2)
+By default, ``limit`` applies in a forward direction, so that only ``NaN``
+values after a non-``NaN`` value can be filled. If you provide ``'backward'`` or
+``'both'`` for the ``limit_direction`` keyword argument, you can fill ``NaN``
+values before non-``NaN`` values, or both before and after non-``NaN`` values,
+respectively:
+
+.. ipython:: python
+
+ ser.interpolate(limit=1) # limit_direction == 'forward'
+
+ ser.interpolate(limit=1, limit_direction='backward')
+
+ ser.interpolate(limit=1, limit_direction='both')
+
.. _missing_data.replace:
Replacing Generic Values
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 3e81a923a114c..7688572fe277a 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -55,6 +55,12 @@ New features
- SQL io functions now accept a SQLAlchemy connectable. (:issue:`7877`)
- Enable writing complex values to HDF stores when using table format (:issue:`10447`)
- Enable reading gzip compressed files via URL, either by explicitly setting the compression parameter or by inferring from the presence of the HTTP Content-Encoding header in the response (:issue:`8685`)
+- Add a ``limit_direction`` keyword argument that works with ``limit`` to enable ``interpolate`` to fill ``NaN`` values forward, backward, or both (:issue:`9218` and :issue:`10420`)
+
+ .. ipython:: python
+
+ ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13])
+ ser.interpolate(limit=1, limit_direction='both')
.. _whatsnew_0170.gil:
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 72ea6d14456b0..77536fb391f93 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1589,6 +1589,7 @@ def _clean_interp_method(method, **kwargs):
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
+ limit_direction='forward',
fill_value=None, bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
@@ -1602,9 +1603,15 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
invalid = isnull(yvalues)
valid = ~invalid
- valid_y = yvalues[valid]
- valid_x = xvalues[valid]
- new_x = xvalues[invalid]
+ if not valid.any():
+ # have to call np.asarray(xvalues) since xvalues could be an Index
+ # which cant be mutated
+ result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
+ result.fill(np.nan)
+ return result
+
+ if valid.all():
+ return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
@@ -1614,33 +1621,54 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
'DatetimeIndex')
method = 'values'
- def _interp_limit(invalid, limit):
- """mask off values that won't be filled since they exceed the limit"""
+ def _interp_limit(invalid, fw_limit, bw_limit):
+ "Get idx of values that won't be forward-filled b/c they exceed the limit."
all_nans = np.where(invalid)[0]
if all_nans.size == 0: # no nans anyway
return []
- violate = [invalid[x:x + limit + 1] for x in all_nans]
- violate = np.array([x.all() & (x.size > limit) for x in violate])
- return all_nans[violate] + limit
+ violate = [invalid[max(0, x - bw_limit):x + fw_limit + 1] for x in all_nans]
+ violate = np.array([x.all() & (x.size > bw_limit + fw_limit) for x in violate])
+ return all_nans[violate] + fw_limit - bw_limit
+
+ valid_limit_directions = ['forward', 'backward', 'both']
+ limit_direction = limit_direction.lower()
+ if limit_direction not in valid_limit_directions:
+ msg = 'Invalid limit_direction: expecting one of %r, got %r.' % (
+ valid_limit_directions, limit_direction)
+ raise ValueError(msg)
- xvalues = getattr(xvalues, 'values', xvalues)
- yvalues = getattr(yvalues, 'values', yvalues)
+ from pandas import Series
+ ys = Series(yvalues)
+ start_nans = set(range(ys.first_valid_index()))
+ end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
+
+ # This is a list of the indexes in the series whose yvalue is currently NaN,
+ # but whose interpolated yvalue will be overwritten with NaN after computing
+ # the interpolation. For each index in this list, one of these conditions is
+ # true of the corresponding NaN in the yvalues:
+ #
+ # a) It is one of a chain of NaNs at the beginning of the series, and either
+ # limit is not specified or limit_direction is 'forward'.
+ # b) It is one of a chain of NaNs at the end of the series, and limit is
+ # specified and limit_direction is 'backward' or 'both'.
+ # c) Limit is nonzero and it is further than limit from the nearest non-NaN
+ # value (with respect to the limit_direction setting).
+ #
+ # The default behavior is to fill forward with no limit, ignoring NaNs at
+ # the beginning (see issues #9218 and #10420)
+ violate_limit = sorted(start_nans)
if limit:
- violate_limit = _interp_limit(invalid, limit)
- if valid.any():
- firstIndex = valid.argmax()
- valid = valid[firstIndex:]
- invalid = invalid[firstIndex:]
- result = yvalues.copy()
- if valid.all():
- return yvalues
- else:
- # have to call np.array(xvalues) since xvalues could be an Index
- # which cant be mutated
- result = np.empty_like(np.array(xvalues), dtype=np.float64)
- result.fill(np.nan)
- return result
+ if limit_direction == 'forward':
+ violate_limit = sorted(start_nans | set(_interp_limit(invalid, limit, 0)))
+ if limit_direction == 'backward':
+ violate_limit = sorted(end_nans | set(_interp_limit(invalid, 0, limit)))
+ if limit_direction == 'both':
+ violate_limit = _interp_limit(invalid, limit, limit)
+
+ xvalues = getattr(xvalues, 'values', xvalues)
+ yvalues = getattr(yvalues, 'values', yvalues)
+ result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
@@ -1648,32 +1676,27 @@ def _interp_limit(invalid, limit):
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
-
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
-
- inds = inds[firstIndex:]
-
- result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
- yvalues[firstIndex:][valid])
-
- if limit:
- result[violate_limit] = np.nan
+ result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
+ result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'piecewise_polynomial', 'pchip']
if method in sp_methods:
- new_x = new_x[firstIndex:]
-
- result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
- valid_x, valid_y, new_x, method=method, fill_value=fill_value,
+ inds = np.asarray(xvalues)
+ # hack for DatetimeIndex, #1646
+ if issubclass(inds.dtype.type, np.datetime64):
+ inds = inds.view(np.int64)
+ result[invalid] = _interpolate_scipy_wrapper(
+ inds[valid], yvalues[valid], inds[invalid], method=method,
+ fill_value=fill_value,
bounds_error=bounds_error, order=order, **kwargs)
- if limit:
- result[violate_limit] = np.nan
+ result[violate_limit] = np.nan
return result
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fe09e03281b4f..237da987a780e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2964,7 +2964,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
return self._constructor(new_data).__finalize__(self)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
- downcast=None, **kwargs):
+ limit_direction='forward', downcast=None, **kwargs):
"""
Interpolate values according to different methods.
@@ -3001,6 +3001,12 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
* 1: fill row-by-row
limit : int, default None.
Maximum number of consecutive NaNs to fill.
+ limit_direction : {'forward', 'backward', 'both'}, defaults to 'forward'
+ If limit is specified, consecutive NaNs will be filled in this
+ direction.
+
+ .. versionadded:: 0.17.0
+
inplace : bool, default False
Update the NDFrame in place if possible.
downcast : optional, 'infer' or None, defaults to None
@@ -3071,6 +3077,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
index=index,
values=_maybe_transposed_self,
limit=limit,
+ limit_direction=limit_direction,
inplace=inplace,
downcast=downcast,
**kwargs
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 1d6269ae904d2..c9ff67945225d 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -747,6 +747,7 @@ def putmask(self, mask, new, align=True, inplace=False,
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
+ limit_direction='forward',
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
@@ -790,6 +791,7 @@ def check_int_bool(self, inplace):
values=values,
axis=axis,
limit=limit,
+ limit_direction=limit_direction,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
@@ -829,6 +831,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
+ limit_direction='forward',
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
@@ -855,6 +858,7 @@ def func(x):
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
+ limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 7ed8799dd6ded..19989116b26df 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -857,10 +857,79 @@ def test_interp_scipy_basic(self):
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
+
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
+ def test_interp_limit_forward(self):
+ s = Series([1, 3, np.nan, np.nan, np.nan, 11])
+
+ # Provide 'forward' (the default) explicitly here.
+ expected = Series([1., 3., 5., 7., np.nan, 11.])
+
+ result = s.interpolate(
+ method='linear', limit=2, limit_direction='forward')
+ assert_series_equal(result, expected)
+
+ result = s.interpolate(
+ method='linear', limit=2, limit_direction='FORWARD')
+ assert_series_equal(result, expected)
+
+ def test_interp_limit_bad_direction(self):
+ s = Series([1, 3, np.nan, np.nan, np.nan, 11])
+ expected = Series([1., 3., 5., 7., 9., 11.])
+
+ self.assertRaises(ValueError, s.interpolate,
+ method='linear', limit=2,
+ limit_direction='abc')
+
+ # raises an error even if no limit is specified.
+ self.assertRaises(ValueError, s.interpolate,
+ method='linear',
+ limit_direction='abc')
+
+ def test_interp_limit_direction(self):
+ # These tests are for issue #9218 -- fill NaNs in both directions.
+ s = Series([1, 3, np.nan, np.nan, np.nan, 11])
+
+ expected = Series([1., 3., np.nan, 7., 9., 11.])
+ result = s.interpolate(
+ method='linear', limit=2, limit_direction='backward')
+ assert_series_equal(result, expected)
+
+ expected = Series([1., 3., 5., np.nan, 9., 11.])
+ result = s.interpolate(
+ method='linear', limit=1, limit_direction='both')
+ assert_series_equal(result, expected)
+
+ # Check that this works on a longer series of nans.
+ s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
+
+ expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
+ result = s.interpolate(
+ method='linear', limit=2, limit_direction='both')
+ assert_series_equal(result, expected)
+
+ expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
+ result = s.interpolate(
+ method='linear', limit=1, limit_direction='both')
+ assert_series_equal(result, expected)
+
+ def test_interp_limit_to_ends(self):
+ # These test are for issue #10420 -- flow back to beginning.
+ s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
+
+ expected = Series([5., 5., 5., 7., 9., np.nan])
+ result = s.interpolate(
+ method='linear', limit=2, limit_direction='backward')
+ assert_series_equal(result, expected)
+
+ expected = Series([5., 5., 5., 7., 9., 9.])
+ result = s.interpolate(
+ method='linear', limit=2, limit_direction='both')
+ assert_series_equal(result, expected)
+
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
| closes #9218
closes #10420
**Edit: changed approach, see the rest of the thread**
Currently, interpolate() has a "limit" kwarg that, when set to n,
prevents interpolated values from propagating more than n rows forward.
This change adds a "backward_limit" kwarg that, when set to n, prevents
interpolated values from propagating more than n rows backward.
The behavior prior to this change is as though "backward_limit" existed
but was always set to 0. That is, interpolated values never filled in
NaNs immediately before a non-NaN value (unless those same NaNs happened
to follow a different, non-NaN value within "limit" rows).
In this change the "backward_limit" kwarg has no effect if "limit" is
not specified (i.e., None).
| https://api.github.com/repos/pandas-dev/pandas/pulls/10691 | 2015-07-28T19:26:37Z | 2015-09-01T12:04:25Z | 2015-09-01T12:04:25Z | 2015-09-15T16:39:18Z |
Empty subtypes of Index return their type, rather than Index | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 322f431a37a79..b50ac38d81ade 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -372,6 +372,7 @@ Bug Fixes
- Bug in ``pd.get_dummies`` with `sparse=True` not returning ``SparseDataFrame`` (:issue:`10531`)
- Bug in ``Index`` subtypes (such as ``PeriodIndex``) not returning their own type for ``.drop`` and ``.insert`` methods (:issue:`10620`)
+- Bug in subclasses of ``Index`` with no values returned Index objects rather than their own classes, in some cases (:issue:`10596`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ea9d1f38b92c1..1ac0cc1730fa2 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1538,7 +1538,7 @@ def difference(self, other):
self._assert_can_do_setop(other)
if self.equals(other):
- return Index([], name=self.name)
+ return self._shallow_copy(np.asarray([]))
other, result_name = self._convert_can_do_setop(other)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 81c6366b4cb41..3c1fd2144c101 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -367,6 +367,12 @@ def test_difference_base(self):
with tm.assertRaisesRegexp(TypeError, msg):
result = first.difference([1, 2, 3])
+ # GH 10596 - empty difference retains index's type
+
+ result = idx.difference(idx)
+ expected = idx[0:0]
+ self.assertTrue(result.equals(expected))
+
def test_symmetric_diff(self):
for name, idx in compat.iteritems(self.indices):
first = idx[1:]
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 242d9a7757556..ac0f9b6cf05c9 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -733,9 +733,9 @@ def __getitem__(self, key):
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
- return PeriodIndex(result, name=self.name, freq=self.freq)
+ return self._shallow_copy(result)
- return PeriodIndex(result, name=self.name, freq=self.freq)
+ return self._shallow_copy(result)
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
@@ -796,7 +796,7 @@ def append(self, other):
to_concat = [x.asobject.values for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
- return PeriodIndex(cat_values, freq=self.freq, name=name)
+ return self._shallow_copy(cat_values, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 1b38f51ed4f71..9219616f20cdc 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -644,7 +644,7 @@ def test_dti_dti_deprecated_ops(self):
with tm.assert_produces_warning(FutureWarning):
result = dti-dti
- expected = Index([])
+ expected = dti[0:0]
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
@@ -654,7 +654,7 @@ def test_dti_dti_deprecated_ops(self):
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti_tz
- expected = Index([])
+ expected = dti_tz[0:0]
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
| Resolves #10596
Prior was in #10599 FYI
I think this has a PyTables error which I need to resolve
| https://api.github.com/repos/pandas-dev/pandas/pulls/10687 | 2015-07-28T15:13:49Z | 2015-07-28T21:44:23Z | null | 2015-07-28T22:15:32Z |
BUG: GH10581 where read_msgpack does not respect encoding | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 7e69a8044a305..7e4bd302a4f0a 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -172,6 +172,8 @@ Other enhancements
- ``drop_duplicates`` and ``duplicated`` now accept ``keep`` keyword to target first, last, and all duplicates. ``take_last`` keyword is deprecated, see :ref:`deprecations <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`)
+- ``msgpack`` submodule has been updated to 0.4.6 with backward compatibility (:issue:`10581`)
+
.. ipython :: python
s = pd.Series(['A', 'B', 'C', 'A', 'B', 'D'])
@@ -669,4 +671,5 @@ Bug Fixes
- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
- Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`)
+- Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`)
- Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 6472da58ac711..d5c02736a1cf5 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -60,7 +60,7 @@
from pandas.core.internals import BlockManager, make_block
import pandas.core.internals as internals
-from pandas.msgpack import Unpacker as _Unpacker, Packer as _Packer
+from pandas.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
# until we can pass this into our conversion functions,
# this is pretty hacky
@@ -131,7 +131,7 @@ def read_msgpack(path_or_buf, iterator=False, **kwargs):
return Iterator(path_or_buf)
def read(fh):
- l = list(unpack(fh))
+ l = list(unpack(fh, **kwargs))
if len(l) == 1:
return l[0]
return l
@@ -222,7 +222,7 @@ def convert(values):
# convert to a bytes array
v = v.tostring()
import zlib
- return zlib.compress(v)
+ return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
@@ -233,18 +233,24 @@ def convert(values):
# convert to a bytes array
v = v.tostring()
import blosc
- return blosc.compress(v, typesize=dtype.itemsize)
+ return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
- return v.tostring()
+ return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
+ as_is_ext = isinstance(values, ExtType) and values.code == 0
+
+ if as_is_ext:
+ values = values.data
+
if dtype == np.object_:
return np.array(values, dtype=object)
- values = values.encode('latin1')
+ if not as_is_ext:
+ values = values.encode('latin1')
if compress == 'zlib':
import zlib
@@ -558,19 +564,23 @@ def create_block(b):
def pack(o, default=encode,
- encoding='latin1', unicode_errors='strict', use_single_float=False):
+ encoding='latin1', unicode_errors='strict', use_single_float=False,
+ autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
- use_single_float=use_single_float).pack(o)
+ use_single_float=use_single_float,
+ autoreset=autoreset,
+ use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='latin1',
- unicode_errors='strict', object_pairs_hook=None):
+ unicode_errors='strict', object_pairs_hook=None,
+ max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
@@ -580,7 +590,9 @@ def unpack(packed, object_hook=decode,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
- object_pairs_hook=object_pairs_hook)
+ object_pairs_hook=object_pairs_hook,
+ max_buffer_size=max_buffer_size,
+ ext_hook=ext_hook)
class Packer(_Packer):
@@ -588,11 +600,15 @@ class Packer(_Packer):
def __init__(self, default=encode,
encoding='latin1',
unicode_errors='strict',
- use_single_float=False):
+ use_single_float=False,
+ autoreset=1,
+ use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
- use_single_float=use_single_float)
+ use_single_float=use_single_float,
+ autoreset=autoreset,
+ use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
@@ -600,7 +616,7 @@ class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='latin1',
- unicode_errors='strict', max_buffer_size=0):
+ unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
@@ -609,7 +625,8 @@ def __init__(self, file_like=None, read_size=0, use_list=False,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
- max_buffer_size=max_buffer_size)
+ max_buffer_size=max_buffer_size,
+ ext_hook=ext_hook)
class Iterator(object):
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index b71fd32a29e1e..1267821086d61 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -54,9 +54,9 @@ def setUp(self):
def tearDown(self):
pass
- def encode_decode(self, x, **kwargs):
+ def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
- to_msgpack(p, x, **kwargs)
+ to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
@@ -517,12 +517,38 @@ def test_compression_blosc(self):
assert_frame_equal(self.frame[k], i_rec[k])
+class TestEncoding(TestPackers):
+ def setUp(self):
+ super(TestEncoding, self).setUp()
+ data = {
+ 'A': [compat.u('\u2019')] * 1000,
+ 'B': np.arange(1000, dtype=np.int32),
+ 'C': list(100 * 'abcdefghij'),
+ 'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
+ 'E': [datetime.timedelta(days=x) for x in range(1000)],
+ 'G': [400] * 1000
+ }
+ self.frame = {
+ 'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
+ 'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
+ 'mixed': DataFrame(data),
+ }
+ self.utf_encodings = ['utf8', 'utf16', 'utf32']
+
+ def test_utf(self):
+ # GH10581
+ for encoding in self.utf_encodings:
+ for frame in compat.itervalues(self.frame):
+ result = self.encode_decode(frame, encoding=encoding)
+ assert_frame_equal(result, frame)
+
+
class TestMsgpack():
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
-
+TestPackers
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
diff --git a/pandas/msgpack.pyx b/pandas/msgpack.pyx
deleted file mode 100644
index 625ac55ee832c..0000000000000
--- a/pandas/msgpack.pyx
+++ /dev/null
@@ -1,669 +0,0 @@
-# coding: utf-8
-#cython: embedsignature=True
-#cython: profile=False
-
-from cpython cimport *
-cdef extern from "Python.h":
- ctypedef char* const_char_ptr "const char*"
- ctypedef char* const_void_ptr "const void*"
- ctypedef struct PyObject
- cdef int PyObject_AsReadBuffer(object o, const_void_ptr* buff, Py_ssize_t* buf_len) except -1
-
-from libc.stdlib cimport *
-from libc.string cimport *
-from libc.limits cimport *
-
-import cython
-import numpy as np
-from numpy cimport *
-
-class UnpackException(IOError):
- pass
-
-
-class BufferFull(UnpackException):
- pass
-
-
-class OutOfData(UnpackException):
- pass
-
-
-class UnpackValueError(UnpackException, ValueError):
- pass
-
-
-class ExtraData(ValueError):
- def __init__(self, unpacked, extra):
- self.unpacked = unpacked
- self.extra = extra
-
- def __str__(self):
- return "unpack(b) recieved extra data."
-
-class PackException(IOError):
- pass
-
-class PackValueError(PackException, ValueError):
- pass
-
-cdef extern from "msgpack/unpack.h":
- ctypedef struct msgpack_user:
- bint use_list
- PyObject* object_hook
- bint has_pairs_hook # call object_hook with k-v pairs
- PyObject* list_hook
- char *encoding
- char *unicode_errors
-
- ctypedef struct template_context:
- msgpack_user user
- PyObject* obj
- size_t count
- unsigned int ct
- PyObject* key
-
- ctypedef int (*execute_fn)(template_context* ctx, const_char_ptr data,
- size_t len, size_t* off) except? -1
- execute_fn template_construct
- execute_fn template_skip
- execute_fn read_array_header
- execute_fn read_map_header
- void template_init(template_context* ctx)
- object template_data(template_context* ctx)
-
-cdef extern from "msgpack/pack.h":
- struct msgpack_packer:
- char* buf
- size_t length
- size_t buf_size
-
- int msgpack_pack_int(msgpack_packer* pk, int d)
- int msgpack_pack_nil(msgpack_packer* pk)
- int msgpack_pack_true(msgpack_packer* pk)
- int msgpack_pack_false(msgpack_packer* pk)
- int msgpack_pack_long(msgpack_packer* pk, long d)
- int msgpack_pack_long_long(msgpack_packer* pk, long long d)
- int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
- int msgpack_pack_float(msgpack_packer* pk, float d)
- int msgpack_pack_double(msgpack_packer* pk, double d)
- int msgpack_pack_array(msgpack_packer* pk, size_t l)
- int msgpack_pack_map(msgpack_packer* pk, size_t l)
- int msgpack_pack_raw(msgpack_packer* pk, size_t l)
- int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
-
-cdef int DEFAULT_RECURSE_LIMIT=511
-
-
-
-cdef class Packer(object):
- """MessagePack Packer
-
- usage:
-
- packer = Packer()
- astream.write(packer.pack(a))
- astream.write(packer.pack(b))
-
- Packer's constructor has some keyword arguments:
-
- * *defaut* - Convert user type to builtin type that Packer supports.
- See also simplejson's document.
- * *encoding* - Convert unicode to bytes with this encoding. (default: 'utf-8')
- * *unicode_errors* - Error handler for encoding unicode. (default: 'strict')
- * *use_single_float* - Use single precision float type for float. (default: False)
- * *autoreset* - Reset buffer after each pack and return its content as `bytes`. (default: True).
- If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
- """
- cdef msgpack_packer pk
- cdef object _default
- cdef object _bencoding
- cdef object _berrors
- cdef char *encoding
- cdef char *unicode_errors
- cdef bool use_float
- cdef bint autoreset
-
- def __cinit__(self):
- cdef int buf_size = 1024*1024
- self.pk.buf = <char*> malloc(buf_size);
- if self.pk.buf == NULL:
- raise MemoryError("Unable to allocate internal buffer.")
- self.pk.buf_size = buf_size
- self.pk.length = 0
-
- def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
- use_single_float=False, bint autoreset=1):
- self.use_float = use_single_float
- self.autoreset = autoreset
- if default is not None:
- if not PyCallable_Check(default):
- raise TypeError("default must be a callable.")
- self._default = default
- if encoding is None:
- self.encoding = NULL
- self.unicode_errors = NULL
- else:
- if isinstance(encoding, unicode):
- self._bencoding = encoding.encode('ascii')
- else:
- self._bencoding = encoding
- self.encoding = PyBytes_AsString(self._bencoding)
- if isinstance(unicode_errors, unicode):
- self._berrors = unicode_errors.encode('ascii')
- else:
- self._berrors = unicode_errors
- self.unicode_errors = PyBytes_AsString(self._berrors)
-
- def __dealloc__(self):
- free(self.pk.buf);
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
- cdef long long llval
- cdef unsigned long long ullval
- cdef long longval
- cdef float fval
- cdef double dval
- cdef char* rawval
- cdef int ret
- cdef dict d
- cdef object dtype
-
- cdef int n,i
-
- if nest_limit < 0:
- raise PackValueError("recursion limit exceeded.")
-
- if o is None:
- ret = msgpack_pack_nil(&self.pk)
- elif isinstance(o, bool):
- if o:
- ret = msgpack_pack_true(&self.pk)
- else:
- ret = msgpack_pack_false(&self.pk)
- elif PyLong_Check(o):
- if o > 0:
- ullval = o
- ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
- else:
- llval = o
- ret = msgpack_pack_long_long(&self.pk, llval)
- elif PyInt_Check(o):
- longval = o
- ret = msgpack_pack_long(&self.pk, longval)
- elif PyFloat_Check(o):
- if self.use_float:
- fval = o
- ret = msgpack_pack_float(&self.pk, fval)
- else:
- dval = o
- ret = msgpack_pack_double(&self.pk, dval)
- elif PyBytes_Check(o):
- rawval = o
- ret = msgpack_pack_raw(&self.pk, len(o))
- if ret == 0:
- ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
- elif PyUnicode_Check(o):
- if not self.encoding:
- raise TypeError("Can't encode unicode string: no encoding is specified")
- o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
- rawval = o
- ret = msgpack_pack_raw(&self.pk, len(o))
- if ret == 0:
- ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
- elif PyDict_CheckExact(o):
- d = <dict>o
- ret = msgpack_pack_map(&self.pk, len(d))
- if ret == 0:
- for k, v in d.iteritems():
- ret = self._pack(k, nest_limit-1)
- if ret != 0: break
- ret = self._pack(v, nest_limit-1)
- if ret != 0: break
- elif PyDict_Check(o):
- ret = msgpack_pack_map(&self.pk, len(o))
- if ret == 0:
- for k, v in o.items():
- ret = self._pack(k, nest_limit-1)
- if ret != 0: break
- ret = self._pack(v, nest_limit-1)
- if ret != 0: break
- elif PyTuple_Check(o) or PyList_Check(o):
- ret = msgpack_pack_array(&self.pk, len(o))
- if ret == 0:
- for v in o:
- ret = self._pack(v, nest_limit-1)
- if ret != 0: break
-
- elif self._default:
- o = self._default(o)
- ret = self._pack(o, nest_limit-1)
- else:
- raise TypeError("can't serialize %r" % (o,))
- return ret
-
- cpdef pack(self, object obj):
- cdef int ret
- ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
- if ret == -1:
- raise MemoryError
- elif ret: # should not happen.
- raise TypeError
- if self.autoreset:
- buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
- self.pk.length = 0
- return buf
-
- def pack_array_header(self, size_t size):
- cdef int ret = msgpack_pack_array(&self.pk, size)
- if ret == -1:
- raise MemoryError
- elif ret: # should not happen
- raise TypeError
- if self.autoreset:
- buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
- self.pk.length = 0
- return buf
-
- def pack_map_header(self, size_t size):
- cdef int ret = msgpack_pack_map(&self.pk, size)
- if ret == -1:
- raise MemoryError
- elif ret: # should not happen
- raise TypeError
- if self.autoreset:
- buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
- self.pk.length = 0
- return buf
-
- def pack_map_pairs(self, object pairs):
- """
- Pack *pairs* as msgpack map type.
-
- *pairs* should sequence of pair.
- (`len(pairs)` and `for k, v in *pairs*:` should be supported.)
- """
- cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
- if ret == 0:
- for k, v in pairs:
- ret = self._pack(k)
- if ret != 0: break
- ret = self._pack(v)
- if ret != 0: break
- if ret == -1:
- raise MemoryError
- elif ret: # should not happen
- raise TypeError
- if self.autoreset:
- buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
- self.pk.length = 0
- return buf
-
- def reset(self):
- """Clear internal buffer."""
- self.pk.length = 0
-
- def bytes(self):
- """Return buffer content."""
- return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
-
-
- cdef inline pack_pair(self, object k, object v, int nest_limit):
- ret = self._pack(k, nest_limit-1)
- if ret != 0: raise PackException("cannot pack : %s" % k)
- ret = self._pack(v, nest_limit-1)
- if ret != 0: raise PackException("cannot pack : %s" % v)
- return ret
-
-def pack(object o, object stream, default=None, encoding='utf-8', unicode_errors='strict'):
- """
- pack an object `o` and write it to stream)."""
- packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
- stream.write(packer.pack(o))
-
-def packb(object o, default=None, encoding='utf-8', unicode_errors='strict', use_single_float=False):
- """
- pack o and return packed bytes."""
- packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors,
- use_single_float=use_single_float)
- return packer.pack(o)
-
-
-cdef inline init_ctx(template_context *ctx,
- object object_hook, object object_pairs_hook, object list_hook,
- bint use_list, char* encoding, char* unicode_errors):
- template_init(ctx)
- ctx.user.use_list = use_list
- ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
-
- if object_hook is not None and object_pairs_hook is not None:
- raise ValueError("object_pairs_hook and object_hook are mutually exclusive.")
-
- if object_hook is not None:
- if not PyCallable_Check(object_hook):
- raise TypeError("object_hook must be a callable.")
- ctx.user.object_hook = <PyObject*>object_hook
-
- if object_pairs_hook is None:
- ctx.user.has_pairs_hook = False
- else:
- if not PyCallable_Check(object_pairs_hook):
- raise TypeError("object_pairs_hook must be a callable.")
- ctx.user.object_hook = <PyObject*>object_pairs_hook
- ctx.user.has_pairs_hook = True
-
- if list_hook is not None:
- if not PyCallable_Check(list_hook):
- raise TypeError("list_hook must be a callable.")
- ctx.user.list_hook = <PyObject*>list_hook
-
- ctx.user.encoding = encoding
- ctx.user.unicode_errors = unicode_errors
-
-def unpackb(object packed, object object_hook=None, object list_hook=None,
- bint use_list=1, encoding=None, unicode_errors="strict",
- object_pairs_hook=None,
- ):
- """Unpack packed_bytes to object. Returns an unpacked object.
-
- Raises `ValueError` when `packed` contains extra bytes.
- """
- cdef template_context ctx
- cdef size_t off = 0
- cdef int ret
-
- cdef char* buf
- cdef Py_ssize_t buf_len
- cdef char* cenc = NULL
- cdef char* cerr = NULL
-
- PyObject_AsReadBuffer(packed, <const_void_ptr*>&buf, &buf_len)
-
- if encoding is not None:
- if isinstance(encoding, unicode):
- encoding = encoding.encode('ascii')
- cenc = PyBytes_AsString(encoding)
-
- if unicode_errors is not None:
- if isinstance(unicode_errors, unicode):
- unicode_errors = unicode_errors.encode('ascii')
- cerr = PyBytes_AsString(unicode_errors)
-
- init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, use_list, cenc, cerr)
- ret = template_construct(&ctx, buf, buf_len, &off)
- if ret == 1:
- obj = template_data(&ctx)
- if off < buf_len:
- raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
- return obj
- elif ret < 0:
- raise ValueError("Unpack failed: error = %d" % (ret,))
- else:
- raise UnpackValueError
-
-
-def unpack(object stream, object object_hook=None, object list_hook=None,
- bint use_list=1, encoding=None, unicode_errors="strict",
- object_pairs_hook=None,
- ):
- """Unpack an object from `stream`.
-
- Raises `ValueError` when `stream` has extra bytes.
- """
- return unpackb(stream.read(), use_list=use_list,
- object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
- encoding=encoding, unicode_errors=unicode_errors,
- )
-
-
-cdef class Unpacker(object):
- """
- Streaming unpacker.
-
- `file_like` is a file-like object having `.read(n)` method.
- When `Unpacker` initialized with `file_like`, unpacker reads serialized data
- from it and `.feed()` method is not usable.
-
- `read_size` is used as `file_like.read(read_size)`.
- (default: min(1024**2, max_buffer_size))
-
- If `use_list` is true (default), msgpack list is deserialized to Python list.
- Otherwise, it is deserialized to Python tuple.
-
- `object_hook` is same to simplejson. If it is not None, it should be callable
- and Unpacker calls it with a dict argument after deserializing a map.
-
- `object_pairs_hook` is same to simplejson. If it is not None, it should be callable
- and Unpacker calls it with a list of key-value pairs after deserializing a map.
-
- `encoding` is encoding used for decoding msgpack bytes. If it is None (default),
- msgpack bytes is deserialized to Python bytes.
-
- `unicode_errors` is used for decoding bytes.
-
- `max_buffer_size` limits size of data waiting unpacked.
- 0 means system's INT_MAX (default).
- Raises `BufferFull` exception when it is insufficient.
- You shoud set this parameter when unpacking data from untrasted source.
-
- example of streaming deserialize from file-like object::
-
- unpacker = Unpacker(file_like)
- for o in unpacker:
- do_something(o)
-
- example of streaming deserialize from socket::
-
- unpacker = Unpacker()
- while 1:
- buf = sock.recv(1024**2)
- if not buf:
- break
- unpacker.feed(buf)
- for o in unpacker:
- do_something(o)
- """
- cdef template_context ctx
- cdef char* buf
- cdef size_t buf_size, buf_head, buf_tail
- cdef object file_like
- cdef object file_like_read
- cdef Py_ssize_t read_size
- cdef object object_hook
- cdef object encoding, unicode_errors
- cdef size_t max_buffer_size
-
- def __cinit__(self):
- self.buf = NULL
-
- def __dealloc__(self):
- free(self.buf)
- self.buf = NULL
-
- def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
- object object_hook=None, object object_pairs_hook=None, object list_hook=None,
- encoding=None, unicode_errors='strict', int max_buffer_size=0,
- ):
- cdef char *cenc=NULL, *cerr=NULL
-
- self.file_like = file_like
- if file_like:
- self.file_like_read = file_like.read
- if not PyCallable_Check(self.file_like_read):
- raise ValueError("`file_like.read` must be a callable.")
- if not max_buffer_size:
- max_buffer_size = INT_MAX
- if read_size > max_buffer_size:
- raise ValueError("read_size should be less or equal to max_buffer_size")
- if not read_size:
- read_size = min(max_buffer_size, 1024**2)
- self.max_buffer_size = max_buffer_size
- self.read_size = read_size
- self.buf = <char*>malloc(read_size)
- if self.buf == NULL:
- raise MemoryError("Unable to allocate internal buffer.")
- self.buf_size = read_size
- self.buf_head = 0
- self.buf_tail = 0
-
- if encoding is not None:
- if isinstance(encoding, unicode):
- encoding = encoding.encode('ascii')
- self.encoding = encoding
- cenc = PyBytes_AsString(encoding)
-
- if unicode_errors is not None:
- if isinstance(unicode_errors, unicode):
- unicode_errors = unicode_errors.encode('ascii')
- self.unicode_errors = unicode_errors
- cerr = PyBytes_AsString(unicode_errors)
-
- init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, use_list, cenc, cerr)
-
- def feed(self, object next_bytes):
- """Append `next_bytes` to internal buffer."""
- cdef char* buf
- cdef Py_ssize_t buf_len
- if self.file_like is not None:
- raise TypeError(
- "unpacker.feed() is not be able to use with `file_like`.")
- PyObject_AsReadBuffer(next_bytes, <const_void_ptr*>&buf, &buf_len)
- self.append_buffer(buf, buf_len)
-
- cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
- cdef:
- char* buf = self.buf
- char* new_buf
- size_t head = self.buf_head
- size_t tail = self.buf_tail
- size_t buf_size = self.buf_size
- size_t new_size
-
- if tail + _buf_len > buf_size:
- if ((tail - head) + _buf_len) <= buf_size:
- # move to front.
- memmove(buf, buf + head, tail - head)
- tail -= head
- head = 0
- else:
- # expand buffer.
- new_size = (tail-head) + _buf_len
- if new_size > self.max_buffer_size:
- raise BufferFull
- new_size = min(new_size*2, self.max_buffer_size)
- new_buf = <char*>malloc(new_size)
- if new_buf == NULL:
- # self.buf still holds old buffer and will be freed during
- # obj destruction
- raise MemoryError("Unable to enlarge internal buffer.")
- memcpy(new_buf, buf + head, tail - head)
- free(buf)
-
- buf = new_buf
- buf_size = new_size
- tail -= head
- head = 0
-
- memcpy(buf + tail, <char*>(_buf), _buf_len)
- self.buf = buf
- self.buf_head = head
- self.buf_size = buf_size
- self.buf_tail = tail + _buf_len
-
- cdef read_from_file(self):
- next_bytes = self.file_like_read(
- min(self.read_size,
- self.max_buffer_size - (self.buf_tail - self.buf_head)
- ))
- if next_bytes:
- self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
- else:
- self.file_like = None
-
- cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
- cdef int ret
- cdef object obj
- cdef size_t prev_head
- while 1:
- prev_head = self.buf_head
- ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
- if write_bytes is not None:
- write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
-
- if ret == 1:
- obj = template_data(&self.ctx)
- template_init(&self.ctx)
- return obj
- elif ret == 0:
- if self.file_like is not None:
- self.read_from_file()
- continue
- if iter:
- raise StopIteration("No more data to unpack.")
- else:
- raise OutOfData("No more data to unpack.")
- else:
- raise ValueError("Unpack failed: error = %d" % (ret,))
-
- def read_bytes(self, Py_ssize_t nbytes):
- """read a specified number of raw bytes from the stream"""
- cdef size_t nread
- nread = min(self.buf_tail - self.buf_head, nbytes)
- ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
- self.buf_head += nread
- if len(ret) < nbytes and self.file_like is not None:
- ret += self.file_like.read(nbytes - len(ret))
- return ret
-
- def unpack(self, object write_bytes=None):
- """
- unpack one object
-
- If write_bytes is not None, it will be called with parts of the raw
- message as it is unpacked.
-
- Raises `OutOfData` when there are no more bytes to unpack.
- """
- return self._unpack(template_construct, write_bytes)
-
- def skip(self, object write_bytes=None):
- """
- read and ignore one object, returning None
-
- If write_bytes is not None, it will be called with parts of the raw
- message as it is unpacked.
-
- Raises `OutOfData` when there are no more bytes to unpack.
- """
- return self._unpack(template_skip, write_bytes)
-
- def read_array_header(self, object write_bytes=None):
- """assuming the next object is an array, return its size n, such that
- the next n unpack() calls will iterate over its contents.
-
- Raises `OutOfData` when there are no more bytes to unpack.
- """
- return self._unpack(read_array_header, write_bytes)
-
- def read_map_header(self, object write_bytes=None):
- """assuming the next object is a map, return its size n, such that the
- next n * 2 unpack() calls will iterate over its key-value pairs.
-
- Raises `OutOfData` when there are no more bytes to unpack.
- """
- return self._unpack(read_map_header, write_bytes)
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self._unpack(template_construct, None, 1)
-
- # for debug.
- #def _buf(self):
- # return PyString_FromStringAndSize(self.buf, self.buf_tail)
-
- #def _off(self):
- # return self.buf_head
diff --git a/pandas/msgpack/__init__.py b/pandas/msgpack/__init__.py
new file mode 100644
index 0000000000000..bf0e2853ae131
--- /dev/null
+++ b/pandas/msgpack/__init__.py
@@ -0,0 +1,49 @@
+# coding: utf-8
+from pandas.msgpack._version import version
+from pandas.msgpack.exceptions import *
+
+from collections import namedtuple
+
+
+class ExtType(namedtuple('ExtType', 'code data')):
+ """ExtType represents ext type in msgpack."""
+ def __new__(cls, code, data):
+ if not isinstance(code, int):
+ raise TypeError("code must be int")
+ if not isinstance(data, bytes):
+ raise TypeError("data must be bytes")
+ if not 0 <= code <= 127:
+ raise ValueError("code must be 0~127")
+ return super(ExtType, cls).__new__(cls, code, data)
+
+
+import os
+from pandas.msgpack._packer import Packer
+from pandas.msgpack._unpacker import unpack, unpackb, Unpacker
+
+
+
+def pack(o, stream, **kwargs):
+ """
+ Pack object `o` and write it to `stream`
+
+ See :class:`Packer` for options.
+ """
+ packer = Packer(**kwargs)
+ stream.write(packer.pack(o))
+
+
+def packb(o, **kwargs):
+ """
+ Pack object `o` and return packed bytes
+
+ See :class:`Packer` for options.
+ """
+ return Packer(**kwargs).pack(o)
+
+# alias for compatibility to simplejson/marshal/pickle.
+load = unpack
+loads = unpackb
+
+dump = pack
+dumps = packb
diff --git a/pandas/msgpack/_packer.pyx b/pandas/msgpack/_packer.pyx
new file mode 100644
index 0000000000000..5004b9e8e7262
--- /dev/null
+++ b/pandas/msgpack/_packer.pyx
@@ -0,0 +1,294 @@
+# coding: utf-8
+#cython: embedsignature=True
+
+from cpython cimport *
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+
+from pandas.msgpack.exceptions import PackValueError
+from pandas.msgpack import ExtType
+
+
+cdef extern from "../src/msgpack/pack.h":
+ struct msgpack_packer:
+ char* buf
+ size_t length
+ size_t buf_size
+ bint use_bin_type
+
+ int msgpack_pack_int(msgpack_packer* pk, int d)
+ int msgpack_pack_nil(msgpack_packer* pk)
+ int msgpack_pack_true(msgpack_packer* pk)
+ int msgpack_pack_false(msgpack_packer* pk)
+ int msgpack_pack_long(msgpack_packer* pk, long d)
+ int msgpack_pack_long_long(msgpack_packer* pk, long long d)
+ int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+ int msgpack_pack_float(msgpack_packer* pk, float d)
+ int msgpack_pack_double(msgpack_packer* pk, double d)
+ int msgpack_pack_array(msgpack_packer* pk, size_t l)
+ int msgpack_pack_map(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw(msgpack_packer* pk, size_t l)
+ int msgpack_pack_bin(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+ int msgpack_pack_ext(msgpack_packer* pk, char typecode, size_t l)
+
+cdef int DEFAULT_RECURSE_LIMIT=511
+
+
+cdef class Packer(object):
+ """
+ MessagePack Packer
+
+ usage::
+
+ packer = Packer()
+ astream.write(packer.pack(a))
+ astream.write(packer.pack(b))
+
+ Packer's constructor has some keyword arguments:
+
+ :param callable default:
+ Convert user type to builtin type that Packer supports.
+ See also simplejson's document.
+ :param str encoding:
+ Convert unicode to bytes with this encoding. (default: 'utf-8')
+ :param str unicode_errors:
+ Error handler for encoding unicode. (default: 'strict')
+ :param bool use_single_float:
+ Use single precision float type for float. (default: False)
+ :param bool autoreset:
+ Reset buffer after each pack and return it's content as `bytes`. (default: True).
+ If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+ :param bool use_bin_type:
+ Use bin type introduced in msgpack spec 2.0 for bytes.
+ It also enable str8 type for unicode.
+ """
+ cdef msgpack_packer pk
+ cdef object _default
+ cdef object _bencoding
+ cdef object _berrors
+ cdef char *encoding
+ cdef char *unicode_errors
+ cdef bool use_float
+ cdef bint autoreset
+
+ def __cinit__(self):
+ cdef int buf_size = 1024*1024
+ self.pk.buf = <char*> malloc(buf_size);
+ if self.pk.buf == NULL:
+ raise MemoryError("Unable to allocate internal buffer.")
+ self.pk.buf_size = buf_size
+ self.pk.length = 0
+
+ def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
+ use_single_float=False, bint autoreset=1, bint use_bin_type=0):
+ """
+ """
+ self.use_float = use_single_float
+ self.autoreset = autoreset
+ self.pk.use_bin_type = use_bin_type
+ if default is not None:
+ if not PyCallable_Check(default):
+ raise TypeError("default must be a callable.")
+ self._default = default
+ if encoding is None:
+ self.encoding = NULL
+ self.unicode_errors = NULL
+ else:
+ if isinstance(encoding, unicode):
+ self._bencoding = encoding.encode('ascii')
+ else:
+ self._bencoding = encoding
+ self.encoding = PyBytes_AsString(self._bencoding)
+ if isinstance(unicode_errors, unicode):
+ self._berrors = unicode_errors.encode('ascii')
+ else:
+ self._berrors = unicode_errors
+ self.unicode_errors = PyBytes_AsString(self._berrors)
+
+ def __dealloc__(self):
+ free(self.pk.buf);
+
+ cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+ cdef long long llval
+ cdef unsigned long long ullval
+ cdef long longval
+ cdef float fval
+ cdef double dval
+ cdef char* rawval
+ cdef int ret
+ cdef dict d
+ cdef size_t L
+ cdef int default_used = 0
+
+ if nest_limit < 0:
+ raise PackValueError("recursion limit exceeded.")
+
+ while True:
+ if o is None:
+ ret = msgpack_pack_nil(&self.pk)
+ elif isinstance(o, bool):
+ if o:
+ ret = msgpack_pack_true(&self.pk)
+ else:
+ ret = msgpack_pack_false(&self.pk)
+ elif PyLong_Check(o):
+ # PyInt_Check(long) is True for Python 3.
+ # Sow we should test long before int.
+ if o > 0:
+ ullval = o
+ ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+ else:
+ llval = o
+ ret = msgpack_pack_long_long(&self.pk, llval)
+ elif PyInt_Check(o):
+ longval = o
+ ret = msgpack_pack_long(&self.pk, longval)
+ elif PyFloat_Check(o):
+ if self.use_float:
+ fval = o
+ ret = msgpack_pack_float(&self.pk, fval)
+ else:
+ dval = o
+ ret = msgpack_pack_double(&self.pk, dval)
+ elif PyBytes_Check(o):
+ L = len(o)
+ if L > (2**32)-1:
+ raise ValueError("bytes is too large")
+ rawval = o
+ ret = msgpack_pack_bin(&self.pk, L)
+ if ret == 0:
+ ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+ elif PyUnicode_Check(o):
+ if not self.encoding:
+ raise TypeError("Can't encode unicode string: no encoding is specified")
+ o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ L = len(o)
+ if L > (2**32)-1:
+ raise ValueError("dict is too large")
+ rawval = o
+ ret = msgpack_pack_raw(&self.pk, len(o))
+ if ret == 0:
+ ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ elif PyDict_CheckExact(o):
+ d = <dict>o
+ L = len(d)
+ if L > (2**32)-1:
+ raise ValueError("dict is too large")
+ ret = msgpack_pack_map(&self.pk, L)
+ if ret == 0:
+ for k, v in d.iteritems():
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: break
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif PyDict_Check(o):
+ L = len(o)
+ if L > (2**32)-1:
+ raise ValueError("dict is too large")
+ ret = msgpack_pack_map(&self.pk, L)
+ if ret == 0:
+ for k, v in o.items():
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: break
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif isinstance(o, ExtType):
+ # This should be before Tuple because ExtType is namedtuple.
+ longval = o.code
+ rawval = o.data
+ L = len(o.data)
+ if L > (2**32)-1:
+ raise ValueError("EXT data is too large")
+ ret = msgpack_pack_ext(&self.pk, longval, L)
+ ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+ elif PyTuple_Check(o) or PyList_Check(o):
+ L = len(o)
+ if L > (2**32)-1:
+ raise ValueError("list is too large")
+ ret = msgpack_pack_array(&self.pk, L)
+ if ret == 0:
+ for v in o:
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif not default_used and self._default:
+ o = self._default(o)
+ default_used = 1
+ continue
+ else:
+ raise TypeError("can't serialize %r" % (o,))
+ return ret
+
+ cpdef pack(self, object obj):
+ cdef int ret
+ ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen.
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_ext_type(self, typecode, data):
+ msgpack_pack_ext(&self.pk, typecode, len(data))
+ msgpack_pack_raw_body(&self.pk, data, len(data))
+
+ def pack_array_header(self, size_t size):
+ if size > (2**32-1):
+ raise ValueError
+ cdef int ret = msgpack_pack_array(&self.pk, size)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_map_header(self, size_t size):
+ if size > (2**32-1):
+ raise ValueError
+ cdef int ret = msgpack_pack_map(&self.pk, size)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_map_pairs(self, object pairs):
+ """
+ Pack *pairs* as msgpack map type.
+
+ *pairs* should sequence of pair.
+ (`len(pairs)` and `for k, v in pairs:` should be supported.)
+ """
+ cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
+ if ret == 0:
+ for k, v in pairs:
+ ret = self._pack(k)
+ if ret != 0: break
+ ret = self._pack(v)
+ if ret != 0: break
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def reset(self):
+ """Clear internal buffer."""
+ self.pk.length = 0
+
+ def bytes(self):
+ """Return buffer content."""
+ return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
diff --git a/pandas/msgpack/_unpacker.pyx b/pandas/msgpack/_unpacker.pyx
new file mode 100644
index 0000000000000..f68bf3369427c
--- /dev/null
+++ b/pandas/msgpack/_unpacker.pyx
@@ -0,0 +1,466 @@
+# coding: utf-8
+#cython: embedsignature=True
+
+from cpython cimport *
+cdef extern from "Python.h":
+ ctypedef struct PyObject
+ cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1
+
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+
+from pandas.msgpack.exceptions import (
+ BufferFull,
+ OutOfData,
+ UnpackValueError,
+ ExtraData,
+ )
+from pandas.msgpack import ExtType
+
+
+cdef extern from "../src/msgpack/unpack.h":
+ ctypedef struct msgpack_user:
+ bint use_list
+ PyObject* object_hook
+ bint has_pairs_hook # call object_hook with k-v pairs
+ PyObject* list_hook
+ PyObject* ext_hook
+ char *encoding
+ char *unicode_errors
+ Py_ssize_t max_str_len
+ Py_ssize_t max_bin_len
+ Py_ssize_t max_array_len
+ Py_ssize_t max_map_len
+ Py_ssize_t max_ext_len
+
+ ctypedef struct unpack_context:
+ msgpack_user user
+ PyObject* obj
+ size_t count
+
+ ctypedef int (*execute_fn)(unpack_context* ctx, const char* data,
+ size_t len, size_t* off) except? -1
+ execute_fn unpack_construct
+ execute_fn unpack_skip
+ execute_fn read_array_header
+ execute_fn read_map_header
+ void unpack_init(unpack_context* ctx)
+ object unpack_data(unpack_context* ctx)
+
+cdef inline init_ctx(unpack_context *ctx,
+ object object_hook, object object_pairs_hook,
+ object list_hook, object ext_hook,
+ bint use_list, char* encoding, char* unicode_errors,
+ Py_ssize_t max_str_len, Py_ssize_t max_bin_len,
+ Py_ssize_t max_array_len, Py_ssize_t max_map_len,
+ Py_ssize_t max_ext_len):
+ unpack_init(ctx)
+ ctx.user.use_list = use_list
+ ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
+ ctx.user.max_str_len = max_str_len
+ ctx.user.max_bin_len = max_bin_len
+ ctx.user.max_array_len = max_array_len
+ ctx.user.max_map_len = max_map_len
+ ctx.user.max_ext_len = max_ext_len
+
+ if object_hook is not None and object_pairs_hook is not None:
+ raise TypeError("object_pairs_hook and object_hook are mutually exclusive.")
+
+ if object_hook is not None:
+ if not PyCallable_Check(object_hook):
+ raise TypeError("object_hook must be a callable.")
+ ctx.user.object_hook = <PyObject*>object_hook
+
+ if object_pairs_hook is None:
+ ctx.user.has_pairs_hook = False
+ else:
+ if not PyCallable_Check(object_pairs_hook):
+ raise TypeError("object_pairs_hook must be a callable.")
+ ctx.user.object_hook = <PyObject*>object_pairs_hook
+ ctx.user.has_pairs_hook = True
+
+ if list_hook is not None:
+ if not PyCallable_Check(list_hook):
+ raise TypeError("list_hook must be a callable.")
+ ctx.user.list_hook = <PyObject*>list_hook
+
+ if ext_hook is not None:
+ if not PyCallable_Check(ext_hook):
+ raise TypeError("ext_hook must be a callable.")
+ ctx.user.ext_hook = <PyObject*>ext_hook
+
+ ctx.user.encoding = encoding
+ ctx.user.unicode_errors = unicode_errors
+
+def default_read_extended_type(typecode, data):
+ raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode)
+
+def unpackb(object packed, object object_hook=None, object list_hook=None,
+ bint use_list=1, encoding=None, unicode_errors="strict",
+ object_pairs_hook=None, ext_hook=ExtType,
+            Py_ssize_t max_str_len=2147483647, # 2**31-1
+ Py_ssize_t max_bin_len=2147483647,
+ Py_ssize_t max_array_len=2147483647,
+ Py_ssize_t max_map_len=2147483647,
+ Py_ssize_t max_ext_len=2147483647):
+ """
+ Unpack packed_bytes to object. Returns an unpacked object.
+
+ Raises `ValueError` when `packed` contains extra bytes.
+
+ See :class:`Unpacker` for options.
+ """
+ cdef unpack_context ctx
+ cdef size_t off = 0
+ cdef int ret
+
+ cdef char* buf
+ cdef Py_ssize_t buf_len
+ cdef char* cenc = NULL
+ cdef char* cerr = NULL
+
+ PyObject_AsReadBuffer(packed, <const void**>&buf, &buf_len)
+
+ if encoding is not None:
+ if isinstance(encoding, unicode):
+ encoding = encoding.encode('ascii')
+ cenc = PyBytes_AsString(encoding)
+
+ if unicode_errors is not None:
+ if isinstance(unicode_errors, unicode):
+ unicode_errors = unicode_errors.encode('ascii')
+ cerr = PyBytes_AsString(unicode_errors)
+
+ init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
+ use_list, cenc, cerr,
+ max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len)
+ ret = unpack_construct(&ctx, buf, buf_len, &off)
+ if ret == 1:
+ obj = unpack_data(&ctx)
+ if off < buf_len:
+ raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
+ return obj
+ else:
+ raise UnpackValueError("Unpack failed: error = %d" % (ret,))
+
+
+def unpack(object stream, object object_hook=None, object list_hook=None,
+ bint use_list=1, encoding=None, unicode_errors="strict",
+ object_pairs_hook=None,
+ ):
+ """
+ Unpack an object from `stream`.
+
+ Raises `ValueError` when `stream` has extra bytes.
+
+ See :class:`Unpacker` for options.
+ """
+ return unpackb(stream.read(), use_list=use_list,
+ object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
+ encoding=encoding, unicode_errors=unicode_errors,
+ )
+
+
+cdef class Unpacker(object):
+ """Streaming unpacker.
+
+ arguments:
+
+ :param file_like:
+ File-like object having `.read(n)` method.
+ If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+
+ :param int read_size:
+ Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`)
+
+ :param bool use_list:
+ If true, unpack msgpack array to Python list.
+ Otherwise, unpack to Python tuple. (default: True)
+
+ :param callable object_hook:
+ When specified, it should be callable.
+ Unpacker calls it with a dict argument after unpacking msgpack map.
+ (See also simplejson)
+
+ :param callable object_pairs_hook:
+ When specified, it should be callable.
+ Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+ (See also simplejson)
+
+ :param str encoding:
+ Encoding used for decoding msgpack raw.
+ If it is None (default), msgpack raw is deserialized to Python bytes.
+
+ :param str unicode_errors:
+ Used for decoding msgpack raw with *encoding*.
+ (default: `'strict'`)
+
+ :param int max_buffer_size:
+ Limits size of data waiting unpacked. 0 means system's INT_MAX (default).
+ Raises `BufferFull` exception when it is insufficient.
+        You should set this parameter when unpacking data from an untrusted source.
+
+ :param int max_str_len:
+ Limits max length of str. (default: 2**31-1)
+
+ :param int max_bin_len:
+ Limits max length of bin. (default: 2**31-1)
+
+ :param int max_array_len:
+ Limits max length of array. (default: 2**31-1)
+
+ :param int max_map_len:
+ Limits max length of map. (default: 2**31-1)
+
+
+ example of streaming deserialize from file-like object::
+
+ unpacker = Unpacker(file_like)
+ for o in unpacker:
+ process(o)
+
+ example of streaming deserialize from socket::
+
+ unpacker = Unpacker()
+ while True:
+ buf = sock.recv(1024**2)
+ if not buf:
+ break
+ unpacker.feed(buf)
+ for o in unpacker:
+ process(o)
+ """
+ cdef unpack_context ctx
+ cdef char* buf
+ cdef size_t buf_size, buf_head, buf_tail
+ cdef object file_like
+ cdef object file_like_read
+ cdef Py_ssize_t read_size
+ # To maintain refcnt.
+ cdef object object_hook, object_pairs_hook, list_hook, ext_hook
+ cdef object encoding, unicode_errors
+ cdef size_t max_buffer_size
+
+ def __cinit__(self):
+ self.buf = NULL
+
+ def __dealloc__(self):
+ free(self.buf)
+ self.buf = NULL
+
+ def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
+ object object_hook=None, object object_pairs_hook=None, object list_hook=None,
+ encoding=None, unicode_errors='strict', int max_buffer_size=0,
+ object ext_hook=ExtType,
+                 Py_ssize_t max_str_len=2147483647, # 2**31-1
+ Py_ssize_t max_bin_len=2147483647,
+ Py_ssize_t max_array_len=2147483647,
+ Py_ssize_t max_map_len=2147483647,
+ Py_ssize_t max_ext_len=2147483647):
+ cdef char *cenc=NULL,
+ cdef char *cerr=NULL
+
+ self.object_hook = object_hook
+ self.object_pairs_hook = object_pairs_hook
+ self.list_hook = list_hook
+ self.ext_hook = ext_hook
+
+ self.file_like = file_like
+ if file_like:
+ self.file_like_read = file_like.read
+ if not PyCallable_Check(self.file_like_read):
+ raise TypeError("`file_like.read` must be a callable.")
+ if not max_buffer_size:
+ max_buffer_size = INT_MAX
+ if read_size > max_buffer_size:
+ raise ValueError("read_size should be less or equal to max_buffer_size")
+ if not read_size:
+ read_size = min(max_buffer_size, 1024**2)
+ self.max_buffer_size = max_buffer_size
+ self.read_size = read_size
+ self.buf = <char*>malloc(read_size)
+ if self.buf == NULL:
+ raise MemoryError("Unable to allocate internal buffer.")
+ self.buf_size = read_size
+ self.buf_head = 0
+ self.buf_tail = 0
+
+ if encoding is not None:
+ if isinstance(encoding, unicode):
+ self.encoding = encoding.encode('ascii')
+ elif isinstance(encoding, bytes):
+ self.encoding = encoding
+ else:
+ raise TypeError("encoding should be bytes or unicode")
+ cenc = PyBytes_AsString(self.encoding)
+
+ if unicode_errors is not None:
+ if isinstance(unicode_errors, unicode):
+ self.unicode_errors = unicode_errors.encode('ascii')
+ elif isinstance(unicode_errors, bytes):
+ self.unicode_errors = unicode_errors
+ else:
+ raise TypeError("unicode_errors should be bytes or unicode")
+ cerr = PyBytes_AsString(self.unicode_errors)
+
+ init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
+ ext_hook, use_list, cenc, cerr,
+ max_str_len, max_bin_len, max_array_len,
+ max_map_len, max_ext_len)
+
+ def feed(self, object next_bytes):
+ """Append `next_bytes` to internal buffer."""
+ cdef Py_buffer pybuff
+ if self.file_like is not None:
+ raise AssertionError(
+ "unpacker.feed() is not be able to use with `file_like`.")
+ PyObject_GetBuffer(next_bytes, &pybuff, PyBUF_SIMPLE)
+ try:
+ self.append_buffer(<char*>pybuff.buf, pybuff.len)
+ finally:
+ PyBuffer_Release(&pybuff)
+
+ cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+ cdef:
+ char* buf = self.buf
+ char* new_buf
+ size_t head = self.buf_head
+ size_t tail = self.buf_tail
+ size_t buf_size = self.buf_size
+ size_t new_size
+
+ if tail + _buf_len > buf_size:
+ if ((tail - head) + _buf_len) <= buf_size:
+ # move to front.
+ memmove(buf, buf + head, tail - head)
+ tail -= head
+ head = 0
+ else:
+ # expand buffer.
+ new_size = (tail-head) + _buf_len
+ if new_size > self.max_buffer_size:
+ raise BufferFull
+ new_size = min(new_size*2, self.max_buffer_size)
+ new_buf = <char*>malloc(new_size)
+ if new_buf == NULL:
+ # self.buf still holds old buffer and will be freed during
+ # obj destruction
+ raise MemoryError("Unable to enlarge internal buffer.")
+ memcpy(new_buf, buf + head, tail - head)
+ free(buf)
+
+ buf = new_buf
+ buf_size = new_size
+ tail -= head
+ head = 0
+
+ memcpy(buf + tail, <char*>(_buf), _buf_len)
+ self.buf = buf
+ self.buf_head = head
+ self.buf_size = buf_size
+ self.buf_tail = tail + _buf_len
+
+ cdef read_from_file(self):
+ next_bytes = self.file_like_read(
+ min(self.read_size,
+ self.max_buffer_size - (self.buf_tail - self.buf_head)
+ ))
+ if next_bytes:
+ self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
+ else:
+ self.file_like = None
+
+ cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
+ cdef int ret
+ cdef object obj
+ cdef size_t prev_head
+
+ if self.buf_head >= self.buf_tail and self.file_like is not None:
+ self.read_from_file()
+
+ while 1:
+ prev_head = self.buf_head
+ if prev_head >= self.buf_tail:
+ if iter:
+ raise StopIteration("No more data to unpack.")
+ else:
+ raise OutOfData("No more data to unpack.")
+
+ ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ if write_bytes is not None:
+ write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
+
+ if ret == 1:
+ obj = unpack_data(&self.ctx)
+ unpack_init(&self.ctx)
+ return obj
+ elif ret == 0:
+ if self.file_like is not None:
+ self.read_from_file()
+ continue
+ if iter:
+ raise StopIteration("No more data to unpack.")
+ else:
+ raise OutOfData("No more data to unpack.")
+ else:
+ raise ValueError("Unpack failed: error = %d" % (ret,))
+
+ def read_bytes(self, Py_ssize_t nbytes):
+ """Read a specified number of raw bytes from the stream"""
+ cdef size_t nread
+ nread = min(self.buf_tail - self.buf_head, nbytes)
+ ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
+ self.buf_head += nread
+ if len(ret) < nbytes and self.file_like is not None:
+ ret += self.file_like.read(nbytes - len(ret))
+ return ret
+
+ def unpack(self, object write_bytes=None):
+ """Unpack one object
+
+ If write_bytes is not None, it will be called with parts of the raw
+ message as it is unpacked.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(unpack_construct, write_bytes)
+
+ def skip(self, object write_bytes=None):
+ """Read and ignore one object, returning None
+
+ If write_bytes is not None, it will be called with parts of the raw
+ message as it is unpacked.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(unpack_skip, write_bytes)
+
+ def read_array_header(self, object write_bytes=None):
+ """assuming the next object is an array, return its size n, such that
+ the next n unpack() calls will iterate over its contents.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(read_array_header, write_bytes)
+
+ def read_map_header(self, object write_bytes=None):
+ """assuming the next object is a map, return its size n, such that the
+ next n * 2 unpack() calls will iterate over its key-value pairs.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(read_map_header, write_bytes)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._unpack(unpack_construct, None, 1)
+
+ # for debug.
+ #def _buf(self):
+ # return PyString_FromStringAndSize(self.buf, self.buf_tail)
+
+ #def _off(self):
+ # return self.buf_head
diff --git a/pandas/msgpack/_version.py b/pandas/msgpack/_version.py
new file mode 100644
index 0000000000000..2c1c96c0759a1
--- /dev/null
+++ b/pandas/msgpack/_version.py
@@ -0,0 +1 @@
+version = (0, 4, 6)
diff --git a/pandas/msgpack/exceptions.py b/pandas/msgpack/exceptions.py
new file mode 100644
index 0000000000000..f7678f135bd26
--- /dev/null
+++ b/pandas/msgpack/exceptions.py
@@ -0,0 +1,29 @@
+class UnpackException(Exception):
+ pass
+
+
+class BufferFull(UnpackException):
+ pass
+
+
+class OutOfData(UnpackException):
+ pass
+
+
+class UnpackValueError(UnpackException, ValueError):
+ pass
+
+
+class ExtraData(ValueError):
+ def __init__(self, unpacked, extra):
+ self.unpacked = unpacked
+ self.extra = extra
+
+ def __str__(self):
+ return "unpack(b) received extra data."
+
+class PackException(Exception):
+ pass
+
+class PackValueError(PackException, ValueError):
+ pass
diff --git a/pandas/src/msgpack/pack.h b/pandas/src/msgpack/pack.h
index e4c315c1161b1..02379c9188424 100644
--- a/pandas/src/msgpack/pack.h
+++ b/pandas/src/msgpack/pack.h
@@ -34,18 +34,18 @@ typedef struct msgpack_packer {
char *buf;
size_t length;
size_t buf_size;
+ bool use_bin_type;
} msgpack_packer;
typedef struct Packer Packer;
-static inline int msgpack_pack_short(msgpack_packer* pk, short d);
static inline int msgpack_pack_int(msgpack_packer* pk, int d);
static inline int msgpack_pack_long(msgpack_packer* pk, long d);
static inline int msgpack_pack_long_long(msgpack_packer* pk, long long d);
static inline int msgpack_pack_unsigned_short(msgpack_packer* pk, unsigned short d);
static inline int msgpack_pack_unsigned_int(msgpack_packer* pk, unsigned int d);
static inline int msgpack_pack_unsigned_long(msgpack_packer* pk, unsigned long d);
-static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d);
+//static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d);
static inline int msgpack_pack_uint8(msgpack_packer* pk, uint8_t d);
static inline int msgpack_pack_uint16(msgpack_packer* pk, uint16_t d);
@@ -68,8 +68,11 @@ static inline int msgpack_pack_array(msgpack_packer* pk, unsigned int n);
static inline int msgpack_pack_map(msgpack_packer* pk, unsigned int n);
static inline int msgpack_pack_raw(msgpack_packer* pk, size_t l);
+static inline int msgpack_pack_bin(msgpack_packer* pk, size_t l);
static inline int msgpack_pack_raw_body(msgpack_packer* pk, const void* b, size_t l);
+static inline int msgpack_pack_ext(msgpack_packer* pk, char typecode, size_t l);
+
static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l)
{
char* buf = pk->buf;
@@ -90,14 +93,6 @@ static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_
return 0;
}
-#define msgpack_pack_inline_func(name) \
- static inline int msgpack_pack ## name
-
-#define msgpack_pack_inline_func_cint(name) \
- static inline int msgpack_pack ## name
-
-#define msgpack_pack_user msgpack_packer*
-
#define msgpack_pack_append_buffer(user, buf, len) \
return msgpack_pack_write(user, (const char*)buf, len)
diff --git a/pandas/src/msgpack/pack_template.h b/pandas/src/msgpack/pack_template.h
index 65c959dd8ce63..5d1088f4b7d78 100644
--- a/pandas/src/msgpack/pack_template.h
+++ b/pandas/src/msgpack/pack_template.h
@@ -28,14 +28,6 @@
#define TAKE8_64(d) ((uint8_t*)&d)[7]
#endif
-#ifndef msgpack_pack_inline_func
-#error msgpack_pack_inline_func template is not defined
-#endif
-
-#ifndef msgpack_pack_user
-#error msgpack_pack_user type is not defined
-#endif
-
#ifndef msgpack_pack_append_buffer
#error msgpack_pack_append_buffer callback is not defined
#endif
@@ -47,584 +39,524 @@
#define msgpack_pack_real_uint8(x, d) \
do { \
- if(d < (1<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
- } else { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
} while(0)
#define msgpack_pack_real_uint16(x, d) \
do { \
- if(d < (1<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
- } else if(d < (1<<8)) { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } else { \
- /* unsigned 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+ } else if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
} while(0)
#define msgpack_pack_real_uint32(x, d) \
do { \
- if(d < (1<<8)) { \
- if(d < (1<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
- } else { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } \
- } else { \
- if(d < (1<<16)) { \
- /* unsigned 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } else { \
- /* unsigned 32 */ \
- unsigned char buf[5]; \
- buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
- msgpack_pack_append_buffer(x, buf, 5); \
- } \
- } \
+ if(d < (1<<8)) { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else { \
+ if(d < (1<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } \
} while(0)
#define msgpack_pack_real_uint64(x, d) \
do { \
- if(d < (1ULL<<8)) { \
- if(d < (1ULL<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
- } else { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } \
- } else { \
- if(d < (1ULL<<16)) { \
- /* unsigned 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } else if(d < (1ULL<<32)) { \
- /* unsigned 32 */ \
- unsigned char buf[5]; \
- buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
- msgpack_pack_append_buffer(x, buf, 5); \
- } else { \
- /* unsigned 64 */ \
- unsigned char buf[9]; \
- buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
- msgpack_pack_append_buffer(x, buf, 9); \
- } \
- } \
+ if(d < (1ULL<<8)) { \
+ if(d < (1ULL<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else { \
+ if(d < (1ULL<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else if(d < (1ULL<<32)) { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else { \
+ /* unsigned 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } \
+ } \
} while(0)
#define msgpack_pack_real_int8(x, d) \
do { \
- if(d < -(1<<5)) { \
- /* signed 8 */ \
- unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } else { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
- } \
+ if(d < -(1<<5)) { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+ } \
} while(0)
#define msgpack_pack_real_int16(x, d) \
do { \
- if(d < -(1<<5)) { \
- if(d < -(1<<7)) { \
- /* signed 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } else { \
- /* signed 8 */ \
- unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } \
- } else if(d < (1<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
- } else { \
- if(d < (1<<8)) { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } else { \
- /* unsigned 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } \
- } \
+ if(d < -(1<<5)) { \
+ if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+ } else { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+ } \
} while(0)
#define msgpack_pack_real_int32(x, d) \
do { \
- if(d < -(1<<5)) { \
- if(d < -(1<<15)) { \
- /* signed 32 */ \
- unsigned char buf[5]; \
- buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
- msgpack_pack_append_buffer(x, buf, 5); \
- } else if(d < -(1<<7)) { \
- /* signed 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } else { \
- /* signed 8 */ \
- unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } \
- } else if(d < (1<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
- } else { \
- if(d < (1<<8)) { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } else if(d < (1<<16)) { \
- /* unsigned 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } else { \
- /* unsigned 32 */ \
- unsigned char buf[5]; \
- buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
- msgpack_pack_append_buffer(x, buf, 5); \
- } \
- } \
+ if(d < -(1<<5)) { \
+ if(d < -(1<<15)) { \
+ /* signed 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+ } else { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else if(d < (1<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } \
} while(0)
#define msgpack_pack_real_int64(x, d) \
do { \
- if(d < -(1LL<<5)) { \
- if(d < -(1LL<<15)) { \
- if(d < -(1LL<<31)) { \
- /* signed 64 */ \
- unsigned char buf[9]; \
- buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \
- msgpack_pack_append_buffer(x, buf, 9); \
- } else { \
- /* signed 32 */ \
- unsigned char buf[5]; \
- buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
- msgpack_pack_append_buffer(x, buf, 5); \
- } \
- } else { \
- if(d < -(1<<7)) { \
- /* signed 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } else { \
- /* signed 8 */ \
- unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } \
- } \
- } else if(d < (1<<7)) { \
- /* fixnum */ \
- msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
- } else { \
- if(d < (1LL<<16)) { \
- if(d < (1<<8)) { \
- /* unsigned 8 */ \
- unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
- msgpack_pack_append_buffer(x, buf, 2); \
- } else { \
- /* unsigned 16 */ \
- unsigned char buf[3]; \
- buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
- msgpack_pack_append_buffer(x, buf, 3); \
- } \
- } else { \
- if(d < (1LL<<32)) { \
- /* unsigned 32 */ \
- unsigned char buf[5]; \
- buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
- msgpack_pack_append_buffer(x, buf, 5); \
- } else { \
- /* unsigned 64 */ \
- unsigned char buf[9]; \
- buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
- msgpack_pack_append_buffer(x, buf, 9); \
- } \
- } \
- } \
+ if(d < -(1LL<<5)) { \
+ if(d < -(1LL<<15)) { \
+ if(d < -(1LL<<31)) { \
+ /* signed 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } else { \
+ /* signed 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } else { \
+ if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+ } else { \
+ if(d < (1LL<<16)) { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+ } else { \
+ if(d < (1LL<<32)) { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else { \
+ /* unsigned 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } \
+ } \
+ } \
} while(0)
-#ifdef msgpack_pack_inline_func_fixint
-
-msgpack_pack_inline_func_fixint(_uint8)(msgpack_pack_user x, uint8_t d)
-{
- unsigned char buf[2] = {0xcc, TAKE8_8(d)};
- msgpack_pack_append_buffer(x, buf, 2);
-}
-
-msgpack_pack_inline_func_fixint(_uint16)(msgpack_pack_user x, uint16_t d)
-{
- unsigned char buf[3];
- buf[0] = 0xcd; _msgpack_store16(&buf[1], d);
- msgpack_pack_append_buffer(x, buf, 3);
-}
-
-msgpack_pack_inline_func_fixint(_uint32)(msgpack_pack_user x, uint32_t d)
-{
- unsigned char buf[5];
- buf[0] = 0xce; _msgpack_store32(&buf[1], d);
- msgpack_pack_append_buffer(x, buf, 5);
-}
-
-msgpack_pack_inline_func_fixint(_uint64)(msgpack_pack_user x, uint64_t d)
-{
- unsigned char buf[9];
- buf[0] = 0xcf; _msgpack_store64(&buf[1], d);
- msgpack_pack_append_buffer(x, buf, 9);
-}
-
-msgpack_pack_inline_func_fixint(_int8)(msgpack_pack_user x, int8_t d)
+static inline int msgpack_pack_uint8(msgpack_packer* x, uint8_t d)
{
- unsigned char buf[2] = {0xd0, TAKE8_8(d)};
- msgpack_pack_append_buffer(x, buf, 2);
+ msgpack_pack_real_uint8(x, d);
}
-msgpack_pack_inline_func_fixint(_int16)(msgpack_pack_user x, int16_t d)
+static inline int msgpack_pack_uint16(msgpack_packer* x, uint16_t d)
{
- unsigned char buf[3];
- buf[0] = 0xd1; _msgpack_store16(&buf[1], d);
- msgpack_pack_append_buffer(x, buf, 3);
+ msgpack_pack_real_uint16(x, d);
}
-msgpack_pack_inline_func_fixint(_int32)(msgpack_pack_user x, int32_t d)
+static inline int msgpack_pack_uint32(msgpack_packer* x, uint32_t d)
{
- unsigned char buf[5];
- buf[0] = 0xd2; _msgpack_store32(&buf[1], d);
- msgpack_pack_append_buffer(x, buf, 5);
+ msgpack_pack_real_uint32(x, d);
}
-msgpack_pack_inline_func_fixint(_int64)(msgpack_pack_user x, int64_t d)
+static inline int msgpack_pack_uint64(msgpack_packer* x, uint64_t d)
{
- unsigned char buf[9];
- buf[0] = 0xd3; _msgpack_store64(&buf[1], d);
- msgpack_pack_append_buffer(x, buf, 9);
+ msgpack_pack_real_uint64(x, d);
}
-#undef msgpack_pack_inline_func_fixint
-#endif
-
-
-msgpack_pack_inline_func(_uint8)(msgpack_pack_user x, uint8_t d)
+static inline int msgpack_pack_int8(msgpack_packer* x, int8_t d)
{
- msgpack_pack_real_uint8(x, d);
+ msgpack_pack_real_int8(x, d);
}
-msgpack_pack_inline_func(_uint16)(msgpack_pack_user x, uint16_t d)
+static inline int msgpack_pack_int16(msgpack_packer* x, int16_t d)
{
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_int16(x, d);
}
-msgpack_pack_inline_func(_uint32)(msgpack_pack_user x, uint32_t d)
+static inline int msgpack_pack_int32(msgpack_packer* x, int32_t d)
{
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_int32(x, d);
}
-msgpack_pack_inline_func(_uint64)(msgpack_pack_user x, uint64_t d)
+static inline int msgpack_pack_int64(msgpack_packer* x, int64_t d)
{
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_int64(x, d);
}
-msgpack_pack_inline_func(_int8)(msgpack_pack_user x, int8_t d)
-{
- msgpack_pack_real_int8(x, d);
-}
-msgpack_pack_inline_func(_int16)(msgpack_pack_user x, int16_t d)
-{
- msgpack_pack_real_int16(x, d);
-}
+//#ifdef msgpack_pack_inline_func_cint
-msgpack_pack_inline_func(_int32)(msgpack_pack_user x, int32_t d)
-{
- msgpack_pack_real_int32(x, d);
-}
-
-msgpack_pack_inline_func(_int64)(msgpack_pack_user x, int64_t d)
-{
- msgpack_pack_real_int64(x, d);
-}
-
-
-#ifdef msgpack_pack_inline_func_cint
-
-msgpack_pack_inline_func_cint(_short)(msgpack_pack_user x, short d)
+static inline int msgpack_pack_short(msgpack_packer* x, short d)
{
#if defined(SIZEOF_SHORT)
#if SIZEOF_SHORT == 2
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif SIZEOF_SHORT == 4
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#elif defined(SHRT_MAX)
#if SHRT_MAX == 0x7fff
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif SHRT_MAX == 0x7fffffff
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#else
if(sizeof(short) == 2) {
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
} else if(sizeof(short) == 4) {
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
} else {
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_int)(msgpack_pack_user x, int d)
+static inline int msgpack_pack_int(msgpack_packer* x, int d)
{
#if defined(SIZEOF_INT)
#if SIZEOF_INT == 2
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif SIZEOF_INT == 4
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#elif defined(INT_MAX)
#if INT_MAX == 0x7fff
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif INT_MAX == 0x7fffffff
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#else
if(sizeof(int) == 2) {
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
} else if(sizeof(int) == 4) {
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
} else {
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_long)(msgpack_pack_user x, long d)
+static inline int msgpack_pack_long(msgpack_packer* x, long d)
{
#if defined(SIZEOF_LONG)
#if SIZEOF_LONG == 2
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif SIZEOF_LONG == 4
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#elif defined(LONG_MAX)
#if LONG_MAX == 0x7fffL
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif LONG_MAX == 0x7fffffffL
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#else
if(sizeof(long) == 2) {
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
} else if(sizeof(long) == 4) {
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
} else {
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_long_long)(msgpack_pack_user x, long long d)
+static inline int msgpack_pack_long_long(msgpack_packer* x, long long d)
{
#if defined(SIZEOF_LONG_LONG)
#if SIZEOF_LONG_LONG == 2
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif SIZEOF_LONG_LONG == 4
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#elif defined(LLONG_MAX)
#if LLONG_MAX == 0x7fffL
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
#elif LLONG_MAX == 0x7fffffffL
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
#else
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
#endif
#else
if(sizeof(long long) == 2) {
- msgpack_pack_real_int16(x, d);
+ msgpack_pack_real_int16(x, d);
} else if(sizeof(long long) == 4) {
- msgpack_pack_real_int32(x, d);
+ msgpack_pack_real_int32(x, d);
} else {
- msgpack_pack_real_int64(x, d);
+ msgpack_pack_real_int64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_unsigned_short)(msgpack_pack_user x, unsigned short d)
+static inline int msgpack_pack_unsigned_short(msgpack_packer* x, unsigned short d)
{
#if defined(SIZEOF_SHORT)
#if SIZEOF_SHORT == 2
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif SIZEOF_SHORT == 4
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#elif defined(USHRT_MAX)
#if USHRT_MAX == 0xffffU
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif USHRT_MAX == 0xffffffffU
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#else
if(sizeof(unsigned short) == 2) {
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
} else if(sizeof(unsigned short) == 4) {
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
} else {
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_unsigned_int)(msgpack_pack_user x, unsigned int d)
+static inline int msgpack_pack_unsigned_int(msgpack_packer* x, unsigned int d)
{
#if defined(SIZEOF_INT)
#if SIZEOF_INT == 2
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif SIZEOF_INT == 4
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#elif defined(UINT_MAX)
#if UINT_MAX == 0xffffU
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif UINT_MAX == 0xffffffffU
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#else
if(sizeof(unsigned int) == 2) {
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
} else if(sizeof(unsigned int) == 4) {
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
} else {
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_unsigned_long)(msgpack_pack_user x, unsigned long d)
+static inline int msgpack_pack_unsigned_long(msgpack_packer* x, unsigned long d)
{
#if defined(SIZEOF_LONG)
#if SIZEOF_LONG == 2
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif SIZEOF_LONG == 4
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#elif defined(ULONG_MAX)
#if ULONG_MAX == 0xffffUL
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif ULONG_MAX == 0xffffffffUL
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#else
if(sizeof(unsigned long) == 2) {
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
} else if(sizeof(unsigned long) == 4) {
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
} else {
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
}
#endif
}
-msgpack_pack_inline_func_cint(_unsigned_long_long)(msgpack_pack_user x, unsigned long long d)
+static inline int msgpack_pack_unsigned_long_long(msgpack_packer* x, unsigned long long d)
{
#if defined(SIZEOF_LONG_LONG)
#if SIZEOF_LONG_LONG == 2
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif SIZEOF_LONG_LONG == 4
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#elif defined(ULLONG_MAX)
#if ULLONG_MAX == 0xffffUL
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
#elif ULLONG_MAX == 0xffffffffUL
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
#else
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
#endif
#else
if(sizeof(unsigned long long) == 2) {
- msgpack_pack_real_uint16(x, d);
+ msgpack_pack_real_uint16(x, d);
} else if(sizeof(unsigned long long) == 4) {
- msgpack_pack_real_uint32(x, d);
+ msgpack_pack_real_uint32(x, d);
} else {
- msgpack_pack_real_uint64(x, d);
+ msgpack_pack_real_uint64(x, d);
}
#endif
}
-#undef msgpack_pack_inline_func_cint
-#endif
+//#undef msgpack_pack_inline_func_cint
+//#endif
@@ -632,27 +564,27 @@ if(sizeof(unsigned long long) == 2) {
* Float
*/
-msgpack_pack_inline_func(_float)(msgpack_pack_user x, float d)
+static inline int msgpack_pack_float(msgpack_packer* x, float d)
{
- union { float f; uint32_t i; } mem;
- mem.f = d;
- unsigned char buf[5];
- buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i);
- msgpack_pack_append_buffer(x, buf, 5);
+ union { float f; uint32_t i; } mem;
+ mem.f = d;
+ unsigned char buf[5];
+ buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i);
+ msgpack_pack_append_buffer(x, buf, 5);
}
-msgpack_pack_inline_func(_double)(msgpack_pack_user x, double d)
+static inline int msgpack_pack_double(msgpack_packer* x, double d)
{
- union { double f; uint64_t i; } mem;
- mem.f = d;
- unsigned char buf[9];
- buf[0] = 0xcb;
+ union { double f; uint64_t i; } mem;
+ mem.f = d;
+ unsigned char buf[9];
+ buf[0] = 0xcb;
#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
// https://github.com/msgpack/msgpack-perl/pull/1
mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
#endif
_msgpack_store64(&buf[1], mem.i);
- msgpack_pack_append_buffer(x, buf, 9);
+ msgpack_pack_append_buffer(x, buf, 9);
}
@@ -660,10 +592,10 @@ msgpack_pack_inline_func(_double)(msgpack_pack_user x, double d)
* Nil
*/
-msgpack_pack_inline_func(_nil)(msgpack_pack_user x)
+static inline int msgpack_pack_nil(msgpack_packer* x)
{
- static const unsigned char d = 0xc0;
- msgpack_pack_append_buffer(x, &d, 1);
+ static const unsigned char d = 0xc0;
+ msgpack_pack_append_buffer(x, &d, 1);
}
@@ -671,16 +603,16 @@ msgpack_pack_inline_func(_nil)(msgpack_pack_user x)
* Boolean
*/
-msgpack_pack_inline_func(_true)(msgpack_pack_user x)
+static inline int msgpack_pack_true(msgpack_packer* x)
{
- static const unsigned char d = 0xc3;
- msgpack_pack_append_buffer(x, &d, 1);
+ static const unsigned char d = 0xc3;
+ msgpack_pack_append_buffer(x, &d, 1);
}
-msgpack_pack_inline_func(_false)(msgpack_pack_user x)
+static inline int msgpack_pack_false(msgpack_packer* x)
{
- static const unsigned char d = 0xc2;
- msgpack_pack_append_buffer(x, &d, 1);
+ static const unsigned char d = 0xc2;
+ msgpack_pack_append_buffer(x, &d, 1);
}
@@ -688,20 +620,20 @@ msgpack_pack_inline_func(_false)(msgpack_pack_user x)
* Array
*/
-msgpack_pack_inline_func(_array)(msgpack_pack_user x, unsigned int n)
+static inline int msgpack_pack_array(msgpack_packer* x, unsigned int n)
{
- if(n < 16) {
- unsigned char d = 0x90 | n;
- msgpack_pack_append_buffer(x, &d, 1);
- } else if(n < 65536) {
- unsigned char buf[3];
- buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n);
- msgpack_pack_append_buffer(x, buf, 3);
- } else {
- unsigned char buf[5];
- buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n);
- msgpack_pack_append_buffer(x, buf, 5);
- }
+ if(n < 16) {
+ unsigned char d = 0x90 | n;
+ msgpack_pack_append_buffer(x, &d, 1);
+ } else if(n < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
}
@@ -709,20 +641,20 @@ msgpack_pack_inline_func(_array)(msgpack_pack_user x, unsigned int n)
* Map
*/
-msgpack_pack_inline_func(_map)(msgpack_pack_user x, unsigned int n)
+static inline int msgpack_pack_map(msgpack_packer* x, unsigned int n)
{
- if(n < 16) {
- unsigned char d = 0x80 | n;
- msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
- } else if(n < 65536) {
- unsigned char buf[3];
- buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n);
- msgpack_pack_append_buffer(x, buf, 3);
- } else {
- unsigned char buf[5];
- buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n);
- msgpack_pack_append_buffer(x, buf, 5);
- }
+ if(n < 16) {
+ unsigned char d = 0x80 | n;
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+ } else if(n < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
}
@@ -730,29 +662,112 @@ msgpack_pack_inline_func(_map)(msgpack_pack_user x, unsigned int n)
* Raw
*/
-msgpack_pack_inline_func(_raw)(msgpack_pack_user x, size_t l)
+static inline int msgpack_pack_raw(msgpack_packer* x, size_t l)
+{
+ if (l < 32) {
+ unsigned char d = 0xa0 | (uint8_t)l;
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+ } else if (x->use_bin_type && l < 256) { // str8 is new format introduced with bin.
+ unsigned char buf[2] = {0xd9, (uint8_t)l};
+ msgpack_pack_append_buffer(x, buf, 2);
+ } else if (l < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+/*
+ * bin
+ */
+static inline int msgpack_pack_bin(msgpack_packer *x, size_t l)
{
- if(l < 32) {
- unsigned char d = 0xa0 | (uint8_t)l;
- msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
- } else if(l < 65536) {
- unsigned char buf[3];
- buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l);
- msgpack_pack_append_buffer(x, buf, 3);
- } else {
- unsigned char buf[5];
- buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l);
- msgpack_pack_append_buffer(x, buf, 5);
- }
-}
-
-msgpack_pack_inline_func(_raw_body)(msgpack_pack_user x, const void* b, size_t l)
+ if (!x->use_bin_type) {
+ return msgpack_pack_raw(x, l);
+ }
+ if (l < 256) {
+ unsigned char buf[2] = {0xc4, (unsigned char)l};
+ msgpack_pack_append_buffer(x, buf, 2);
+ } else if (l < 65536) {
+ unsigned char buf[3] = {0xc5};
+ _msgpack_store16(&buf[1], (uint16_t)l);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5] = {0xc6};
+ _msgpack_store32(&buf[1], (uint32_t)l);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+static inline int msgpack_pack_raw_body(msgpack_packer* x, const void* b, size_t l)
{
- msgpack_pack_append_buffer(x, (const unsigned char*)b, l);
+ if (l > 0) msgpack_pack_append_buffer(x, (const unsigned char*)b, l);
+ return 0;
}
-#undef msgpack_pack_inline_func
-#undef msgpack_pack_user
+/*
+ * Ext
+ */
+static inline int msgpack_pack_ext(msgpack_packer* x, char typecode, size_t l)
+{
+ if (l == 1) {
+ unsigned char buf[2];
+ buf[0] = 0xd4;
+ buf[1] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 2);
+ }
+ else if(l == 2) {
+ unsigned char buf[2];
+ buf[0] = 0xd5;
+ buf[1] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 2);
+ }
+ else if(l == 4) {
+ unsigned char buf[2];
+ buf[0] = 0xd6;
+ buf[1] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 2);
+ }
+ else if(l == 8) {
+ unsigned char buf[2];
+ buf[0] = 0xd7;
+ buf[1] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 2);
+ }
+ else if(l == 16) {
+ unsigned char buf[2];
+ buf[0] = 0xd8;
+ buf[1] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 2);
+ }
+ else if(l < 256) {
+ unsigned char buf[3];
+ buf[0] = 0xc7;
+ buf[1] = l;
+ buf[2] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else if(l < 65536) {
+ unsigned char buf[4];
+ buf[0] = 0xc8;
+ _msgpack_store16(&buf[1], (uint16_t)l);
+ buf[3] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 4);
+ } else {
+ unsigned char buf[6];
+ buf[0] = 0xc9;
+ _msgpack_store32(&buf[1], (uint32_t)l);
+ buf[5] = (unsigned char)typecode;
+ msgpack_pack_append_buffer(x, buf, 6);
+ }
+
+}
+
+
+
#undef msgpack_pack_append_buffer
#undef TAKE8_8
@@ -768,4 +783,3 @@ msgpack_pack_inline_func(_raw_body)(msgpack_pack_user x, const void* b, size_t l
#undef msgpack_pack_real_int16
#undef msgpack_pack_real_int32
#undef msgpack_pack_real_int64
-
diff --git a/pandas/src/msgpack/sysdep.h b/pandas/src/msgpack/sysdep.h
index 4fedbd8ba472f..ed9c1bc0b8031 100644
--- a/pandas/src/msgpack/sysdep.h
+++ b/pandas/src/msgpack/sysdep.h
@@ -192,4 +192,3 @@ typedef unsigned int _msgpack_atomic_counter_t;
#endif /* msgpack/sysdep.h */
-
diff --git a/pandas/src/msgpack/unpack.h b/pandas/src/msgpack/unpack.h
index 3dc88e5fbded0..5deb7cde0b929 100644
--- a/pandas/src/msgpack/unpack.h
+++ b/pandas/src/msgpack/unpack.h
@@ -24,35 +24,23 @@ typedef struct unpack_user {
PyObject *object_hook;
bool has_pairs_hook;
PyObject *list_hook;
+ PyObject *ext_hook;
const char *encoding;
const char *unicode_errors;
+ Py_ssize_t max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len;
} unpack_user;
+typedef PyObject* msgpack_unpack_object;
+struct unpack_context;
+typedef struct unpack_context unpack_context;
+typedef int (*execute_fn)(unpack_context *ctx, const char* data, size_t len, size_t* off);
-#define msgpack_unpack_struct(name) \
- struct template ## name
-
-#define msgpack_unpack_func(ret, name) \
- static inline ret template ## name
-
-#define msgpack_unpack_callback(name) \
- template_callback ## name
-
-#define msgpack_unpack_object PyObject*
-
-#define msgpack_unpack_user unpack_user
-
-typedef int (*execute_fn)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off);
-
-struct template_context;
-typedef struct template_context template_context;
-
-static inline msgpack_unpack_object template_callback_root(unpack_user* u)
+static inline msgpack_unpack_object unpack_callback_root(unpack_user* u)
{
return NULL;
}
-static inline int template_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o)
{
PyObject *p = PyInt_FromLong((long)d);
if (!p)
@@ -60,36 +48,36 @@ static inline int template_callback_uint16(unpack_user* u, uint16_t d, msgpack_u
*o = p;
return 0;
}
-static inline int template_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o)
{
- return template_callback_uint16(u, d, o);
+ return unpack_callback_uint16(u, d, o);
}
-static inline int template_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o)
{
- PyObject *p;
- if (d > LONG_MAX) {
- p = PyLong_FromUnsignedLong((unsigned long)d);
- } else {
- p = PyInt_FromLong((long)d);
- }
+ PyObject *p = PyInt_FromSize_t((size_t)d);
if (!p)
return -1;
*o = p;
return 0;
}
-static inline int template_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o)
{
- PyObject *p = PyLong_FromUnsignedLongLong(d);
+ PyObject *p;
+ if (d > LONG_MAX) {
+ p = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)d);
+ } else {
+ p = PyInt_FromSize_t((size_t)d);
+ }
if (!p)
return -1;
*o = p;
return 0;
}
-static inline int template_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o)
{
PyObject *p = PyInt_FromLong(d);
if (!p)
@@ -98,26 +86,29 @@ static inline int template_callback_int32(unpack_user* u, int32_t d, msgpack_unp
return 0;
}
-static inline int template_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o)
{
- return template_callback_int32(u, d, o);
+ return unpack_callback_int32(u, d, o);
}
-static inline int template_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o)
{
- return template_callback_int32(u, d, o);
+ return unpack_callback_int32(u, d, o);
}
-static inline int template_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o)
+static inline int unpack_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o)
{
- PyObject *p = PyLong_FromLongLong(d);
- if (!p)
- return -1;
+ PyObject *p;
+ if (d > LONG_MAX || d < LONG_MIN) {
+ p = PyLong_FromLongLong((unsigned PY_LONG_LONG)d);
+ } else {
+ p = PyInt_FromLong((long)d);
+ }
*o = p;
return 0;
}
-static inline int template_callback_double(unpack_user* u, double d, msgpack_unpack_object* o)
+static inline int unpack_callback_double(unpack_user* u, double d, msgpack_unpack_object* o)
{
PyObject *p = PyFloat_FromDouble(d);
if (!p)
@@ -126,22 +117,26 @@ static inline int template_callback_double(unpack_user* u, double d, msgpack_unp
return 0;
}
-static inline int template_callback_float(unpack_user* u, float d, msgpack_unpack_object* o)
+static inline int unpack_callback_float(unpack_user* u, float d, msgpack_unpack_object* o)
{
- return template_callback_double(u, d, o);
+ return unpack_callback_double(u, d, o);
}
-static inline int template_callback_nil(unpack_user* u, msgpack_unpack_object* o)
+static inline int unpack_callback_nil(unpack_user* u, msgpack_unpack_object* o)
{ Py_INCREF(Py_None); *o = Py_None; return 0; }
-static inline int template_callback_true(unpack_user* u, msgpack_unpack_object* o)
+static inline int unpack_callback_true(unpack_user* u, msgpack_unpack_object* o)
{ Py_INCREF(Py_True); *o = Py_True; return 0; }
-static inline int template_callback_false(unpack_user* u, msgpack_unpack_object* o)
+static inline int unpack_callback_false(unpack_user* u, msgpack_unpack_object* o)
{ Py_INCREF(Py_False); *o = Py_False; return 0; }
-static inline int template_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+static inline int unpack_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
{
+ if (n > u->max_array_len) {
+ PyErr_Format(PyExc_ValueError, "%u exceeds max_array_len(%zd)", n, u->max_array_len);
+ return -1;
+ }
PyObject *p = u->use_list ? PyList_New(n) : PyTuple_New(n);
if (!p)
@@ -150,7 +145,7 @@ static inline int template_callback_array(unpack_user* u, unsigned int n, msgpac
return 0;
}
-static inline int template_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o)
+static inline int unpack_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o)
{
if (u->use_list)
PyList_SET_ITEM(*c, current, o);
@@ -159,10 +154,10 @@ static inline int template_callback_array_item(unpack_user* u, unsigned int curr
return 0;
}
-static inline int template_callback_array_end(unpack_user* u, msgpack_unpack_object* c)
+static inline int unpack_callback_array_end(unpack_user* u, msgpack_unpack_object* c)
{
if (u->list_hook) {
- PyObject *new_c = PyEval_CallFunction(u->list_hook, "(O)", *c);
+ PyObject *new_c = PyObject_CallFunctionObjArgs(u->list_hook, *c, NULL);
if (!new_c)
return -1;
Py_DECREF(*c);
@@ -171,8 +166,12 @@ static inline int template_callback_array_end(unpack_user* u, msgpack_unpack_obj
return 0;
}
-static inline int template_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
{
+ if (n > u->max_map_len) {
+ PyErr_Format(PyExc_ValueError, "%u exceeds max_map_len(%zd)", n, u->max_map_len);
+ return -1;
+ }
PyObject *p;
if (u->has_pairs_hook) {
p = PyList_New(n); // Or use tuple?
@@ -186,7 +185,7 @@ static inline int template_callback_map(unpack_user* u, unsigned int n, msgpack_
return 0;
}
-static inline int template_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
+static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
{
if (u->has_pairs_hook) {
msgpack_unpack_object item = PyTuple_Pack(2, k, v);
@@ -205,10 +204,10 @@ static inline int template_callback_map_item(unpack_user* u, unsigned int curren
return -1;
}
-static inline int template_callback_map_end(unpack_user* u, msgpack_unpack_object* c)
+static inline int unpack_callback_map_end(unpack_user* u, msgpack_unpack_object* c)
{
if (u->object_hook) {
- PyObject *new_c = PyEval_CallFunction(u->object_hook, "(O)", *c);
+ PyObject *new_c = PyObject_CallFunctionObjArgs(u->object_hook, *c, NULL);
if (!new_c)
return -1;
@@ -218,8 +217,13 @@ static inline int template_callback_map_end(unpack_user* u, msgpack_unpack_objec
return 0;
}
-static inline int template_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
+static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
{
+ if (l > u->max_str_len) {
+ PyErr_Format(PyExc_ValueError, "%u exceeds max_str_len(%zd)", l, u->max_str_len);
+ return -1;
+ }
+
PyObject *py;
if(u->encoding) {
py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors);
@@ -232,4 +236,43 @@ static inline int template_callback_raw(unpack_user* u, const char* b, const cha
return 0;
}
+static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
+{
+ if (l > u->max_bin_len) {
+ PyErr_Format(PyExc_ValueError, "%u exceeds max_bin_len(%zd)", l, u->max_bin_len);
+ return -1;
+ }
+
+ PyObject *py = PyBytes_FromStringAndSize(p, l);
+ if (!py)
+ return -1;
+ *o = py;
+ return 0;
+}
+
+static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos,
+ unsigned int length, msgpack_unpack_object* o)
+{
+ PyObject *py;
+ int8_t typecode = (int8_t)*pos++;
+ if (!u->ext_hook) {
+ PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL");
+ return -1;
+ }
+ if (length-1 > u->max_ext_len) {
+ PyErr_Format(PyExc_ValueError, "%u exceeds max_ext_len(%zd)", length, u->max_ext_len);
+ return -1;
+ }
+ // length also includes the typecode, so the actual data is length-1
+#if PY_MAJOR_VERSION == 2
+ py = PyObject_CallFunction(u->ext_hook, "(is#)", typecode, pos, length-1);
+#else
+ py = PyObject_CallFunction(u->ext_hook, "(iy#)", typecode, pos, length-1);
+#endif
+ if (!py)
+ return -1;
+ *o = py;
+ return 0;
+}
+
#include "unpack_template.h"
diff --git a/pandas/src/msgpack/unpack_define.h b/pandas/src/msgpack/unpack_define.h
index 959d3519e7b5c..0dd708d17c3d4 100644
--- a/pandas/src/msgpack/unpack_define.h
+++ b/pandas/src/msgpack/unpack_define.h
@@ -34,54 +34,57 @@ extern "C" {
#endif
+// CS is first byte & 0x1f
typedef enum {
- CS_HEADER = 0x00, // nil
-
- //CS_ = 0x01,
- //CS_ = 0x02, // false
- //CS_ = 0x03, // true
-
- //CS_ = 0x04,
- //CS_ = 0x05,
- //CS_ = 0x06,
- //CS_ = 0x07,
-
- //CS_ = 0x08,
- //CS_ = 0x09,
- CS_FLOAT = 0x0a,
- CS_DOUBLE = 0x0b,
- CS_UINT_8 = 0x0c,
- CS_UINT_16 = 0x0d,
- CS_UINT_32 = 0x0e,
- CS_UINT_64 = 0x0f,
- CS_INT_8 = 0x10,
- CS_INT_16 = 0x11,
- CS_INT_32 = 0x12,
- CS_INT_64 = 0x13,
-
- //CS_ = 0x14,
- //CS_ = 0x15,
- //CS_BIG_INT_16 = 0x16,
- //CS_BIG_INT_32 = 0x17,
- //CS_BIG_FLOAT_16 = 0x18,
- //CS_BIG_FLOAT_32 = 0x19,
- CS_RAW_16 = 0x1a,
- CS_RAW_32 = 0x1b,
- CS_ARRAY_16 = 0x1c,
- CS_ARRAY_32 = 0x1d,
- CS_MAP_16 = 0x1e,
- CS_MAP_32 = 0x1f,
-
- //ACS_BIG_INT_VALUE,
- //ACS_BIG_FLOAT_VALUE,
- ACS_RAW_VALUE,
+ CS_HEADER = 0x00, // nil
+
+ //CS_ = 0x01,
+ //CS_ = 0x02, // false
+ //CS_ = 0x03, // true
+
+ CS_BIN_8 = 0x04,
+ CS_BIN_16 = 0x05,
+ CS_BIN_32 = 0x06,
+
+ CS_EXT_8 = 0x07,
+ CS_EXT_16 = 0x08,
+ CS_EXT_32 = 0x09,
+
+ CS_FLOAT = 0x0a,
+ CS_DOUBLE = 0x0b,
+ CS_UINT_8 = 0x0c,
+ CS_UINT_16 = 0x0d,
+ CS_UINT_32 = 0x0e,
+ CS_UINT_64 = 0x0f,
+ CS_INT_8 = 0x10,
+ CS_INT_16 = 0x11,
+ CS_INT_32 = 0x12,
+ CS_INT_64 = 0x13,
+
+ //CS_FIXEXT1 = 0x14,
+ //CS_FIXEXT2 = 0x15,
+ //CS_FIXEXT4 = 0x16,
+ //CS_FIXEXT8 = 0x17,
+ //CS_FIXEXT16 = 0x18,
+
+ CS_RAW_8 = 0x19,
+ CS_RAW_16 = 0x1a,
+ CS_RAW_32 = 0x1b,
+ CS_ARRAY_16 = 0x1c,
+ CS_ARRAY_32 = 0x1d,
+ CS_MAP_16 = 0x1e,
+ CS_MAP_32 = 0x1f,
+
+ ACS_RAW_VALUE,
+ ACS_BIN_VALUE,
+ ACS_EXT_VALUE,
} msgpack_unpack_state;
typedef enum {
- CT_ARRAY_ITEM,
- CT_MAP_KEY,
- CT_MAP_VALUE,
+ CT_ARRAY_ITEM,
+ CT_MAP_KEY,
+ CT_MAP_VALUE,
} msgpack_container_type;
@@ -90,4 +93,3 @@ typedef enum {
#endif
#endif /* msgpack/unpack_define.h */
-
diff --git a/pandas/src/msgpack/unpack_template.h b/pandas/src/msgpack/unpack_template.h
index 83b6918dc6686..d34eceda6ab69 100644
--- a/pandas/src/msgpack/unpack_template.h
+++ b/pandas/src/msgpack/unpack_template.h
@@ -16,167 +16,142 @@
* limitations under the License.
*/
-#ifndef msgpack_unpack_func
-#error msgpack_unpack_func template is not defined
-#endif
-
-#ifndef msgpack_unpack_callback
-#error msgpack_unpack_callback template is not defined
-#endif
-
-#ifndef msgpack_unpack_struct
-#error msgpack_unpack_struct template is not defined
-#endif
-
-#ifndef msgpack_unpack_struct_decl
-#define msgpack_unpack_struct_decl(name) msgpack_unpack_struct(name)
-#endif
-
-#ifndef msgpack_unpack_object
-#error msgpack_unpack_object type is not defined
-#endif
-
-#ifndef msgpack_unpack_user
-#error msgpack_unpack_user type is not defined
-#endif
-
#ifndef USE_CASE_RANGE
#if !defined(_MSC_VER)
#define USE_CASE_RANGE
#endif
#endif
-msgpack_unpack_struct_decl(_stack) {
- msgpack_unpack_object obj;
- size_t size;
- size_t count;
- unsigned int ct;
- msgpack_unpack_object map_key;
-};
-
-msgpack_unpack_struct_decl(_context) {
- msgpack_unpack_user user;
- unsigned int cs;
- unsigned int trail;
- unsigned int top;
- /*
- msgpack_unpack_struct(_stack)* stack;
- unsigned int stack_size;
- msgpack_unpack_struct(_stack) embed_stack[MSGPACK_EMBED_STACK_SIZE];
- */
- msgpack_unpack_struct(_stack) stack[MSGPACK_EMBED_STACK_SIZE];
+typedef struct unpack_stack {
+ PyObject* obj;
+ size_t size;
+ size_t count;
+ unsigned int ct;
+ PyObject* map_key;
+} unpack_stack;
+
+struct unpack_context {
+ unpack_user user;
+ unsigned int cs;
+ unsigned int trail;
+ unsigned int top;
+ /*
+ unpack_stack* stack;
+ unsigned int stack_size;
+ unpack_stack embed_stack[MSGPACK_EMBED_STACK_SIZE];
+ */
+ unpack_stack stack[MSGPACK_EMBED_STACK_SIZE];
};
-msgpack_unpack_func(void, _init)(msgpack_unpack_struct(_context)* ctx)
+static inline void unpack_init(unpack_context* ctx)
{
- ctx->cs = CS_HEADER;
- ctx->trail = 0;
- ctx->top = 0;
- /*
- ctx->stack = ctx->embed_stack;
- ctx->stack_size = MSGPACK_EMBED_STACK_SIZE;
- */
- ctx->stack[0].obj = msgpack_unpack_callback(_root)(&ctx->user);
+ ctx->cs = CS_HEADER;
+ ctx->trail = 0;
+ ctx->top = 0;
+ /*
+ ctx->stack = ctx->embed_stack;
+ ctx->stack_size = MSGPACK_EMBED_STACK_SIZE;
+ */
+ ctx->stack[0].obj = unpack_callback_root(&ctx->user);
}
/*
-msgpack_unpack_func(void, _destroy)(msgpack_unpack_struct(_context)* ctx)
+static inline void unpack_destroy(unpack_context* ctx)
{
- if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) {
- free(ctx->stack);
- }
+ if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) {
+ free(ctx->stack);
+ }
}
*/
-msgpack_unpack_func(msgpack_unpack_object, _data)(msgpack_unpack_struct(_context)* ctx)
+static inline PyObject* unpack_data(unpack_context* ctx)
{
- return (ctx)->stack[0].obj;
+ return (ctx)->stack[0].obj;
}
template <bool construct>
-msgpack_unpack_func(int, _execute)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off)
+static inline int unpack_execute(unpack_context* ctx, const char* data, size_t len, size_t* off)
{
- assert(len >= *off);
+ assert(len >= *off);
- const unsigned char* p = (unsigned char*)data + *off;
- const unsigned char* const pe = (unsigned char*)data + len;
- const void* n = NULL;
+ const unsigned char* p = (unsigned char*)data + *off;
+ const unsigned char* const pe = (unsigned char*)data + len;
+ const void* n = NULL;
- unsigned int trail = ctx->trail;
- unsigned int cs = ctx->cs;
- unsigned int top = ctx->top;
- msgpack_unpack_struct(_stack)* stack = ctx->stack;
- /*
- unsigned int stack_size = ctx->stack_size;
- */
- msgpack_unpack_user* user = &ctx->user;
+ unsigned int trail = ctx->trail;
+ unsigned int cs = ctx->cs;
+ unsigned int top = ctx->top;
+ unpack_stack* stack = ctx->stack;
+ /*
+ unsigned int stack_size = ctx->stack_size;
+ */
+ unpack_user* user = &ctx->user;
- msgpack_unpack_object obj;
- msgpack_unpack_struct(_stack)* c = NULL;
+ PyObject* obj;
+ unpack_stack* c = NULL;
- int ret;
+ int ret;
#define construct_cb(name) \
- construct && msgpack_unpack_callback(name)
+ construct && unpack_callback ## name
#define push_simple_value(func) \
- if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \
- goto _push
+ if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \
+ goto _push
#define push_fixed_value(func, arg) \
- if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \
- goto _push
+ if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \
+ goto _push
#define push_variable_value(func, base, pos, len) \
- if(construct_cb(func)(user, \
- (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \
- goto _push
+ if(construct_cb(func)(user, \
+ (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \
+ goto _push
#define again_fixed_trail(_cs, trail_len) \
- trail = trail_len; \
- cs = _cs; \
- goto _fixed_trail_again
+ trail = trail_len; \
+ cs = _cs; \
+ goto _fixed_trail_again
#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \
- trail = trail_len; \
- if(trail == 0) { goto ifzero; } \
- cs = _cs; \
- goto _fixed_trail_again
+ trail = trail_len; \
+ if(trail == 0) { goto ifzero; } \
+ cs = _cs; \
+ goto _fixed_trail_again
#define start_container(func, count_, ct_) \
- if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
- if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
- if((count_) == 0) { obj = stack[top].obj; \
- if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
- goto _push; } \
- stack[top].ct = ct_; \
- stack[top].size = count_; \
- stack[top].count = 0; \
- ++top; \
- /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
- /*printf("stack push %d\n", top);*/ \
- /* FIXME \
- if(top >= stack_size) { \
- if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
- size_t csize = sizeof(msgpack_unpack_struct(_stack)) * MSGPACK_EMBED_STACK_SIZE; \
- size_t nsize = csize * 2; \
- msgpack_unpack_struct(_stack)* tmp = (msgpack_unpack_struct(_stack)*)malloc(nsize); \
- if(tmp == NULL) { goto _failed; } \
- memcpy(tmp, ctx->stack, csize); \
- ctx->stack = stack = tmp; \
- ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
- } else { \
- size_t nsize = sizeof(msgpack_unpack_struct(_stack)) * ctx->stack_size * 2; \
- msgpack_unpack_struct(_stack)* tmp = (msgpack_unpack_struct(_stack)*)realloc(ctx->stack, nsize); \
- if(tmp == NULL) { goto _failed; } \
- ctx->stack = stack = tmp; \
- ctx->stack_size = stack_size = stack_size * 2; \
- } \
- } \
- */ \
- goto _header_again
-
-#define NEXT_CS(p) \
- ((unsigned int)*p & 0x1f)
+ if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
+ if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
+ if((count_) == 0) { obj = stack[top].obj; \
+ if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
+ goto _push; } \
+ stack[top].ct = ct_; \
+ stack[top].size = count_; \
+ stack[top].count = 0; \
+ ++top; \
+ /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
+ /*printf("stack push %d\n", top);*/ \
+ /* FIXME \
+ if(top >= stack_size) { \
+ if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
+ size_t csize = sizeof(unpack_stack) * MSGPACK_EMBED_STACK_SIZE; \
+ size_t nsize = csize * 2; \
+ unpack_stack* tmp = (unpack_stack*)malloc(nsize); \
+ if(tmp == NULL) { goto _failed; } \
+ memcpy(tmp, ctx->stack, csize); \
+ ctx->stack = stack = tmp; \
+ ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
+ } else { \
+ size_t nsize = sizeof(unpack_stack) * ctx->stack_size * 2; \
+ unpack_stack* tmp = (unpack_stack*)realloc(ctx->stack, nsize); \
+ if(tmp == NULL) { goto _failed; } \
+ ctx->stack = stack = tmp; \
+ ctx->stack_size = stack_size = stack_size * 2; \
+ } \
+ } \
+ */ \
+ goto _header_again
+
+#define NEXT_CS(p) ((unsigned int)*p & 0x1f)
#ifdef USE_CASE_RANGE
#define SWITCH_RANGE_BEGIN switch(*p) {
@@ -190,221 +165,235 @@ msgpack_unpack_func(int, _execute)(msgpack_unpack_struct(_context)* ctx, const c
#define SWITCH_RANGE_END } }
#endif
- if(p == pe) { goto _out; }
- do {
- switch(cs) {
- case CS_HEADER:
- SWITCH_RANGE_BEGIN
- SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum
- push_fixed_value(_uint8, *(uint8_t*)p);
- SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum
- push_fixed_value(_int8, *(int8_t*)p);
- SWITCH_RANGE(0xc0, 0xdf) // Variable
- switch(*p) {
- case 0xc0: // nil
- push_simple_value(_nil);
- //case 0xc1: // string
- // again_terminal_trail(NEXT_CS(p), p+1);
- case 0xc2: // false
- push_simple_value(_false);
- case 0xc3: // true
- push_simple_value(_true);
- //case 0xc4:
- //case 0xc5:
- //case 0xc6:
- //case 0xc7:
- //case 0xc8:
- //case 0xc9:
- case 0xca: // float
- case 0xcb: // double
- case 0xcc: // unsigned int 8
- case 0xcd: // unsigned int 16
- case 0xce: // unsigned int 32
- case 0xcf: // unsigned int 64
- case 0xd0: // signed int 8
- case 0xd1: // signed int 16
- case 0xd2: // signed int 32
- case 0xd3: // signed int 64
- again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03));
- //case 0xd4:
- //case 0xd5:
- //case 0xd6: // big integer 16
- //case 0xd7: // big integer 32
- //case 0xd8: // big float 16
- //case 0xd9: // big float 32
- case 0xda: // raw 16
- case 0xdb: // raw 32
- case 0xdc: // array 16
- case 0xdd: // array 32
- case 0xde: // map 16
- case 0xdf: // map 32
- again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
- default:
- goto _failed;
- }
- SWITCH_RANGE(0xa0, 0xbf) // FixRaw
- again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
- SWITCH_RANGE(0x90, 0x9f) // FixArray
- start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM);
- SWITCH_RANGE(0x80, 0x8f) // FixMap
- start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
-
- SWITCH_RANGE_DEFAULT
- goto _failed;
- SWITCH_RANGE_END
- // end CS_HEADER
-
-
- _fixed_trail_again:
- ++p;
-
- default:
- if((size_t)(pe - p) < trail) { goto _out; }
- n = p; p += trail - 1;
- switch(cs) {
- //case CS_
- //case CS_
- case CS_FLOAT: {
- union { uint32_t i; float f; } mem;
- mem.i = _msgpack_load32(uint32_t,n);
- push_fixed_value(_float, mem.f); }
- case CS_DOUBLE: {
- union { uint64_t i; double f; } mem;
- mem.i = _msgpack_load64(uint64_t,n);
+ if(p == pe) { goto _out; }
+ do {
+ switch(cs) {
+ case CS_HEADER:
+ SWITCH_RANGE_BEGIN
+ SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum
+ push_fixed_value(_uint8, *(uint8_t*)p);
+ SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum
+ push_fixed_value(_int8, *(int8_t*)p);
+ SWITCH_RANGE(0xc0, 0xdf) // Variable
+ switch(*p) {
+ case 0xc0: // nil
+ push_simple_value(_nil);
+ //case 0xc1: // never used
+ case 0xc2: // false
+ push_simple_value(_false);
+ case 0xc3: // true
+ push_simple_value(_true);
+ case 0xc4: // bin 8
+ again_fixed_trail(NEXT_CS(p), 1);
+ case 0xc5: // bin 16
+ again_fixed_trail(NEXT_CS(p), 2);
+ case 0xc6: // bin 32
+ again_fixed_trail(NEXT_CS(p), 4);
+ case 0xc7: // ext 8
+ again_fixed_trail(NEXT_CS(p), 1);
+ case 0xc8: // ext 16
+ again_fixed_trail(NEXT_CS(p), 2);
+ case 0xc9: // ext 32
+ again_fixed_trail(NEXT_CS(p), 4);
+ case 0xca: // float
+ case 0xcb: // double
+ case 0xcc: // unsigned int 8
+ case 0xcd: // unsigned int 16
+ case 0xce: // unsigned int 32
+ case 0xcf: // unsigned int 64
+ case 0xd0: // signed int 8
+ case 0xd1: // signed int 16
+ case 0xd2: // signed int 32
+ case 0xd3: // signed int 64
+ again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03));
+ case 0xd4: // fixext 1
+ case 0xd5: // fixext 2
+ case 0xd6: // fixext 4
+ case 0xd7: // fixext 8
+ again_fixed_trail_if_zero(ACS_EXT_VALUE,
+ (1 << (((unsigned int)*p) & 0x03))+1,
+ _ext_zero);
+ case 0xd8: // fixext 16
+ again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero);
+ case 0xd9: // str 8
+ again_fixed_trail(NEXT_CS(p), 1);
+ case 0xda: // raw 16
+ case 0xdb: // raw 32
+ case 0xdc: // array 16
+ case 0xdd: // array 32
+ case 0xde: // map 16
+ case 0xdf: // map 32
+ again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
+ default:
+ goto _failed;
+ }
+ SWITCH_RANGE(0xa0, 0xbf) // FixRaw
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
+ SWITCH_RANGE(0x90, 0x9f) // FixArray
+ start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM);
+ SWITCH_RANGE(0x80, 0x8f) // FixMap
+ start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
+
+ SWITCH_RANGE_DEFAULT
+ goto _failed;
+ SWITCH_RANGE_END
+ // end CS_HEADER
+
+
+ _fixed_trail_again:
+ ++p;
+
+ default:
+ if((size_t)(pe - p) < trail) { goto _out; }
+ n = p; p += trail - 1;
+ switch(cs) {
+ case CS_EXT_8:
+ again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero);
+ case CS_EXT_16:
+ again_fixed_trail_if_zero(ACS_EXT_VALUE,
+ _msgpack_load16(uint16_t,n)+1,
+ _ext_zero);
+ case CS_EXT_32:
+ again_fixed_trail_if_zero(ACS_EXT_VALUE,
+ _msgpack_load32(uint32_t,n)+1,
+ _ext_zero);
+ case CS_FLOAT: {
+ union { uint32_t i; float f; } mem;
+ mem.i = _msgpack_load32(uint32_t,n);
+ push_fixed_value(_float, mem.f); }
+ case CS_DOUBLE: {
+ union { uint64_t i; double f; } mem;
+ mem.i = _msgpack_load64(uint64_t,n);
#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
// https://github.com/msgpack/msgpack-perl/pull/1
mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
#endif
- push_fixed_value(_double, mem.f); }
- case CS_UINT_8:
- push_fixed_value(_uint8, *(uint8_t*)n);
- case CS_UINT_16:
- push_fixed_value(_uint16, _msgpack_load16(uint16_t,n));
- case CS_UINT_32:
- push_fixed_value(_uint32, _msgpack_load32(uint32_t,n));
- case CS_UINT_64:
- push_fixed_value(_uint64, _msgpack_load64(uint64_t,n));
-
- case CS_INT_8:
- push_fixed_value(_int8, *(int8_t*)n);
- case CS_INT_16:
- push_fixed_value(_int16, _msgpack_load16(int16_t,n));
- case CS_INT_32:
- push_fixed_value(_int32, _msgpack_load32(int32_t,n));
- case CS_INT_64:
- push_fixed_value(_int64, _msgpack_load64(int64_t,n));
-
- //case CS_
- //case CS_
- //case CS_BIG_INT_16:
- // again_fixed_trail_if_zero(ACS_BIG_INT_VALUE, _msgpack_load16(uint16_t,n), _big_int_zero);
- //case CS_BIG_INT_32:
- // again_fixed_trail_if_zero(ACS_BIG_INT_VALUE, _msgpack_load32(uint32_t,n), _big_int_zero);
- //case ACS_BIG_INT_VALUE:
- //_big_int_zero:
- // // FIXME
- // push_variable_value(_big_int, data, n, trail);
-
- //case CS_BIG_FLOAT_16:
- // again_fixed_trail_if_zero(ACS_BIG_FLOAT_VALUE, _msgpack_load16(uint16_t,n), _big_float_zero);
- //case CS_BIG_FLOAT_32:
- // again_fixed_trail_if_zero(ACS_BIG_FLOAT_VALUE, _msgpack_load32(uint32_t,n), _big_float_zero);
- //case ACS_BIG_FLOAT_VALUE:
- //_big_float_zero:
- // // FIXME
- // push_variable_value(_big_float, data, n, trail);
-
- case CS_RAW_16:
- again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero);
- case CS_RAW_32:
- again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero);
- case ACS_RAW_VALUE:
- _raw_zero:
- push_variable_value(_raw, data, n, trail);
-
- case CS_ARRAY_16:
- start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM);
- case CS_ARRAY_32:
- /* FIXME security guard */
- start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM);
-
- case CS_MAP_16:
- start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY);
- case CS_MAP_32:
- /* FIXME security guard */
- start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY);
-
- default:
- goto _failed;
- }
- }
+ push_fixed_value(_double, mem.f); }
+ case CS_UINT_8:
+ push_fixed_value(_uint8, *(uint8_t*)n);
+ case CS_UINT_16:
+ push_fixed_value(_uint16, _msgpack_load16(uint16_t,n));
+ case CS_UINT_32:
+ push_fixed_value(_uint32, _msgpack_load32(uint32_t,n));
+ case CS_UINT_64:
+ push_fixed_value(_uint64, _msgpack_load64(uint64_t,n));
+
+ case CS_INT_8:
+ push_fixed_value(_int8, *(int8_t*)n);
+ case CS_INT_16:
+ push_fixed_value(_int16, _msgpack_load16(int16_t,n));
+ case CS_INT_32:
+ push_fixed_value(_int32, _msgpack_load32(int32_t,n));
+ case CS_INT_64:
+ push_fixed_value(_int64, _msgpack_load64(int64_t,n));
+
+ case CS_BIN_8:
+ again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero);
+ case CS_BIN_16:
+ again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero);
+ case CS_BIN_32:
+ again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero);
+ case ACS_BIN_VALUE:
+ _bin_zero:
+ push_variable_value(_bin, data, n, trail);
+
+ case CS_RAW_8:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero);
+ case CS_RAW_16:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero);
+ case CS_RAW_32:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero);
+ case ACS_RAW_VALUE:
+ _raw_zero:
+ push_variable_value(_raw, data, n, trail);
+
+ case ACS_EXT_VALUE:
+ _ext_zero:
+ push_variable_value(_ext, data, n, trail);
+
+ case CS_ARRAY_16:
+ start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM);
+ case CS_ARRAY_32:
+ /* FIXME security guard */
+ start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM);
+
+ case CS_MAP_16:
+ start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY);
+ case CS_MAP_32:
+ /* FIXME security guard */
+ start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY);
+
+ default:
+ goto _failed;
+ }
+ }
_push:
- if(top == 0) { goto _finish; }
- c = &stack[top-1];
- switch(c->ct) {
- case CT_ARRAY_ITEM:
- if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; }
- if(++c->count == c->size) {
- obj = c->obj;
- if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; }
- --top;
- /*printf("stack pop %d\n", top);*/
- goto _push;
- }
- goto _header_again;
- case CT_MAP_KEY:
- c->map_key = obj;
- c->ct = CT_MAP_VALUE;
- goto _header_again;
- case CT_MAP_VALUE:
- if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; }
- if(++c->count == c->size) {
- obj = c->obj;
- if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; }
- --top;
- /*printf("stack pop %d\n", top);*/
- goto _push;
- }
- c->ct = CT_MAP_KEY;
- goto _header_again;
-
- default:
- goto _failed;
- }
+ if(top == 0) { goto _finish; }
+ c = &stack[top-1];
+ switch(c->ct) {
+ case CT_ARRAY_ITEM:
+ if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; }
+ if(++c->count == c->size) {
+ obj = c->obj;
+ if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; }
+ --top;
+ /*printf("stack pop %d\n", top);*/
+ goto _push;
+ }
+ goto _header_again;
+ case CT_MAP_KEY:
+ c->map_key = obj;
+ c->ct = CT_MAP_VALUE;
+ goto _header_again;
+ case CT_MAP_VALUE:
+ if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; }
+ if(++c->count == c->size) {
+ obj = c->obj;
+ if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; }
+ --top;
+ /*printf("stack pop %d\n", top);*/
+ goto _push;
+ }
+ c->ct = CT_MAP_KEY;
+ goto _header_again;
+
+ default:
+ goto _failed;
+ }
_header_again:
- cs = CS_HEADER;
- ++p;
- } while(p != pe);
- goto _out;
+ cs = CS_HEADER;
+ ++p;
+ } while(p != pe);
+ goto _out;
_finish:
- if (!construct)
- msgpack_unpack_callback(_nil)(user, &obj);
- stack[0].obj = obj;
- ++p;
- ret = 1;
- /*printf("-- finish --\n"); */
- goto _end;
+ if (!construct)
+ unpack_callback_nil(user, &obj);
+ stack[0].obj = obj;
+ ++p;
+ ret = 1;
+ /*printf("-- finish --\n"); */
+ goto _end;
_failed:
- /*printf("** FAILED **\n"); */
- ret = -1;
- goto _end;
+ /*printf("** FAILED **\n"); */
+ ret = -1;
+ goto _end;
_out:
- ret = 0;
- goto _end;
+ ret = 0;
+ goto _end;
_end:
- ctx->cs = cs;
- ctx->trail = trail;
- ctx->top = top;
- *off = p - (const unsigned char*)data;
+ ctx->cs = cs;
+ ctx->trail = trail;
+ ctx->top = top;
+ *off = p - (const unsigned char*)data;
- return ret;
+ return ret;
#undef construct_cb
}
@@ -420,55 +409,55 @@ msgpack_unpack_func(int, _execute)(msgpack_unpack_struct(_context)* ctx, const c
#undef start_container
template <unsigned int fixed_offset, unsigned int var_offset>
-msgpack_unpack_func(int, _container_header)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off)
+static inline int unpack_container_header(unpack_context* ctx, const char* data, size_t len, size_t* off)
{
- assert(len >= *off);
- uint32_t size;
- const unsigned char *const p = (unsigned char*)data + *off;
+ assert(len >= *off);
+ uint32_t size;
+ const unsigned char *const p = (unsigned char*)data + *off;
#define inc_offset(inc) \
- if (len - *off < inc) \
- return 0; \
- *off += inc;
-
- switch (*p) {
- case var_offset:
- inc_offset(3);
- size = _msgpack_load16(uint16_t, p + 1);
- break;
- case var_offset + 1:
- inc_offset(5);
- size = _msgpack_load32(uint32_t, p + 1);
- break;
+ if (len - *off < inc) \
+ return 0; \
+ *off += inc;
+
+ switch (*p) {
+ case var_offset:
+ inc_offset(3);
+ size = _msgpack_load16(uint16_t, p + 1);
+ break;
+ case var_offset + 1:
+ inc_offset(5);
+ size = _msgpack_load32(uint32_t, p + 1);
+ break;
#ifdef USE_CASE_RANGE
- case fixed_offset + 0x0 ... fixed_offset + 0xf:
+ case fixed_offset + 0x0 ... fixed_offset + 0xf:
#else
- case fixed_offset + 0x0:
- case fixed_offset + 0x1:
- case fixed_offset + 0x2:
- case fixed_offset + 0x3:
- case fixed_offset + 0x4:
- case fixed_offset + 0x5:
- case fixed_offset + 0x6:
- case fixed_offset + 0x7:
- case fixed_offset + 0x8:
- case fixed_offset + 0x9:
- case fixed_offset + 0xa:
- case fixed_offset + 0xb:
- case fixed_offset + 0xc:
- case fixed_offset + 0xd:
- case fixed_offset + 0xe:
- case fixed_offset + 0xf:
+ case fixed_offset + 0x0:
+ case fixed_offset + 0x1:
+ case fixed_offset + 0x2:
+ case fixed_offset + 0x3:
+ case fixed_offset + 0x4:
+ case fixed_offset + 0x5:
+ case fixed_offset + 0x6:
+ case fixed_offset + 0x7:
+ case fixed_offset + 0x8:
+ case fixed_offset + 0x9:
+ case fixed_offset + 0xa:
+ case fixed_offset + 0xb:
+ case fixed_offset + 0xc:
+ case fixed_offset + 0xd:
+ case fixed_offset + 0xe:
+ case fixed_offset + 0xf:
#endif
- ++*off;
- size = ((unsigned int)*p) & 0x0f;
- break;
- default:
- PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream");
- return -1;
+ ++*off;
+ size = ((unsigned int)*p) & 0x0f;
+ break;
+ default:
+ PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream");
+ return -1;
}
- msgpack_unpack_callback(_uint32)(&ctx->user, size, &ctx->stack[0].obj);
- return 1;
+ unpack_callback_uint32(&ctx->user, size, &ctx->stack[0].obj);
+ return 1;
}
#undef SWITCH_RANGE_BEGIN
@@ -476,17 +465,11 @@ msgpack_unpack_func(int, _container_header)(msgpack_unpack_struct(_context)* ctx
#undef SWITCH_RANGE_DEFAULT
#undef SWITCH_RANGE_END
-static const execute_fn template_construct = &template_execute<true>;
-static const execute_fn template_skip = &template_execute<false>;
-static const execute_fn read_array_header = &template_container_header<0x90, 0xdc>;
-static const execute_fn read_map_header = &template_container_header<0x80, 0xde>;
-
-#undef msgpack_unpack_func
-#undef msgpack_unpack_callback
-#undef msgpack_unpack_struct
-#undef msgpack_unpack_object
-#undef msgpack_unpack_user
+static const execute_fn unpack_construct = &unpack_execute<true>;
+static const execute_fn unpack_skip = &unpack_execute<false>;
+static const execute_fn read_array_header = &unpack_container_header<0x90, 0xdc>;
+static const execute_fn read_map_header = &unpack_container_header<0x80, 0xde>;
#undef NEXT_CS
-/* vim: set ts=4 sw=4 noexpandtab */
+/* vim: set ts=4 sw=4 sts=4 expandtab */
diff --git a/pandas/tests/test_msgpack/test_buffer.py b/pandas/tests/test_msgpack/test_buffer.py
index 940b65406103e..43f5e64012885 100644
--- a/pandas/tests/test_msgpack/test_buffer.py
+++ b/pandas/tests/test_msgpack/test_buffer.py
@@ -7,6 +7,14 @@
def test_unpack_buffer():
from array import array
buf = array('b')
- buf.fromstring(packb(('foo', 'bar')))
+ buf.fromstring(packb((b'foo', b'bar')))
obj = unpackb(buf, use_list=1)
assert [b'foo', b'bar'] == obj
+
+
+def test_unpack_bytearray():
+ buf = bytearray(packb(('foo', 'bar')))
+ obj = unpackb(buf, use_list=1)
+ assert [b'foo', b'bar'] == obj
+ expected_type = bytes
+ assert all(type(s) == expected_type for s in obj)
diff --git a/pandas/tests/test_msgpack/test_case.py b/pandas/tests/test_msgpack/test_case.py
index e78456b2ddb62..187668b242495 100644
--- a/pandas/tests/test_msgpack/test_case.py
+++ b/pandas/tests/test_msgpack/test_case.py
@@ -99,3 +99,4 @@ def test_match():
def test_unicode():
assert unpackb(packb('foobar'), use_list=1) == b'foobar'
+
diff --git a/pandas/tests/test_msgpack/test_extension.py b/pandas/tests/test_msgpack/test_extension.py
new file mode 100644
index 0000000000000..3172605c0aae1
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_extension.py
@@ -0,0 +1,57 @@
+from __future__ import print_function
+import array
+import pandas.msgpack as msgpack
+from pandas.msgpack import ExtType
+
+
+def test_pack_ext_type():
+ def p(s):
+ packer = msgpack.Packer()
+ packer.pack_ext_type(0x42, s)
+ return packer.bytes()
+ assert p(b'A') == b'\xd4\x42A' # fixext 1
+ assert p(b'AB') == b'\xd5\x42AB' # fixext 2
+ assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
+ assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
+ assert p(b'A'*16) == b'\xd8\x42' + b'A'*16 # fixext 16
+ assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
+ assert p(b'A'*0x0123) == b'\xc8\x01\x23\x42' + b'A'*0x0123 # ext 16
+ assert p(b'A'*0x00012345) == b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345 # ext 32
+
+
+def test_unpack_ext_type():
+ def check(b, expected):
+ assert msgpack.unpackb(b) == expected
+
+ check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
+ check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
+ check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
+ check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
+ check(b'\xd8\x42' + b'A'*16, ExtType(0x42, b'A'*16)) # fixext 16
+ check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
+ check(b'\xc8\x01\x23\x42' + b'A'*0x0123,
+ ExtType(0x42, b'A'*0x0123)) # ext 16
+ check(b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345,
+ ExtType(0x42, b'A'*0x00012345)) # ext 32
+
+
+def test_extension_type():
+ def default(obj):
+ print('default called', obj)
+ if isinstance(obj, array.array):
+ typecode = 123 # application specific typecode
+ data = obj.tostring()
+ return ExtType(typecode, data)
+        raise TypeError("Unknown type object %r" % (obj,))
+
+ def ext_hook(code, data):
+ print('ext_hook called', code, data)
+ assert code == 123
+ obj = array.array('d')
+ obj.fromstring(data)
+ return obj
+
+ obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
+ s = msgpack.packb(obj, default=default)
+ obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
+ assert obj == obj2
diff --git a/pandas/tests/test_msgpack/test_format.py b/pandas/tests/test_msgpack/test_format.py
index a3a3afd046ce2..706c48436d7d3 100644
--- a/pandas/tests/test_msgpack/test_format.py
+++ b/pandas/tests/test_msgpack/test_format.py
@@ -7,7 +7,7 @@ def check(src, should, use_list=0):
assert unpackb(src, use_list=use_list) == should
def testSimpleValue():
- check(b"\x93\xc0\xc2\xc3",
+ check(b"\x93\xc0\xc2\xc3",
(None, False, True,))
def testFixnum():
diff --git a/pandas/tests/test_msgpack/test_limits.py b/pandas/tests/test_msgpack/test_limits.py
new file mode 100644
index 0000000000000..d9aa957182d65
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_limits.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+import pandas.util.testing as tm
+
+from pandas.msgpack import packb, unpackb, Packer, Unpacker, ExtType
+
+class TestLimits(tm.TestCase):
+ def test_integer(self):
+ x = -(2 ** 63)
+ assert unpackb(packb(x)) == x
+ self.assertRaises((OverflowError, ValueError), packb, x-1)
+ x = 2 ** 64 - 1
+ assert unpackb(packb(x)) == x
+ self.assertRaises((OverflowError, ValueError), packb, x+1)
+
+
+ def test_array_header(self):
+ packer = Packer()
+ packer.pack_array_header(2**32-1)
+ self.assertRaises((OverflowError, ValueError),
+ packer.pack_array_header, 2**32)
+
+
+ def test_map_header(self):
+ packer = Packer()
+ packer.pack_map_header(2**32-1)
+ self.assertRaises((OverflowError, ValueError),
+ packer.pack_array_header, 2**32)
+
+
+ def test_max_str_len(self):
+ d = 'x' * 3
+ packed = packb(d)
+
+ unpacker = Unpacker(max_str_len=3, encoding='utf-8')
+ unpacker.feed(packed)
+ assert unpacker.unpack() == d
+
+ unpacker = Unpacker(max_str_len=2, encoding='utf-8')
+ unpacker.feed(packed)
+ self.assertRaises(ValueError, unpacker.unpack)
+
+
+ def test_max_bin_len(self):
+ d = b'x' * 3
+ packed = packb(d, use_bin_type=True)
+
+ unpacker = Unpacker(max_bin_len=3)
+ unpacker.feed(packed)
+ assert unpacker.unpack() == d
+
+ unpacker = Unpacker(max_bin_len=2)
+ unpacker.feed(packed)
+ self.assertRaises(ValueError, unpacker.unpack)
+
+
+ def test_max_array_len(self):
+ d = [1, 2, 3]
+ packed = packb(d)
+
+ unpacker = Unpacker(max_array_len=3)
+ unpacker.feed(packed)
+ assert unpacker.unpack() == d
+
+ unpacker = Unpacker(max_array_len=2)
+ unpacker.feed(packed)
+ self.assertRaises(ValueError, unpacker.unpack)
+
+
+ def test_max_map_len(self):
+ d = {1: 2, 3: 4, 5: 6}
+ packed = packb(d)
+
+ unpacker = Unpacker(max_map_len=3)
+ unpacker.feed(packed)
+ assert unpacker.unpack() == d
+
+ unpacker = Unpacker(max_map_len=2)
+ unpacker.feed(packed)
+ self.assertRaises(ValueError, unpacker.unpack)
+
+
+ def test_max_ext_len(self):
+ d = ExtType(42, b"abc")
+ packed = packb(d)
+
+ unpacker = Unpacker(max_ext_len=3)
+ unpacker.feed(packed)
+ assert unpacker.unpack() == d
+
+ unpacker = Unpacker(max_ext_len=2)
+ unpacker.feed(packed)
+ self.assertRaises(ValueError, unpacker.unpack)
diff --git a/pandas/tests/test_msgpack/test_newspec.py b/pandas/tests/test_msgpack/test_newspec.py
new file mode 100644
index 0000000000000..8532ab8cfb1a4
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_newspec.py
@@ -0,0 +1,88 @@
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb, ExtType
+
+
+def test_str8():
+ header = b'\xd9'
+ data = b'x' * 32
+ b = packb(data.decode(), use_bin_type=True)
+ assert len(b) == len(data) + 2
+ assert b[0:2] == header + b'\x20'
+ assert b[2:] == data
+ assert unpackb(b) == data
+
+ data = b'x' * 255
+ b = packb(data.decode(), use_bin_type=True)
+ assert len(b) == len(data) + 2
+ assert b[0:2] == header + b'\xff'
+ assert b[2:] == data
+ assert unpackb(b) == data
+
+
+def test_bin8():
+ header = b'\xc4'
+ data = b''
+ b = packb(data, use_bin_type=True)
+ assert len(b) == len(data) + 2
+ assert b[0:2] == header + b'\x00'
+ assert b[2:] == data
+ assert unpackb(b) == data
+
+ data = b'x' * 255
+ b = packb(data, use_bin_type=True)
+ assert len(b) == len(data) + 2
+ assert b[0:2] == header + b'\xff'
+ assert b[2:] == data
+ assert unpackb(b) == data
+
+
+def test_bin16():
+ header = b'\xc5'
+ data = b'x' * 256
+ b = packb(data, use_bin_type=True)
+ assert len(b) == len(data) + 3
+ assert b[0:1] == header
+ assert b[1:3] == b'\x01\x00'
+ assert b[3:] == data
+ assert unpackb(b) == data
+
+ data = b'x' * 65535
+ b = packb(data, use_bin_type=True)
+ assert len(b) == len(data) + 3
+ assert b[0:1] == header
+ assert b[1:3] == b'\xff\xff'
+ assert b[3:] == data
+ assert unpackb(b) == data
+
+
+def test_bin32():
+ header = b'\xc6'
+ data = b'x' * 65536
+ b = packb(data, use_bin_type=True)
+ assert len(b) == len(data) + 5
+ assert b[0:1] == header
+ assert b[1:5] == b'\x00\x01\x00\x00'
+ assert b[5:] == data
+ assert unpackb(b) == data
+
+def test_ext():
+ def check(ext, packed):
+ assert packb(ext) == packed
+ assert unpackb(packed) == ext
+ check(ExtType(0x42, b'Z'), b'\xd4\x42Z') # fixext 1
+ check(ExtType(0x42, b'ZZ'), b'\xd5\x42ZZ') # fixext 2
+ check(ExtType(0x42, b'Z'*4), b'\xd6\x42' + b'Z'*4) # fixext 4
+ check(ExtType(0x42, b'Z'*8), b'\xd7\x42' + b'Z'*8) # fixext 8
+ check(ExtType(0x42, b'Z'*16), b'\xd8\x42' + b'Z'*16) # fixext 16
+ # ext 8
+ check(ExtType(0x42, b''), b'\xc7\x00\x42')
+ check(ExtType(0x42, b'Z'*255), b'\xc7\xff\x42' + b'Z'*255)
+ # ext 16
+ check(ExtType(0x42, b'Z'*256), b'\xc8\x01\x00\x42' + b'Z'*256)
+ check(ExtType(0x42, b'Z'*0xffff), b'\xc8\xff\xff\x42' + b'Z'*0xffff)
+ # ext 32
+ check(ExtType(0x42, b'Z'*0x10000), b'\xc9\x00\x01\x00\x00\x42' + b'Z'*0x10000)
+ # needs large memory
+ #check(ExtType(0x42, b'Z'*0xffffffff),
+ # b'\xc9\xff\xff\xff\xff\x42' + b'Z'*0xffffffff)
diff --git a/pandas/tests/test_msgpack/test_obj.py b/pandas/tests/test_msgpack/test_obj.py
index 4a018bc8b87f1..886fec522d4f3 100644
--- a/pandas/tests/test_msgpack/test_obj.py
+++ b/pandas/tests/test_msgpack/test_obj.py
@@ -44,13 +44,13 @@ def test_decode_pairs_hook(self):
assert unpacked[1] == prod_sum
def test_only_one_obj_hook(self):
- self.assertRaises(ValueError, unpackb, b'', object_hook=lambda x: x, object_pairs_hook=lambda x: x)
+ self.assertRaises(TypeError, unpackb, b'', object_hook=lambda x: x, object_pairs_hook=lambda x: x)
def test_bad_hook(self):
def f():
packed = packb([3, 1+2j], default=lambda o: o)
unpacked = unpackb(packed, use_list=1)
- self.assertRaises(ValueError, f)
+ self.assertRaises(TypeError, f)
def test_array_hook(self):
packed = packb([1,2,3])
diff --git a/pandas/tests/test_msgpack/test_read_size.py b/pandas/tests/test_msgpack/test_read_size.py
index db3e1deb04f8f..7cbb9c9807201 100644
--- a/pandas/tests/test_msgpack/test_read_size.py
+++ b/pandas/tests/test_msgpack/test_read_size.py
@@ -63,3 +63,4 @@ def test_incorrect_type_nested_map():
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
+
diff --git a/pandas/tests/test_msgpack/test_seq.py b/pandas/tests/test_msgpack/test_seq.py
index e5ee68c4cab84..464ff6d0174af 100644
--- a/pandas/tests/test_msgpack/test_seq.py
+++ b/pandas/tests/test_msgpack/test_seq.py
@@ -1,21 +1,18 @@
#!/usr/bin/env python
# coding: utf-8
-from pandas import compat
-from pandas.compat import u
+import io
import pandas.msgpack as msgpack
-binarydata = [chr(i) for i in range(256)]
-binarydata = "".join(binarydata)
-if compat.PY3:
- binarydata = binarydata.encode('utf-8')
+
+binarydata = bytes(bytearray(range(256)))
def gen_binary_data(idx):
- data = binarydata[:idx % 300]
- return data
+ return binarydata[:idx % 300]
+
def test_exceeding_unpacker_read_size():
- dumpf = compat.BytesIO()
+ dumpf = io.BytesIO()
packer = msgpack.Packer()
@@ -30,7 +27,7 @@ def test_exceeding_unpacker_read_size():
data = gen_binary_data(idx)
dumpf.write(packer.pack(data))
- f = compat.BytesIO(dumpf.getvalue())
+ f = io.BytesIO(dumpf.getvalue())
dumpf.close()
unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
diff --git a/pandas/tests/test_msgpack/test_sequnpack.py b/pandas/tests/test_msgpack/test_sequnpack.py
index 4c3ad363e5b6e..72ceed0471437 100644
--- a/pandas/tests/test_msgpack/test_sequnpack.py
+++ b/pandas/tests/test_msgpack/test_sequnpack.py
@@ -82,3 +82,15 @@ def test_readbytes(self):
assert unpacker.read_bytes(3) == b'oob'
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
+
+ def test_issue124(self):
+ unpacker = Unpacker()
+ unpacker.feed(b'\xa1?\xa1!')
+ assert tuple(unpacker) == (b'?', b'!')
+ assert tuple(unpacker) == ()
+ unpacker.feed(b"\xa1?\xa1")
+ assert tuple(unpacker) == (b'?',)
+ assert tuple(unpacker) == ()
+ unpacker.feed(b"!")
+ assert tuple(unpacker) == (b'!',)
+ assert tuple(unpacker) == ()
diff --git a/pandas/tests/test_msgpack/test_unpack.py b/pandas/tests/test_msgpack/test_unpack.py
new file mode 100644
index 0000000000000..fe840083ae1c2
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_unpack.py
@@ -0,0 +1,65 @@
+from io import BytesIO
+import sys
+from pandas.msgpack import Unpacker, packb, OutOfData, ExtType
+import pandas.util.testing as tm
+import nose
+
+class TestUnpack(tm.TestCase):
+ def test_unpack_array_header_from_file(self):
+ f = BytesIO(packb([1,2,3,4]))
+ unpacker = Unpacker(f)
+ assert unpacker.read_array_header() == 4
+ assert unpacker.unpack() == 1
+ assert unpacker.unpack() == 2
+ assert unpacker.unpack() == 3
+ assert unpacker.unpack() == 4
+ self.assertRaises(OutOfData, unpacker.unpack)
+
+
+ def test_unpacker_hook_refcnt(self):
+ if not hasattr(sys, 'getrefcount'):
+ raise nose.SkipTest('no sys.getrefcount()')
+ result = []
+
+ def hook(x):
+ result.append(x)
+ return x
+
+ basecnt = sys.getrefcount(hook)
+
+ up = Unpacker(object_hook=hook, list_hook=hook)
+
+ assert sys.getrefcount(hook) >= basecnt + 2
+
+ up.feed(packb([{}]))
+ up.feed(packb([{}]))
+ assert up.unpack() == [{}]
+ assert up.unpack() == [{}]
+ assert result == [{}, [{}], {}, [{}]]
+
+ del up
+
+ assert sys.getrefcount(hook) == basecnt
+
+
+ def test_unpacker_ext_hook(self):
+
+ class MyUnpacker(Unpacker):
+
+ def __init__(self):
+ super(MyUnpacker, self).__init__(ext_hook=self._hook,
+ encoding='utf-8')
+
+ def _hook(self, code, data):
+ if code == 1:
+ return int(data)
+ else:
+ return ExtType(code, data)
+
+ unpacker = MyUnpacker()
+ unpacker.feed(packb({'a': 1}, encoding='utf-8'))
+ assert unpacker.unpack() == {'a': 1}
+ unpacker.feed(packb({'a': ExtType(1, b'123')}, encoding='utf-8'))
+ assert unpacker.unpack() == {'a': 123}
+ unpacker.feed(packb({'a': ExtType(2, b'321')}, encoding='utf-8'))
+ assert unpacker.unpack() == {'a': ExtType(2, b'321')}
diff --git a/pandas/tests/test_msgpack/test_unpack_raw.py b/pandas/tests/test_msgpack/test_unpack_raw.py
index 0e96a79cf190a..c6bf747c8d992 100644
--- a/pandas/tests/test_msgpack/test_unpack_raw.py
+++ b/pandas/tests/test_msgpack/test_unpack_raw.py
@@ -1,18 +1,19 @@
"""Tests for cases where the user seeks to obtain packed msgpack objects"""
-from pandas import compat
+import io
from pandas.msgpack import Unpacker, packb
+
def test_write_bytes():
unpacker = Unpacker()
unpacker.feed(b'abc')
- f = compat.BytesIO()
+ f = io.BytesIO()
assert unpacker.unpack(f.write) == ord('a')
assert f.getvalue() == b'a'
- f = compat.BytesIO()
+ f = io.BytesIO()
assert unpacker.skip(f.write) is None
assert f.getvalue() == b'b'
- f = compat.BytesIO()
+ f = io.BytesIO()
assert unpacker.skip() is None
assert f.getvalue() == b''
@@ -20,9 +21,9 @@ def test_write_bytes():
def test_write_bytes_multi_buffer():
long_val = (5) * 100
expected = packb(long_val)
- unpacker = Unpacker(compat.BytesIO(expected), read_size=3, max_buffer_size=3)
+ unpacker = Unpacker(io.BytesIO(expected), read_size=3, max_buffer_size=3)
- f = compat.BytesIO()
+ f = io.BytesIO()
unpacked = unpacker.unpack(f.write)
assert unpacked == long_val
assert f.getvalue() == expected
diff --git a/setup.py b/setup.py
index 594e62bc622b9..0a680fe5d756d 100755
--- a/setup.py
+++ b/setup.py
@@ -456,16 +456,22 @@ def pxd(name):
else:
macros = [('__LITTLE_ENDIAN__', '1')]
-msgpack_ext = Extension('pandas.msgpack',
- sources = [srcpath('msgpack',
+packer_ext = Extension('pandas.msgpack._packer',
+ sources = [srcpath('_packer',
suffix=suffix if suffix == '.pyx' else '.cpp',
- subdir='')],
+ subdir='msgpack')],
language='c++',
- include_dirs=common_include,
+ include_dirs=['pandas/src/msgpack'] + common_include,
define_macros=macros)
-
-extensions.append(msgpack_ext)
-
+unpacker_ext = Extension('pandas.msgpack._unpacker',
+ sources = [srcpath('_unpacker',
+ suffix=suffix if suffix == '.pyx' else '.cpp',
+ subdir='msgpack')],
+ language='c++',
+ include_dirs=['pandas/src/msgpack'] + common_include,
+ define_macros=macros)
+extensions.append(packer_ext)
+extensions.append(unpacker_ext)
# if not ISRELEASED:
# extensions.extend([sandbox_ext])
@@ -525,6 +531,7 @@ def pxd(name):
'pandas.io.tests',
'pandas.io.tests.test_json',
'pandas.stats.tests',
+ 'pandas.msgpack'
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
'tests/data/legacy_pickle/*/*.pickle',
| Close #10581 by upgrading to `msgpack` 0.4.6
| https://api.github.com/repos/pandas-dev/pandas/pulls/10686 | 2015-07-28T07:11:55Z | 2015-08-18T11:09:49Z | 2015-08-18T11:09:49Z | 2015-08-18T11:09:58Z |
Fix Visual Studio 2015 compile errors | diff --git a/pandas/src/headers/math.h b/pandas/src/headers/math.h
index 8ccf11d07c3fe..34ad9f24a58f9 100644
--- a/pandas/src/headers/math.h
+++ b/pandas/src/headers/math.h
@@ -1,7 +1,7 @@
#ifndef _PANDAS_MATH_H_
#define _PANDAS_MATH_H_
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include <math.h>
__inline int signbit(double num) { return _copysign(1.0, num) < 0; }
#else
diff --git a/pandas/src/headers/stdint.h b/pandas/src/headers/stdint.h
index b0fd235adc036..8746bf132d0f7 100644
--- a/pandas/src/headers/stdint.h
+++ b/pandas/src/headers/stdint.h
@@ -1,7 +1,7 @@
#ifndef _PANDAS_STDINT_H_
#define _PANDAS_STDINT_H_
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
#include "ms_stdint.h"
#else
#include <stdint.h>
diff --git a/pandas/src/msgpack/pack.h b/pandas/src/msgpack/pack.h
index bb939d93ebeca..e4c315c1161b1 100644
--- a/pandas/src/msgpack/pack.h
+++ b/pandas/src/msgpack/pack.h
@@ -26,7 +26,7 @@
extern "C" {
#endif
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
#define inline __inline
#endif
| Together with #10681 this allows building Pandas for Python 3.5b4 using Visual Studio 2015.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10683 | 2015-07-27T02:39:31Z | 2015-07-27T13:10:34Z | null | 2015-07-27T13:10:34Z |
Remove duplicate code | diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h
index d3777e858b6ca..eef94e0616769 100644
--- a/pandas/src/parser/tokenizer.h
+++ b/pandas/src/parser/tokenizer.h
@@ -27,11 +27,7 @@ See LICENSE for the license
#define ERROR_INVALID_CHARS 3
#define ERROR_MINUS_SIGN 4
-#if defined(_MSC_VER)
-#include "../headers/ms_stdint.h"
-#else
-#include <stdint.h>
-#endif
+#include "../headers/stdint.h"
#include "khash.h"
| Use pandas internal [stdint.h](https://github.com/pydata/pandas/blob/master/pandas/src/headers/stdint.h).
| https://api.github.com/repos/pandas-dev/pandas/pulls/10681 | 2015-07-27T02:19:49Z | 2015-07-27T13:10:49Z | null | 2015-07-27T13:11:20Z |
DOC: improve docs on iteration | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index aae931a4b8319..d415db88b9cb6 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1151,24 +1151,81 @@ parameter that is by default ``False`` and copies the underlying data. Pass
The Panel class has a related :meth:`~Panel.rename_axis` class which can rename
any of its three axes.
+.. _basics.iteration:
+
Iteration
---------
-Because Series is array-like, basic iteration produces the values. Other data
-structures follow the dict-like convention of iterating over the "keys" of the
-objects. In short:
+The behavior of basic iteration over pandas objects depends on the type.
+When iterating over a Series, it is regarded as array-like, and basic iteration
+produces the values. Other data structures, like DataFrame and Panel,
+follow the dict-like convention of iterating over the "keys" of the
+objects.
+
+In short, basic iteration (``for i in object``) produces:
- * **Series**: values
- * **DataFrame**: column labels
- * **Panel**: item labels
+* **Series**: values
+* **DataFrame**: column labels
+* **Panel**: item labels
-Thus, for example:
+Thus, for example, iterating over a DataFrame gives you the column names:
.. ipython::
- In [0]: for col in df:
- ...: print(col)
- ...:
+ In [0]: df = pd.DataFrame({'col1' : np.random.randn(3), 'col2' : np.random.randn(3)},
+ ...: index=['a', 'b', 'c'])
+
+ In [0]: for col in df:
+ ...: print(col)
+ ...:
+
+Pandas objects also have the dict-like :meth:`~DataFrame.iteritems` method to
+iterate over the (key, value) pairs.
+
+To iterate over the rows of a DataFrame, you can use the following methods:
+
+* :meth:`~DataFrame.iterrows`: Iterate over the rows of a DataFrame as (index, Series) pairs.
+ This converts the rows to Series objects, which can change the dtypes and has some
+ performance implications.
+* :meth:`~DataFrame.itertuples`: Iterate over the rows of a DataFrame as tuples of the values.
+ This is a lot faster as :meth:`~DataFrame.iterrows`, and is in most cases preferable to
+ use to iterate over the values of a DataFrame.
+
+.. warning::
+
+ Iterating through pandas objects is generally **slow**. In many cases,
+ iterating manually over the rows is not needed and can be avoided with
+ one of the following approaches:
+
+ * Look for a *vectorized* solution: many operations can be performed using
+ built-in methods or numpy functions, (boolean) indexing, ...
+
+ * When you have a function that cannot work on the full DataFrame/Series
+ at once, it is better to use :meth:`~DataFrame.apply` instead of iterating
+ over the values. See the docs on :ref:`function application <basics.apply>`.
+
+ * If you need to do iterative manipulations on the values but performance is
+ important, consider writing the inner loop using e.g. cython or numba.
+ See the :ref:`enhancing performance <enhancingperf>` section for some
+ examples of this approach.
+
+.. warning::
+
+ You should **never modify** something you are iterating over.
+ This is not guaranteed to work in all cases. Depending on the
+ data types, the iterator returns a copy and not a view, and writing
+ to it will have no effect!
+
+ For example, in the following case setting the value has no effect:
+
+ .. ipython:: python
+
+ df = pd.DataFrame({'a': [1, 2, 3], 'b': ['a', 'b', 'c']})
+
+ for index, row in df.iterrows():
+ row['a'] = 10
+
+ df
iteritems
~~~~~~~~~
@@ -1176,9 +1233,9 @@ iteritems
Consistent with the dict-like interface, :meth:`~DataFrame.iteritems` iterates
through key-value pairs:
- * **Series**: (index, scalar value) pairs
- * **DataFrame**: (column, Series) pairs
- * **Panel**: (item, DataFrame) pairs
+* **Series**: (index, scalar value) pairs
+* **DataFrame**: (column, Series) pairs
+* **Panel**: (item, DataFrame) pairs
For example:
@@ -1189,22 +1246,46 @@ For example:
...: print(frame)
...:
-
.. _basics.iterrows:
iterrows
~~~~~~~~
-New in v0.7 is the ability to iterate efficiently through rows of a
-DataFrame with :meth:`~DataFrame.iterrows`. It returns an iterator yielding each
+:meth:`~DataFrame.iterrows` allows you to iterate through the rows of a
+DataFrame as Series objects. It returns an iterator yielding each
index value along with a Series containing the data in each row:
.. ipython::
- In [0]: for row_index, row in df2.iterrows():
+ In [0]: for row_index, row in df.iterrows():
...: print('%s\n%s' % (row_index, row))
...:
+.. note::
+
+ Because :meth:`~DataFrame.iterrows` returns a Series for each row,
+ it does **not** preserve dtypes across the rows (dtypes are
+ preserved across columns for DataFrames). For example,
+
+ .. ipython:: python
+
+ df_orig = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
+ df_orig.dtypes
+ row = next(df_orig.iterrows())[1]
+ row
+
+ All values in ``row``, returned as a Series, are now upcasted
+ to floats, also the original integer value in column `x`:
+
+ .. ipython:: python
+
+ row['int'].dtype
+ df_orig['int'].dtype
+
+ To preserve dtypes while iterating over the rows, it is better
+ to use :meth:`~DataFrame.itertuples` which returns tuples of the values
+ and which is generally much faster as ``iterrows``.
+
For instance, a contrived way to transpose the DataFrame would be:
.. ipython:: python
@@ -1216,36 +1297,29 @@ For instance, a contrived way to transpose the DataFrame would be:
df2_t = pd.DataFrame(dict((idx,values) for idx, values in df2.iterrows()))
print(df2_t)
-.. note::
-
- ``iterrows`` does **not** preserve dtypes across the rows (dtypes are
- preserved across columns for DataFrames). For example,
-
- .. ipython:: python
-
- df_iter = pd.DataFrame([[1, 1.0]], columns=['x', 'y'])
- row = next(df_iter.iterrows())[1]
- print(row['x'].dtype)
- print(df_iter['x'].dtype)
-
itertuples
~~~~~~~~~~
-The :meth:`~DataFrame.itertuples` method will return an iterator yielding a tuple for each row in the
-DataFrame. The first element of the tuple will be the row's corresponding index
-value, while the remaining values are the row values proper.
+The :meth:`~DataFrame.itertuples` method will return an iterator
+yielding a tuple for each row in the DataFrame. The first element
+of the tuple will be the row's corresponding index value,
+while the remaining values are the row values.
For instance,
.. ipython:: python
- for r in df2.itertuples():
- print(r)
+ for row in df.itertuples():
+ print(row)
+
+This method does not convert the row to a Series object but just returns the
+values inside a tuple. Therefore, :meth:`~DataFrame.itertuples` preserves the
+data type of the values and is generally faster as :meth:`~DataFrame.iterrows`.
.. _basics.dt_accessors:
.dt accessor
-~~~~~~~~~~~~
+------------
``Series`` has an accessor to succinctly return datetime like properties for the
*values* of the Series, if its a datetime/period like Series.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d52a859086aae..f6ea00b3714af 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -547,7 +547,15 @@ def _repr_html_(self):
return None
def iteritems(self):
- """Iterator over (column, series) pairs"""
+ """
+ Iterator over (column name, Series) pairs.
+
+ See also
+ --------
+ iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.
+ itertuples : Iterate over the rows of a DataFrame as tuples of the values.
+
+ """
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
@@ -557,25 +565,45 @@ def iteritems(self):
def iterrows(self):
"""
- Iterate over rows of DataFrame as (index, Series) pairs.
+ Iterate over the rows of a DataFrame as (index, Series) pairs.
Notes
-----
- * ``iterrows`` does **not** preserve dtypes across the rows (dtypes
- are preserved across columns for DataFrames). For example,
-
- >>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])
- >>> row = next(df.iterrows())[1]
- >>> print(row['x'].dtype)
- float64
- >>> print(df['x'].dtype)
- int64
+ 1. Because ``iterrows` returns a Series for each row,
+ it does **not** preserve dtypes across the rows (dtypes are
+ preserved across columns for DataFrames). For example,
+
+ >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
+ >>> row = next(df.iterrows())[1]
+ >>> row
+ int 1.0
+ float 1.5
+ Name: 0, dtype: float64
+ >>> print(row['int'].dtype)
+ float64
+ >>> print(df['int'].dtype)
+ int64
+
+ To preserve dtypes while iterating over the rows, it is better
+ to use :meth:`itertuples` which returns tuples of the values
+ and which is generally faster as ``iterrows``.
+
+ 2. You should **never modify** something you are iterating over.
+ This is not guaranteed to work in all cases. Depending on the
+ data types, the iterator returns a copy and not a view, and writing
+ to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
+
+ See also
+ --------
+ itertuples : Iterate over the rows of a DataFrame as tuples of the values.
+ iteritems : Iterate over (column name, Series) pairs.
+
"""
columns = self.columns
for k, v in zip(self.index, self.values):
@@ -584,8 +612,32 @@ def iterrows(self):
def itertuples(self, index=True):
"""
- Iterate over rows of DataFrame as tuples, with index value
- as first element of the tuple
+ Iterate over the rows of DataFrame as tuples, with index value
+ as first element of the tuple.
+
+ Parameters
+ ----------
+ index : boolean, default True
+ If True, return the index as the first element of the tuple.
+
+ See also
+ --------
+ iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.
+ iteritems : Iterate over (column name, Series) pairs.
+
+ Examples
+ --------
+
+ >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]}, index=['a', 'b'])
+ >>> df
+ col1 col2
+ a 1 0.1
+ b 2 0.2
+ >>> for row in df.itertuples():
+ ... print(row)
+ ('a', 1, 0.10000000000000001)
+ ('b', 2, 0.20000000000000001)
+
"""
arrays = []
if index:
| As this comes up once in a while, I tried to improve/clarify the docs somewhat.
Further suggestions to add or make it clearer are welcome!
xref #10334
closes #7194
| https://api.github.com/repos/pandas-dev/pandas/pulls/10680 | 2015-07-26T23:14:34Z | 2015-08-02T21:46:24Z | 2015-08-02T21:46:24Z | 2015-08-02T21:46:24Z |
BUG: Use stable algorithm for _nanvar. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 3f4a13a09e5e6..cafb7eb1d8446 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -923,3 +923,5 @@ Bug Fixes
- Bug in plotting functions may raise ``IndexError`` when plotted on ``GridSpec`` (:issue:`10819`)
- Bug in plot result may show unnecessary minor ticklabels (:issue:`10657`)
- Bug in ``groupby`` incorrect computation for aggregation on ``DataFrame`` with ``NaT`` (E.g ``first``, ``last``, ``min``). (:issue:`10590`)
+- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`)
+- Bug in ``.var()`` causing roundoff errors for highly similar values (:issue:`10242`)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 447a273a1e171..e5ef5456e4977 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -346,11 +346,22 @@ def _get_counts_nanvar(mask, axis, ddof, dtype=float):
return count, d
-def _nanvar(values, axis=None, skipna=True, ddof=1):
- # private nanvar calculator
+@disallow('M8')
+@bottleneck_switch(ddof=1)
+def nanstd(values, axis=None, skipna=True, ddof=1):
+ result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
+ return _wrap_results(result, values.dtype)
+
+
+@disallow('M8')
+@bottleneck_switch(ddof=1)
+def nanvar(values, axis=None, skipna=True, ddof=1):
+
+ dtype = values.dtype
mask = isnull(values)
if is_any_int_dtype(values):
values = values.astype('f8')
+ values[mask] = np.nan
if is_float_dtype(values):
count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
@@ -361,29 +372,27 @@ def _nanvar(values, axis=None, skipna=True, ddof=1):
values = values.copy()
np.putmask(values, mask, 0)
- X = _ensure_numeric(values.sum(axis))
- XX = _ensure_numeric((values ** 2).sum(axis))
- result = np.fabs((XX - X * X / count) / d)
- return result
-
-@disallow('M8')
-@bottleneck_switch(ddof=1)
-def nanstd(values, axis=None, skipna=True, ddof=1):
-
- result = np.sqrt(_nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
+ # Compute variance via two-pass algorithm, which is stable against
+ # cancellation errors and relatively accurate for small numbers of
+ # observations.
+ #
+ # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
+ if axis is not None:
+ avg = np.expand_dims(avg, axis)
+ sqr = _ensure_numeric((avg - values) ** 2)
+ np.putmask(sqr, mask, 0)
+ result = sqr.sum(axis=axis, dtype=np.float64) / d
+
+ # Return variance as np.float64 (the datatype used in the accumulator),
+ # unless we were dealing with a float array, in which case use the same
+ # precision as the original values array.
+ if is_float_dtype(dtype):
+ result = result.astype(dtype)
return _wrap_results(result, values.dtype)
-@disallow('M8','m8')
-@bottleneck_switch(ddof=1)
-def nanvar(values, axis=None, skipna=True, ddof=1):
- # we are going to allow timedelta64[ns] here
- # but NOT going to coerce them to the Timedelta type
- # as this could cause overflow
- # so var cannot be computed (but std can!)
- return _nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
-
-@disallow('M8','m8')
+@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
@@ -391,6 +400,7 @@ def nansem(values, axis=None, skipna=True, ddof=1):
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
+ var = nanvar(values, axis, skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index aea165b907c05..16620671f71f0 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -12589,11 +12589,11 @@ def test_numeric_only_flag(self):
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
assert_series_equal(expected, result)
- assertRaisesRegexp(TypeError, 'float',
- getattr(df1, meth), axis=1, numeric_only=False)
-
- assertRaisesRegexp(TypeError, 'float',
- getattr(df2, meth), axis=1, numeric_only=False)
+ try:
+ getattr(df1, meth)(axis=1, numeric_only=False)
+ getattr(df2, meth)(axis=1, numeric_only=False)
+ except (TypeError, ValueError) as e:
+ self.assertIn('float', str(e))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index a903b76b3ac7f..8dc5f886eb4c3 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -182,13 +182,13 @@ def check_fun_data(self, testfunc, targfunc,
**kwargs)
self.check_results(targ, res, axis)
if skipna:
- res = testfunc(testarval, axis=axis)
+ res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis)
if axis is None:
- res = testfunc(testarval, skipna=skipna)
+ res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis)
if skipna and axis is None:
- res = testfunc(testarval)
+ res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis)
except BaseException as exc:
exc.args += ('axis: %s of %s' % (axis, testarval.ndim-1),
@@ -291,12 +291,13 @@ def check_funs_ddof(self, testfunc, targfunc,
allow_date=False, allow_tdelta=False, allow_obj=True,):
for ddof in range(3):
try:
- self.check_funs(self, testfunc, targfunc,
+ self.check_funs(testfunc, targfunc,
allow_complex, allow_all_nan, allow_str,
allow_date, allow_tdelta, allow_obj,
ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof,)
+ raise
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
@@ -366,16 +367,29 @@ def test_nanmedian(self):
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var,
- allow_complex=False, allow_date=False, allow_tdelta=False)
+ allow_complex=False,
+ allow_str=False,
+ allow_date=False,
+ allow_tdelta=True,
+ allow_obj='convert')
def test_nanstd(self):
self.check_funs_ddof(nanops.nanstd, np.std,
- allow_complex=False, allow_date=False, allow_tdelta=True)
+ allow_complex=False,
+ allow_str=False,
+ allow_date=False,
+ allow_tdelta=True,
+ allow_obj='convert')
def test_nansem(self):
tm.skip_if_no_package('scipy.stats')
- self.check_funs_ddof(nanops.nansem, np.var,
- allow_complex=False, allow_date=False, allow_tdelta=False)
+ from scipy.stats import sem
+ self.check_funs_ddof(nanops.nansem, sem,
+ allow_complex=False,
+ allow_str=False,
+ allow_date=False,
+ allow_tdelta=True,
+ allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
@@ -817,6 +831,121 @@ def test_non_convertable_values(self):
lambda: nanops._ensure_numeric([]))
+class TestNanvarFixedValues(tm.TestCase):
+
+ def setUp(self):
+ # Samples from a normal distribution.
+ self.variance = variance = 3.0
+ self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
+
+ def test_nanvar_all_finite(self):
+ samples = self.samples
+ actual_variance = nanops.nanvar(samples)
+ np.testing.assert_almost_equal(
+ actual_variance, self.variance, decimal=2)
+
+ def test_nanvar_nans(self):
+ samples = np.nan * np.ones(2 * self.samples.shape[0])
+ samples[::2] = self.samples
+
+ actual_variance = nanops.nanvar(samples, skipna=True)
+ np.testing.assert_almost_equal(
+ actual_variance, self.variance, decimal=2)
+
+ actual_variance = nanops.nanvar(samples, skipna=False)
+ np.testing.assert_almost_equal(
+ actual_variance, np.nan, decimal=2)
+
+ def test_nanstd_nans(self):
+ samples = np.nan * np.ones(2 * self.samples.shape[0])
+ samples[::2] = self.samples
+
+ actual_std = nanops.nanstd(samples, skipna=True)
+ np.testing.assert_almost_equal(
+ actual_std, self.variance ** 0.5, decimal=2)
+
+ actual_std = nanops.nanvar(samples, skipna=False)
+ np.testing.assert_almost_equal(
+ actual_std, np.nan, decimal=2)
+
+ def test_nanvar_axis(self):
+ # Generate some sample data.
+ samples_norm = self.samples
+ samples_unif = self.prng.uniform(size=samples_norm.shape[0])
+ samples = np.vstack([samples_norm, samples_unif])
+
+ actual_variance = nanops.nanvar(samples, axis=1)
+ np.testing.assert_array_almost_equal(
+ actual_variance, np.array([self.variance, 1.0 / 12]), decimal=2)
+
+ def test_nanvar_ddof(self):
+ n = 5
+ samples = self.prng.uniform(size=(10000, n+1))
+ samples[:, -1] = np.nan # Force use of our own algorithm.
+
+ variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
+ variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
+ variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
+
+ # The unbiased estimate.
+ var = 1.0 / 12
+ np.testing.assert_almost_equal(variance_1, var, decimal=2)
+ # The underestimated variance.
+ np.testing.assert_almost_equal(
+ variance_0, (n - 1.0) / n * var, decimal=2)
+ # The overestimated variance.
+ np.testing.assert_almost_equal(
+ variance_2, (n - 1.0) / (n - 2.0) * var, decimal=2)
+
+ def test_ground_truth(self):
+ # Test against values that were precomputed with Numpy.
+ samples = np.empty((4, 4))
+ samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287],
+ [0.72980153, 0.03109364, 0.99155171],
+ [0.09317602, 0.60078248, 0.15871292]])
+ samples[3] = samples[:, 3] = np.nan
+
+ # Actual variances along axis=0, 1 for ddof=0, 1, 2
+ variance = np.array(
+ [[[0.13762259, 0.05619224, 0.11568816],
+ [0.20643388, 0.08428837, 0.17353224],
+ [0.41286776, 0.16857673, 0.34706449]],
+ [[0.09519783, 0.16435395, 0.05082054],
+ [0.14279674, 0.24653093, 0.07623082],
+ [0.28559348, 0.49306186, 0.15246163]]]
+ )
+
+ # Test nanvar.
+ for axis in range(2):
+ for ddof in range(3):
+ var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
+ np.testing.assert_array_almost_equal(
+ var[:3], variance[axis, ddof]
+ )
+ np.testing.assert_equal(var[3], np.nan)
+
+ # Test nanstd.
+ for axis in range(2):
+ for ddof in range(3):
+ std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
+ np.testing.assert_array_almost_equal(
+ std[:3], variance[axis, ddof] ** 0.5
+ )
+ np.testing.assert_equal(std[3], np.nan)
+
+ def test_nanstd_roundoff(self):
+ # Regression test for GH 10242 (test data taken from GH 10489). Ensure
+ # that variance is stable.
+ data = Series(766897346 * np.ones(10))
+ for ddof in range(3):
+ result = data.std(ddof=ddof)
+ self.assertEqual(result, 0.0)
+
+ @property
+ def prng(self):
+ return np.random.RandomState(1234)
+
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure',
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 06533a4ef85ff..a2a8f1484f70e 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -684,8 +684,8 @@ def test_timedelta_ops(self):
self.assertEqual(result[0], expected)
# invalid ops
- for op in ['skew','kurt','sem','var','prod']:
- self.assertRaises(TypeError, lambda : getattr(td,op)())
+ for op in ['skew','kurt','sem','prod']:
+ self.assertRaises(TypeError, getattr(td,op))
# GH 10040
# make sure NaT is properly handled by median()
| closes #10242
This PR replaces the sum-of-squares algorithm used to compute the variance by a more stable algorithm. The algorithm here is essentially the same as used in numpy 1.8 and up, and I've added a TODO to replace the implementation with a direct call to numpy when that version is the default.
Somewhat counter to the discussion in #10242, I chose not to go with the Welford algorithm, for two reasons: numpy, and the fact that `_nanvar` needs to be able to deal with arrays of different shape, which is tricky to get right in Cython.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10679 | 2015-07-26T18:00:31Z | 2015-09-04T12:15:22Z | null | 2015-09-04T19:14:29Z |
DEPR: add stacklevel to FutureWarnings (GH9584) | diff --git a/pandas/computation/align.py b/pandas/computation/align.py
index 9834dd1a9e7fc..b5f730378c3cf 100644
--- a/pandas/computation/align.py
+++ b/pandas/computation/align.py
@@ -101,7 +101,8 @@ def _align_core(terms):
'than an order of magnitude on term {1!r}, '
'by more than {2:.4g}; performance may '
'suffer'.format(axis, terms[i].name, ordm),
- category=pd.io.common.PerformanceWarning)
+ category=pd.io.common.PerformanceWarning,
+ stacklevel=6)
if transpose:
f = partial(ti.reindex, index=reindexer, copy=False)
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 4290be3e1abba..bc4e60f70f2b4 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -535,7 +535,7 @@ def parse_back_compat(self, w, op=None, value=None):
w, op, value = w
warnings.warn("passing a tuple into Expr is deprecated, "
"pass the where as a single string",
- DeprecationWarning)
+ DeprecationWarning, stacklevel=10)
if op is not None:
if not isinstance(w, string_types):
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 36d31d493b10d..8f1dab4f8b511 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -125,8 +125,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex
"""
if order is not None:
- warn("order is deprecated."
- "See https://github.com/pydata/pandas/issues/6926", FutureWarning)
+ msg = "order is deprecated. See https://github.com/pydata/pandas/issues/6926"
+ warn(msg, FutureWarning, stacklevel=2)
from pandas.core.index import Index
from pandas.core.series import Series
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 4a6a26f21b5bf..2f465ded12bd6 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -414,7 +414,7 @@ def _get_labels(self):
Deprecated, use .codes!
"""
- warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=3)
+ warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=2)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
@@ -456,7 +456,7 @@ def _validate_categories(cls, categories, fastpath=False):
# NaNs in cats deprecated in 0.17, remove in 0.18 or 0.19 GH 10748
msg = ('\nSetting NaNs in `categories` is deprecated and '
'will be removed in a future version of pandas.')
- warn(msg, FutureWarning, stacklevel=5)
+ warn(msg, FutureWarning, stacklevel=3)
# categories must be unique
@@ -491,12 +491,12 @@ def _get_categories(self):
def _set_levels(self, levels):
""" set new levels (deprecated, use "categories") """
- warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=3)
+ warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
self.categories = levels
def _get_levels(self):
""" Gets the levels (deprecated, use "categories") """
- warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=3)
+ warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
return self.categories
# TODO: Remove after deprecation period in 2017/ after 0.18
@@ -507,7 +507,7 @@ def _get_levels(self):
def _set_ordered(self, value):
""" Sets the ordered attribute to the boolean value """
warn("Setting 'ordered' directly is deprecated, use 'set_ordered'", FutureWarning,
- stacklevel=3)
+ stacklevel=2)
self.set_ordered(value, inplace=True)
def set_ordered(self, value, inplace=False):
@@ -1200,7 +1200,7 @@ def order(self, inplace=False, ascending=True, na_position='last'):
Category.sort
"""
warn("order is deprecated, use sort_values(...)",
- FutureWarning, stacklevel=3)
+ FutureWarning, stacklevel=2)
return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position)
def sort(self, inplace=True, ascending=True, na_position='last'):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1cf2de69b2a66..c5c0f9e82fa94 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1468,7 +1468,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
if colSpace is not None: # pragma: no cover
warnings.warn("colSpace is deprecated, use col_space",
- FutureWarning)
+ FutureWarning, stacklevel=2)
col_space = colSpace
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
@@ -1517,7 +1517,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
if colSpace is not None: # pragma: no cover
warnings.warn("colSpace is deprecated, use col_space",
- FutureWarning)
+ FutureWarning, stacklevel=2)
col_space = colSpace
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
@@ -2919,7 +2919,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
return result
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
- @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
+ @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
@@ -2953,7 +2953,7 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False):
return self[-duplicated]
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
- @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
+ @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9c170286006f2..d3a63f9f5d851 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -706,7 +706,7 @@ def iterkv(self, *args, **kwargs):
"iteritems alias used to get around 2to3. Deprecated"
warnings.warn("iterkv is deprecated and will be removed in a future "
"release, use ``iteritems`` instead.",
- FutureWarning)
+ FutureWarning, stacklevel=2)
return self.iteritems(*args, **kwargs)
def __len__(self):
@@ -3376,11 +3376,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
-
+
Examples
--------
-
+
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
@@ -3409,11 +3409,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
- which it labels. For example, in the original series the
+ which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
- value in the resampled bucket with the label``2000-01-01 00:03:00``
+ value in the resampled bucket with the label``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
- To include this value close the right side of the bin interval as
+ To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', how='sum', label='right')
@@ -3424,7 +3424,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
-
+
>>> series.resample('3T', how='sum', label='right', closed='right')
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
@@ -3453,7 +3453,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
- Upsample the series into 30 second bins and fill the
+ Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S', fill_method='bfill')[0:5]
@@ -3468,7 +3468,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
>>> def custom_resampler(array_like):
... return np.sum(array_like)+5
-
+
>>> series.resample('3T', how=custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 4d0b395a401ac..14ba2dea0b76c 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -842,14 +842,14 @@ def to_int():
elif is_float(key):
key = to_int()
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
- type(self).__name__),FutureWarning, stacklevel=8)
+ type(self).__name__), FutureWarning, stacklevel=5)
return key
return self._invalid_indexer('label', key)
if is_float(key):
if not self.is_floating():
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
- type(self).__name__),FutureWarning, stacklevel=8)
+ type(self).__name__), FutureWarning, stacklevel=3)
return to_int()
return key
@@ -887,7 +887,7 @@ def f(c):
# warn if it's a convertible float
if v == int(v):
warnings.warn("slice indexers when using iloc should be integers "
- "and not floating point",FutureWarning)
+ "and not floating point", FutureWarning, stacklevel=7)
return int(v)
self._invalid_indexer('slice {0} value'.format(c), v)
@@ -1415,7 +1415,7 @@ def argsort(self, *args, **kwargs):
def __add__(self, other):
if com.is_list_like(other):
warnings.warn("using '+' to provide set union with Indexes is deprecated, "
- "use '|' or .union()", FutureWarning)
+ "use '|' or .union()", FutureWarning, stacklevel=2)
if isinstance(other, Index):
return self.union(other)
return Index(np.array(self) + other)
@@ -1423,14 +1423,14 @@ def __add__(self, other):
def __radd__(self, other):
if com.is_list_like(other):
warnings.warn("using '+' to provide set union with Indexes is deprecated, "
- "use '|' or .union()", FutureWarning)
+ "use '|' or .union()", FutureWarning, stacklevel=2)
return Index(other + np.array(self))
__iadd__ = __add__
def __sub__(self, other):
warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
- "use .difference()",FutureWarning)
+ "use .difference()",FutureWarning, stacklevel=2)
return self.difference(other)
def __and__(self, other):
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 7837fb60da9d6..dddc1f4898908 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -204,7 +204,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
if regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
- " groups, use str.extract.", UserWarning)
+ " groups, use str.extract.", UserWarning, stacklevel=3)
f = lambda x: bool(regex.search(x))
else:
@@ -377,11 +377,12 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
# Do this first, to make sure it happens even if the re.compile
# raises below.
warnings.warn("In future versions of pandas, match will change to"
- " always return a bool indexer.", UserWarning)
+ " always return a bool indexer.", FutureWarning,
+ stacklevel=3)
if as_indexer and regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
- " groups, use str.extract.", UserWarning)
+ " groups, use str.extract.", UserWarning, stacklevel=3)
# If not as_indexer and regex.groups == 0, this returns empty lists
# and is basically useless, so we will not warn.
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 829ff4f28ca1b..1a4c45628a256 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -608,7 +608,7 @@ def __init__(self, symbol, data_source=None):
self.symbol = symbol.upper()
if data_source is None:
warnings.warn("Options(symbol) is deprecated, use Options(symbol,"
- " data_source) instead", FutureWarning)
+ " data_source) instead", FutureWarning, stacklevel=2)
data_source = "yahoo"
if data_source != "yahoo":
raise NotImplementedError("currently only yahoo supported")
@@ -1072,7 +1072,8 @@ def get_forward_data(self, months, call=True, put=False, near=False,
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
- warnings.warn("get_forward_data() is deprecated", FutureWarning)
+ warnings.warn("get_forward_data() is deprecated", FutureWarning,
+ stacklevel=2)
end_date = dt.date.today() + MonthEnd(months)
dates = (date for date in self.expiry_dates if date <= end_date.date())
data = self._get_data_in_date_range(dates, call=call, put=put)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6801e8935e079..f0c994ba17e27 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -647,7 +647,7 @@ def _clean_options(self, options, engine):
warnings.warn(("Falling back to the 'python' engine because"
" {0}; you can avoid this warning by specifying"
" engine='python'.").format(fallback_reason),
- ParserWarning)
+ ParserWarning, stacklevel=5)
index_col = options['index_col']
names = options['names']
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ea0a59ce2ab31..b5a3577b36d4c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1596,7 +1596,7 @@ def update_info(self, info):
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
- warnings.warn(ws, AttributeConflictWarning)
+ warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
@@ -2581,7 +2581,7 @@ def write_array(self, key, value, items=None):
except:
pass
ws = performance_doc % (inferred_type, key, items)
- warnings.warn(ws, PerformanceWarning)
+ warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
@@ -3716,7 +3716,7 @@ def read(self, where=None, columns=None, **kwargs):
objs.append(obj)
else:
- warnings.warn(duplicate_doc, DuplicateWarning)
+ warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b587ec128c016..c0b69e435f494 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -200,7 +200,7 @@ def tquery(sql, con=None, cur=None, retry=True):
warnings.warn(
"tquery is deprecated, and will be removed in future versions. "
"You can use ``execute(...).fetchall()`` instead.",
- FutureWarning)
+ FutureWarning, stacklevel=2)
cur = execute(sql, con, cur=cur)
result = _safe_fetch(cur)
@@ -255,7 +255,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=None):
warnings.warn(
"uquery is deprecated, and will be removed in future versions. "
"You can use ``execute(...).rowcount`` instead.",
- FutureWarning)
+ FutureWarning, stacklevel=2)
cur = execute(sql, con, cur=cur, params=params)
@@ -328,7 +328,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
read_sql
"""
-
+
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
@@ -364,7 +364,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
----------
sql : string
SQL query to be executed
- con : SQLAlchemy connectable(engine/connection) or database string URI
+ con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
@@ -618,7 +618,7 @@ def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
return SQLDatabase(con, schema=schema, meta=meta)
else:
if flavor == 'mysql':
- warnings.warn(_MYSQL_WARNING, FutureWarning)
+ warnings.warn(_MYSQL_WARNING, FutureWarning, stacklevel=3)
return SQLiteDatabase(con, flavor, is_cursor=is_cursor)
@@ -957,7 +957,7 @@ def _sqlalchemy_type(self, col):
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
- "database.", UserWarning)
+ "database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
@@ -1409,7 +1409,7 @@ def _create_table_setup(self):
pat = re.compile('\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
- warnings.warn(_SAFE_NAMES_WARNING)
+ warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
flv = self.pd_sql.flavor
escape = _SQL_GET_IDENTIFIER[flv]
@@ -1450,7 +1450,7 @@ def _sql_type_name(self, col):
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
- "database.", UserWarning)
+ "database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
@@ -1672,7 +1672,8 @@ def get_schema(frame, name, flavor='sqlite', keys=None, con=None, dtype=None):
def read_frame(*args, **kwargs):
"""DEPRECATED - use read_sql
"""
- warnings.warn("read_frame is deprecated, use read_sql", FutureWarning)
+ warnings.warn("read_frame is deprecated, use read_sql", FutureWarning,
+ stacklevel=2)
return read_sql(*args, **kwargs)
@@ -1680,7 +1681,8 @@ def read_frame(*args, **kwargs):
def frame_query(*args, **kwargs):
"""DEPRECATED - use read_sql
"""
- warnings.warn("frame_query is deprecated, use read_sql", FutureWarning)
+ warnings.warn("frame_query is deprecated, use read_sql", FutureWarning,
+ stacklevel=2)
return read_sql(*args, **kwargs)
@@ -1718,7 +1720,8 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
pandas.DataFrame.to_sql
"""
- warnings.warn("write_frame is deprecated, use to_sql", FutureWarning)
+ warnings.warn("write_frame is deprecated, use to_sql", FutureWarning,
+ stacklevel=2)
# for backwards compatibility, set index=False when not specified
index = kwargs.pop('index', False)
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index b4f1e6a429198..0fb3237152db0 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -122,7 +122,7 @@ def _maybe_remove(store, key):
pass
-def compat_assert_produces_warning(w,f):
+def compat_assert_produces_warning(w, f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
@@ -2516,7 +2516,8 @@ def test_terms(self):
[ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
]
for t in terms:
- with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+ with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ check_stacklevel=False):
Term(t)
# valid terms
@@ -2609,7 +2610,8 @@ def test_backwards_compat_without_term_object(self):
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+ with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ check_stacklevel=not compat.PY3):
result = store.select('wp', [('major_axis>20000102'),
('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
@@ -2628,20 +2630,24 @@ def test_backwards_compat_without_term_object(self):
store.append('wp',wp)
# stringified datetimes
- with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+ with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ check_stacklevel=not compat.PY3):
result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+ with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ check_stacklevel=not compat.PY3):
result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+ with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ check_stacklevel=not compat.PY3):
result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+ with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+ check_stacklevel=not compat.PY3):
result = store.select('wp', [('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
@@ -4528,7 +4534,7 @@ def f():
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
- compat_assert_produces_warning(PerformanceWarning,f)
+ compat_assert_produces_warning(PerformanceWarning, f)
def test_store_datetime_mixed(self):
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 5baef2e4f0225..c2916f2c0cfb8 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -1838,7 +1838,7 @@ def _concatenate_chunks(list chunks):
warning_message = " ".join(["Columns (%s) have mixed types." % warning_names,
"Specify dtype option on import or set low_memory=False."
])
- warnings.warn(warning_message, DtypeWarning)
+ warnings.warn(warning_message, DtypeWarning, stacklevel=8)
return result
#----------------------------------------------------------------------
diff --git a/pandas/rpy/__init__.py b/pandas/rpy/__init__.py
index bad7ebc580ce2..8c92ce5842e15 100644
--- a/pandas/rpy/__init__.py
+++ b/pandas/rpy/__init__.py
@@ -8,7 +8,7 @@
"like rpy2. "
"\nSee here for a guide on how to port your code to rpy2: "
"http://pandas.pydata.org/pandas-docs/stable/r_interface.html",
- FutureWarning)
+ FutureWarning, stacklevel=2)
try:
from .common import importr, r, load_data
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 586d507b27493..2e13082ee5366 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -122,7 +122,7 @@
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
-the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
+the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
"""
@@ -344,7 +344,8 @@ def dataframe_from_int_dict(data, frame_template):
def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None,
freq=None, center=False):
import warnings
- warnings.warn("rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)", FutureWarning)
+ msg = "rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)"
+ warnings.warn(msg, FutureWarning, stacklevel=2)
return rolling_corr(df1, df2, window=window, min_periods=min_periods,
freq=freq, center=center,
pairwise=True)
@@ -399,7 +400,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
if center:
result = _center_window(result, window, axis)
-
+
return return_hook(result)
@@ -998,7 +999,8 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
@Appender(_doc_template)
def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None):
import warnings
- warnings.warn("expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)", FutureWarning)
+ msg = "expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)"
+ warnings.warn(msg, FutureWarning, stacklevel=2)
return expanding_corr(df1, df2, min_periods=min_periods,
freq=freq, pairwise=True)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index d847638ff105e..f687ecbef35cb 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1590,7 +1590,7 @@ def test_nan_handling(self):
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a","b","c","a"]))
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a","b",np.nan]
self.assert_numpy_array_equal(s3.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 19fd45cdf6ad2..3bd76dfb9da61 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -382,32 +382,32 @@ def test_bool_ops_warn_on_arithmetic(self):
fe = getattr(operator, sub_funcs[subs[op]])
with tm.use_numexpr(True, min_elements=5):
- with tm.assert_produces_warning():
+ with tm.assert_produces_warning(check_stacklevel=False):
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
- with tm.assert_produces_warning():
+ with tm.assert_produces_warning(check_stacklevel=False):
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
- with tm.assert_produces_warning():
+ with tm.assert_produces_warning(check_stacklevel=False):
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
- with tm.assert_produces_warning():
+ with tm.assert_produces_warning(check_stacklevel=False):
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
- with tm.assert_produces_warning():
+ with tm.assert_produces_warning(check_stacklevel=False):
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
- with tm.assert_produces_warning():
+ with tm.assert_produces_warning(check_stacklevel=False):
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 7877ee3c5a6cc..6c92bb7095d8b 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2734,7 +2734,7 @@ def test_round_dataframe(self):
non_int_round_dict = {'col1': 1, 'col2': 0.5}
if sys.version < LooseVersion('2.7'):
# np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
- with self.assert_produces_warning(DeprecationWarning):
+ with self.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
df.round(non_int_round_dict)
else:
with self.assertRaises(TypeError):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 659a5925be6f1..aea165b907c05 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1440,7 +1440,8 @@ def test_getitem_setitem_float_labels(self):
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
- with tm.assert_produces_warning(FutureWarning):
+ # stacklevel=False -> needed stacklevel depends on index type
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.iloc[1.0:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
diff --git a/pandas/tests/test_graphics_others.py b/pandas/tests/test_graphics_others.py
index f461a8ab624dc..641180c8010c0 100644
--- a/pandas/tests/test_graphics_others.py
+++ b/pandas/tests/test_graphics_others.py
@@ -677,7 +677,7 @@ def test_grouped_box_return_type(self):
expected_keys=['height', 'weight', 'category'])
# now for groupby
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.groupby('gender').boxplot()
self._check_box_return_type(result, 'dict', expected_keys=['Male', 'Female'])
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 30a5716831087..9b2c1bf1a09ee 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1884,7 +1884,7 @@ def test_contains(self):
self.assertFalse(0 in ci)
self.assertFalse(1 in ci)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef') + [np.nan])
self.assertFalse(np.nan in ci)
@@ -2101,7 +2101,7 @@ def test_equals(self):
# tests
# make sure that we are testing for category inclusion properly
self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b']).equals(list('aabca')))
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b',np.nan]).equals(list('aabca')))
self.assertFalse(CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b']).equals(list('aabca')))
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 6a9d4096ad4b3..c48807365913c 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -4180,11 +4180,16 @@ def test_slice_indexer(self):
def check_iloc_compat(s):
# invalid type for iloc (but works with a warning)
- with self.assert_produces_warning(FutureWarning):
+ # check_stacklevel=False -> impossible to get it right for all
+ # index types
+ with self.assert_produces_warning(
+ FutureWarning, check_stacklevel=False):
s.iloc[6.0:8]
- with self.assert_produces_warning(FutureWarning):
+ with self.assert_produces_warning(
+ FutureWarning, check_stacklevel=False):
s.iloc[6.0:8.0]
- with self.assert_produces_warning(FutureWarning):
+ with self.assert_produces_warning(
+ FutureWarning, check_stacklevel=False):
s.iloc[6:8.0]
def check_slicing_positional(index):
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 7886a63c6df46..31623d5c277c4 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1896,10 +1896,8 @@ def test_match_findall_flags(self):
pat = pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter('always')
+ with tm.assert_produces_warning(FutureWarning):
result = data.str.match(pat, flags=re.IGNORECASE)
- assert issubclass(w[-1].category, UserWarning)
self.assertEqual(result[0], ('dave', 'google', 'com'))
result = data.str.findall(pat, flags=re.IGNORECASE)
@@ -1908,10 +1906,8 @@ def test_match_findall_flags(self):
result = data.str.count(pat, flags=re.IGNORECASE)
self.assertEqual(result[0], 1)
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter('always')
+ with tm.assert_produces_warning(UserWarning):
result = data.str.contains(pat, flags=re.IGNORECASE)
- assert issubclass(w[-1].category, UserWarning)
self.assertEqual(result[0], True)
def test_encode_decode(self):
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 466ad3f220020..2b5443e6ff0d2 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -597,22 +597,22 @@ class TestDeprecatedTests(tm.TestCase):
def test_warning(self):
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertEquals(1, 1)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertNotEquals(1, 2)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assert_(True)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertAlmostEquals(1.0, 1.0000000001)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertNotAlmostEquals(1, 2)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_isinstance(Series([1, 2]), Series, msg='xxx')
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 041c747286c51..e0d13287fcf3b 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -592,7 +592,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
-@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
+@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
@@ -2624,7 +2624,7 @@ def plot_group(keys, values, ax):
"now, set return_type='axes'.\n To keep the previous "
"behavior and silence this warning, set "
"return_type='dict'.")
- warnings.warn(msg, FutureWarning)
+ warnings.warn(msg, FutureWarning, stacklevel=3)
return_type = 'dict'
if ax is None:
ax = _gca()
@@ -2972,7 +2972,7 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
if figsize == 'default':
# allowed to specify mpl default with 'default'
warnings.warn("figsize='default' is deprecated. Specify figure"
- "size by tuple instead", FutureWarning)
+ "size by tuple instead", FutureWarning, stacklevel=4)
figsize = None
grouped = data.groupby(by)
diff --git a/pandas/tools/rplot.py b/pandas/tools/rplot.py
index 5996fceff8877..bc834689ffce8 100644
--- a/pandas/tools/rplot.py
+++ b/pandas/tools/rplot.py
@@ -17,7 +17,7 @@
"like seaborn for similar but more refined functionality. \n\n"
"See our docs http://pandas.pydata.org/pandas-docs/stable/visualization.html#rplot "
"for some example how to convert your existing code to these "
- "packages.", FutureWarning)
+ "packages.", FutureWarning, stacklevel=2)
class Scale:
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 912a0c3f88405..a6b289b76af11 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -398,7 +398,7 @@ def __add__(self, other):
raise TypeError("cannot add TimedeltaIndex and {typ}".format(typ=type(other)))
elif isinstance(other, Index):
warnings.warn("using '+' to provide set union with datetimelike Indexes is deprecated, "
- "use .union()",FutureWarning)
+ "use .union()",FutureWarning, stacklevel=2)
return self.union(other)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64, tslib.Timedelta)):
return self._add_delta(other)
@@ -423,7 +423,7 @@ def __sub__(self, other):
return self._add_delta(-other)
elif isinstance(other, Index):
warnings.warn("using '-' to provide set differences with datetimelike Indexes is deprecated, "
- "use .difference()",FutureWarning)
+ "use .difference()",FutureWarning, stacklevel=2)
return self.difference(other)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64, tslib.Timedelta)):
return self._add_delta(-other)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 9349e440eb9e9..e471e66616711 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -525,12 +525,12 @@ def get_offset(name):
if name in _rule_aliases:
new = _rule_aliases[name]
warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
- FutureWarning)
+ FutureWarning, stacklevel=2)
name = new
elif name.lower() in _rule_aliases:
new = _rule_aliases[name.lower()]
warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
- FutureWarning)
+ FutureWarning, stacklevel=2)
name = new
name = _lite_rule_alias.get(name, name)
@@ -540,7 +540,7 @@ def get_offset(name):
if name in _rule_aliases:
new = _rule_aliases[name]
warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
- FutureWarning)
+ FutureWarning, stacklevel=2)
name = new
name = _lite_rule_alias.get(name, name)
@@ -784,7 +784,7 @@ def _period_str_to_code(freqstr):
if freqstr in _rule_aliases:
new = _rule_aliases[freqstr]
warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, new),
- FutureWarning)
+ FutureWarning, stacklevel=6)
freqstr = new
freqstr = _lite_rule_alias.get(freqstr, freqstr)
@@ -793,7 +793,7 @@ def _period_str_to_code(freqstr):
if lower in _rule_aliases:
new = _rule_aliases[lower]
warnings.warn(_LEGACY_FREQ_WARNING.format(lower, new),
- FutureWarning)
+ FutureWarning, stacklevel=6)
freqstr = new
freqstr = _lite_rule_alias.get(lower, freqstr)
@@ -805,7 +805,7 @@ def _period_str_to_code(freqstr):
try:
alias = _period_alias_dict[freqstr]
warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, alias),
- FutureWarning)
+ FutureWarning, stacklevel=3)
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index b783459cbfe95..a642c12786940 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -589,7 +589,7 @@ def test_series(self):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
for freq in ['Y']:
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
@@ -610,7 +610,7 @@ def test_legacy_offset_warnings(self):
exp = frequencies.get_offset(v)
self.assertEqual(result, exp)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
idx = date_range('2011-01-01', periods=5, freq=k)
exp = date_range('2011-01-01', periods=5, freq=v)
self.assert_index_equal(idx, exp)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index b3ec88f4d0988..fada4a966c10b 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -3670,14 +3670,14 @@ def test_get_standard_freq():
assert fstr == get_standard_freq('1w')
assert fstr == get_standard_freq(('W', 1))
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = get_standard_freq('WeEk')
assert fstr == result
fstr = get_standard_freq('5Q')
assert fstr == get_standard_freq('5q')
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = get_standard_freq('5QuarTer')
assert fstr == result
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index c828d6d7effb6..4b5d5dfedeee7 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -2715,7 +2715,7 @@ def test_to_period_monthish(self):
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rng = date_range('01-Jan-2012', periods=8, freq='EOM')
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 4544c3cdb8919..49806491ed1c6 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -10,12 +10,12 @@ def deprecate(name, alternative, alt_name=None):
def wrapper(*args, **kwargs):
warnings.warn("%s is deprecated. Use %s instead" % (name, alt_name),
- FutureWarning)
+ FutureWarning, stacklevel=2)
return alternative(*args, **kwargs)
return wrapper
-def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):
+def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
"""Decorator to deprecate a keyword argument of a function
Parameters
@@ -79,7 +79,7 @@ def wrapper(*args, **kwargs):
msg = "the '%s' keyword is deprecated, " \
"use '%s' instead" % (old_arg_name, new_arg_name)
- warnings.warn(msg, FutureWarning)
+ warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name, None) is not None:
msg = "Can only specify '%s' or '%s', not both" % \
(old_arg_name, new_arg_name)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index aaa83da036c2f..a195455c116fb 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1918,7 +1918,8 @@ def handle_success(self, exc_type, exc_value, traceback):
@contextmanager
-def assert_produces_warning(expected_warning=Warning, filter_level="always", clear=None):
+def assert_produces_warning(expected_warning=Warning, filter_level="always",
+ clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
@@ -1966,6 +1967,16 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always", cle
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
+
+ if check_stacklevel and issubclass(actual_warning.category,
+ (FutureWarning, DeprecationWarning)):
+ from inspect import getframeinfo, stack
+ caller = getframeinfo(stack()[2][0])
+ msg = ("Warning not set with correct stacklevel. File were warning"
+ " is raised: {0} != {1}. Warning message: {2}".format(
+ actual_warning.filename, caller.filename,
+ actual_warning.message))
+ assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
| Closes #9584
| https://api.github.com/repos/pandas-dev/pandas/pulls/10676 | 2015-07-26T16:29:27Z | 2015-09-04T09:33:57Z | 2015-09-04T09:33:56Z | 2015-09-04T09:33:57Z |
BUG: #10645 in using MultiIndex.__contains__ | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 322f431a37a79..a5bd0dbe95808 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -404,4 +404,5 @@ Bug Fixes
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
+- Bug in ``MultiIndex.__contains__`` throws an ``IndexError`` for large multiindices (:issue:`10645`)
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 1678e3b280ee5..9259ed922eb13 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -143,6 +143,11 @@ cdef class IndexEngine:
return self._get_loc_duplicates(val)
values = self._get_index_values()
loc = _bin_search(values, val) # .searchsorted(val, side='left')
+
+ # GH10675
+ if len(values) <= loc or 0 > loc:
+ raise KeyError(val)
+
if util.get_value_at(values, loc) != val:
raise KeyError(val)
return loc
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 81c6366b4cb41..9c0b488a53265 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1253,6 +1253,14 @@ def test_get_loc(self):
with tm.assertRaises(TypeError):
idx.get_loc('a', method='nearest')
+ def test_get_loc_keyerror(self):
+ # GH10645
+ mi = pd.MultiIndex.from_arrays([range(100), range(100)])
+ self.assertRaises(KeyError, lambda: mi.get_loc((1000001, 0)))
+
+ mi = pd.MultiIndex.from_arrays([range(1000000), range(1000000)])
+ self.assertRaises(KeyError, lambda: mi.get_loc((1000001, 0)))
+
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
| This PR fix a BUG #10645
@sinhrks would you review my PR?
| https://api.github.com/repos/pandas-dev/pandas/pulls/10675 | 2015-07-26T14:26:58Z | 2015-09-10T11:59:39Z | null | 2015-10-12T02:45:53Z |
API: #10636, changing default of to_datetime to raise, deprecating coerce in favor of errors | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index a07991d69d48b..1b5a4586e59e7 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -197,18 +197,30 @@ or ``format``, use ``to_datetime`` if these are required.
Invalid Data
~~~~~~~~~~~~
-Pass ``coerce=True`` to convert invalid data to ``NaT`` (not a time):
+.. note::
+
+ In version 0.17.0, the default for ``to_datetime`` is now ``errors='raise'``, rather than ``errors='ignore'``. This means
+ that invalid parsing will raise rather that return the original input as in previous versions.
+
+Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time):
.. ipython:: python
+ :okexcept:
+
+ # this is the default, raise when unparseable
+ to_datetime(['2009-07-31', 'asd'], errors='raise')
- to_datetime(['2009-07-31', 'asd'])
+ # return the original input when unparseable
+ to_datetime(['2009-07-31', 'asd'], errors='ignore')
- to_datetime(['2009-07-31', 'asd'], coerce=True)
+ # return NaT for input when unparseable
+ to_datetime(['2009-07-31', 'asd'], errors='coerce')
Take care, ``to_datetime`` may not act as you expect on mixed data:
.. ipython:: python
+ :okexcept:
to_datetime([1, '1'])
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index aec9c37be4b4f..974c6c31535f9 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -65,10 +65,11 @@ Other enhancements
- Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`)
- ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`)
-- The string parsing of ``to_datetime``, ``Timestamp`` and ``DatetimeIndex`` has been made consistent" (:issue:`7599`)
+- The string parsing of ``to_datetime``, ``Timestamp`` and ``DatetimeIndex`` has been made consistent. (:issue:`7599`)
- Prior to v0.17.0, ``Timestamp`` and ``to_datetime`` may parse year-only datetime-string incorrectly using today's date, otherwise ``DatetimeIndex`` uses the beginning of the year.
- ``Timestamp`` and ``to_datetime`` may raise ``ValueError`` in some types of datetime-string which ``DatetimeIndex`` can parse, such as quarterly string.
+ Prior to v0.17.0, ``Timestamp`` and ``to_datetime`` may parse year-only datetime-string incorrectly using today's date, otherwise ``DatetimeIndex``
+ uses the beginning of the year. ``Timestamp`` and ``to_datetime`` may raise ``ValueError`` in some types of datetime-string which ``DatetimeIndex``
+ can parse, such as a quarterly string.
Previous Behavior
@@ -119,6 +120,45 @@ Backwards incompatible API changes
- Line and kde plot with ``subplots=True`` now uses default colors, not all black. Specify ``color='k'`` to draw all lines in black (:issue:`9894`)
+.. _whatsnew_0170.api_breaking.to_datetime
+
+Changes to to_datetime and to_timedelta
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The default for ``pd.to_datetime`` error handling has changed to ``errors='raise'``. In prior versions it was ``errors='ignore'``.
+Furthermore, the ``coerce`` argument has been deprecated in favor of ``errors='coerce'``. This means that invalid parsing will raise rather that return the original
+input as in previous versions. (:issue:`10636`)
+
+Previous Behavior:
+
+ .. code-block:: python
+
+ In [2]: pd.to_datetime(['2009-07-31', 'asd'])
+ Out[2]: array(['2009-07-31', 'asd'], dtype=object)
+
+New Behavior:
+
+ .. ipython:: python
+ :okexcept:
+
+ pd.to_datetime(['2009-07-31', 'asd'])
+
+ Of course you can coerce this as well.
+
+ .. ipython:: python
+
+ to_datetime(['2009-07-31', 'asd'], errors='coerce')
+
+ To keep the previous behaviour, you can use `errors='ignore'`:
+
+ .. ipython:: python
+ :okexcept:
+
+ to_datetime(['2009-07-31', 'asd'], errors='ignore')
+
+``pd.to_timedelta`` gained a similar API, of ``errors='raise'|'ignore'|'coerce'``, and the ``coerce`` keyword
+has been deprecated in favor of ``errors='coerce'``.
+
.. _whatsnew_0170.api_breaking.convert_objects:
Changes to convert_objects
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 873e6a79f741e..aaa341240f538 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1903,9 +1903,9 @@ def _possibly_convert_objects(values,
# Immediate return if coerce
if datetime:
- return pd.to_datetime(values, coerce=True, box=False)
+ return pd.to_datetime(values, errors='coerce', box=False)
elif timedelta:
- return pd.to_timedelta(values, coerce=True, box=False)
+ return pd.to_timedelta(values, errors='coerce', box=False)
elif numeric:
return lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
@@ -1958,7 +1958,7 @@ def _possibly_convert_platform(values):
return values
-def _possibly_cast_to_datetime(value, dtype, coerce=False):
+def _possibly_cast_to_datetime(value, dtype, errors='raise'):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
@@ -2002,9 +2002,9 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
elif np.prod(value.shape) and value.dtype != dtype:
try:
if is_datetime64:
- value = to_datetime(value, coerce=coerce).values
+ value = to_datetime(value, errors=errors).values
elif is_timedelta64:
- value = to_timedelta(value, coerce=coerce).values
+ value = to_timedelta(value, errors=errors).values
except (AttributeError, ValueError):
pass
@@ -2066,7 +2066,7 @@ def _possibly_infer_to_datetimelike(value, convert_dates=False):
def _try_datetime(v):
# safe coerce to datetime64
try:
- return tslib.array_to_datetime(v, raise_=True).reshape(shape)
+ return tslib.array_to_datetime(v, errors='raise').reshape(shape)
except:
return v
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 2c6a23e492ab2..6a278e0e44306 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -341,7 +341,6 @@ def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
- coerce = True
if not is_list_like(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
@@ -362,7 +361,7 @@ def _convert_to_array(self, values, name=None, other=None):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
# have a timedelta, convert to to ns here
- values = to_timedelta(values, coerce=coerce)
+ values = to_timedelta(values, errors='coerce')
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
@@ -381,7 +380,7 @@ def _convert_to_array(self, values, name=None, other=None):
"datetime/timedelta operations [{0}]".format(
', '.join([com.pprint_thing(v)
for v in values[mask]])))
- values = to_timedelta(os, coerce=coerce)
+ values = to_timedelta(os, errors='coerce')
elif inferred_type == 'floating':
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a6bb115ac0906..275c765c4cb92 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2057,6 +2057,7 @@ def converter(*date_cols):
utc=None,
box=False,
dayfirst=dayfirst,
+ errors='ignore',
infer_datetime_format=infer_datetime_format
)
except:
@@ -2064,7 +2065,7 @@ def converter(*date_cols):
lib.try_parse_dates(strs, dayfirst=dayfirst))
else:
try:
- result = tools.to_datetime(date_parser(*date_cols))
+ result = tools.to_datetime(date_parser(*date_cols), errors='ignore')
if isinstance(result, datetime.datetime):
raise Exception('scalar parser')
return result
@@ -2073,7 +2074,8 @@ def converter(*date_cols):
return tools.to_datetime(
lib.try_parse_dates(_concat_date_cols(date_cols),
parser=date_parser,
- dayfirst=dayfirst))
+ dayfirst=dayfirst),
+ errors='ignore')
except Exception:
return generic_parser(date_parser, *date_cols)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 6cc4b73ed7bbe..8eefe4ba98876 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -80,17 +80,17 @@ def _convert_params(sql, params):
def _handle_date_column(col, format=None):
if isinstance(format, dict):
- return to_datetime(col, **format)
+ return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
- return to_datetime(col, coerce=True, unit=format, utc=True)
+ return to_datetime(col, errors='coerce', unit=format, utc=True)
elif (issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
- return to_datetime(col, coerce=True, unit=format, utc=True)
+ return to_datetime(col, errors='coerce', unit=format, utc=True)
else:
- return to_datetime(col, coerce=True, format=format, utc=True)
+ return to_datetime(col, errors='coerce', format=format, utc=True)
def _parse_date_columns(data_frame, parse_dates):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 18dd13f9b896e..859c6d3250121 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -216,7 +216,7 @@ def _get_all_tables(self):
def _close_conn(self):
pass
-
+
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
@@ -1271,7 +1271,7 @@ def test_datetime_NaT(self):
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
- result['A'] = to_datetime(result['A'], coerce=True)
+ result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
@@ -1720,7 +1720,7 @@ class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
-class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
+class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index cc9ab977241f9..9345b86758c99 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -419,7 +419,7 @@ def test_read_write_reread_dta14(self):
for col in cols:
expected[col] = expected[col].convert_objects(datetime=True, numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
- expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
+ expected['date_td'] = pd.to_datetime(expected['date_td'], errors='coerce')
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
@@ -464,7 +464,7 @@ def test_timestamp_and_label(self):
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
-
+
with StataReader(path) as reader:
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 680456df104e4..d364206017c7e 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -68,7 +68,7 @@ def test_to_datetime1():
# unparseable
s = 'Month 1, 1999'
- assert to_datetime(s) == s
+ assert to_datetime(s, errors='ignore') == s
def test_normalize_date():
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index dd820394d40a0..bcfeeded3abc9 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -607,12 +607,22 @@ def testit(unit, transform):
# ms
testit('L',lambda x: 'ms')
+ def test_to_timedelta_invalid(self):
+
# these will error
self.assertRaises(ValueError, lambda : to_timedelta([1,2],unit='foo'))
self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo'))
# time not supported ATM
self.assertRaises(ValueError, lambda :to_timedelta(time(second=1)))
+ self.assertTrue(to_timedelta(time(second=1), errors='coerce') is pd.NaT)
+
+ self.assertRaises(ValueError, lambda : to_timedelta(['foo','bar']))
+ tm.assert_index_equal(TimedeltaIndex([pd.NaT,pd.NaT]),
+ to_timedelta(['foo','bar'], errors='coerce'))
+
+ tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
+ to_timedelta(['1 day','bar','1 min'], errors='coerce'))
def test_to_timedelta_via_apply(self):
# GH 5458
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 85aaf32e4dae2..26acbb2073ab8 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -851,7 +851,11 @@ def test_string_na_nat_conversion(self):
tm.assert_numpy_array_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
- result = to_datetime(malformed)
+
+ # GH 10636, default is now 'raise'
+ self.assertRaises(ValueError, lambda : to_datetime(malformed, errors='raise'))
+
+ result = to_datetime(malformed, errors='ignore')
tm.assert_numpy_array_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
@@ -920,9 +924,9 @@ def test_to_datetime_with_apply(self):
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y', errors='raise'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise'))
- expected = pd.to_datetime(td, format='%b %y', coerce=True)
+ expected = pd.to_datetime(td, format='%b %y', errors='coerce')
- result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
+ result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
@@ -1002,7 +1006,7 @@ def test_to_datetime_types(self):
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
- to_datetime([1, '1']),
+ to_datetime([1, '1'], errors='ignore'),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
@@ -1048,7 +1052,7 @@ def test_to_datetime_dt64s(self):
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
- self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
+ self.assertIs(pd.to_datetime(dt, errors='coerce'), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
@@ -1070,12 +1074,11 @@ def test_to_datetime_array_of_dt64s(self):
ValueError,
pd.to_datetime,
dts_with_oob,
- coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
- pd.to_datetime(dts_with_oob, box=False, coerce=True),
+ pd.to_datetime(dts_with_oob, box=False, errors='coerce'),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
@@ -1086,11 +1089,11 @@ def test_to_datetime_array_of_dt64s(self):
)
)
- # With coerce=False and errors='ignore', out of bounds datetime64s
+ # With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
- pd.to_datetime(dts_with_oob, box=False, coerce=False),
+ pd.to_datetime(dts_with_oob, box=False, errors='ignore'),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
@@ -4188,11 +4191,11 @@ def test_to_datetime_format_YYYYMMDD(self):
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
- result = pd.to_datetime(s,format='%Y%m%d')
+ result = pd.to_datetime(s,format='%Y%m%d',errors='ignore')
expected = np.array([ datetime(2012,12,31), datetime(2014,12,31), datetime(9999,12,31) ], dtype=object)
self.assert_numpy_array_equal(result, expected)
- result = pd.to_datetime(s,format='%Y%m%d', coerce=True)
+ result = pd.to_datetime(s,format='%Y%m%d', errors='coerce')
expected = Series(['20121231','20141231','NaT'],dtype='M8[ns]')
assert_series_equal(result, expected)
@@ -4521,25 +4524,37 @@ def test_second(self):
class TestDaysInMonth(tm.TestCase):
- # tests for issue #10154
+ def test_coerce_deprecation(self):
- def test_day_not_in_month_coerce_true_NaT(self):
- self.assertTrue(isnull(to_datetime('2015-02-29', coerce=True)))
- self.assertTrue(isnull(to_datetime('2015-02-29', format="%Y-%m-%d", coerce=True)))
- self.assertTrue(isnull(to_datetime('2015-02-32', format="%Y-%m-%d", coerce=True)))
- self.assertTrue(isnull(to_datetime('2015-04-31', format="%Y-%m-%d", coerce=True)))
-
- def test_day_not_in_month_coerce_false_raise(self):
- self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise', coerce=False)
- self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise', format="%Y-%m-%d", coerce=False)
- self.assertRaises(ValueError, to_datetime, '2015-02-32', errors='raise', format="%Y-%m-%d", coerce=False)
- self.assertRaises(ValueError, to_datetime, '2015-04-31', errors='raise', format="%Y-%m-%d", coerce=False)
-
- def test_day_not_in_month_coerce_false_ignore(self):
- self.assertEqual(to_datetime('2015-02-29', errors='ignore', coerce=False), '2015-02-29')
- self.assertEqual(to_datetime('2015-02-29', errors='ignore', format="%Y-%m-%d", coerce=False), '2015-02-29')
- self.assertEqual(to_datetime('2015-02-32', errors='ignore', format="%Y-%m-%d", coerce=False), '2015-02-32')
- self.assertEqual(to_datetime('2015-04-31', errors='ignore', format="%Y-%m-%d", coerce=False), '2015-04-31')
+ # deprecation of coerce
+ with tm.assert_produces_warning(FutureWarning):
+ to_datetime('2015-02-29', coerce=True)
+ with tm.assert_produces_warning(FutureWarning):
+ self.assertRaises(ValueError, lambda : to_datetime('2015-02-29', coerce=False))
+
+ # multiple arguments
+ for e, c in zip(['raise','ignore','coerce'],[True,False]):
+ with tm.assert_produces_warning(FutureWarning):
+ self.assertRaises(TypeError, lambda : to_datetime('2015-02-29', errors=e, coerce=c))
+
+ # tests for issue #10154
+ def test_day_not_in_month_coerce(self):
+ self.assertTrue(isnull(to_datetime('2015-02-29', errors='coerce')))
+ self.assertTrue(isnull(to_datetime('2015-02-29', format="%Y-%m-%d", errors='coerce')))
+ self.assertTrue(isnull(to_datetime('2015-02-32', format="%Y-%m-%d", errors='coerce')))
+ self.assertTrue(isnull(to_datetime('2015-04-31', format="%Y-%m-%d", errors='coerce')))
+
+ def test_day_not_in_month_raise(self):
+ self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise')
+ self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise', format="%Y-%m-%d")
+ self.assertRaises(ValueError, to_datetime, '2015-02-32', errors='raise', format="%Y-%m-%d")
+ self.assertRaises(ValueError, to_datetime, '2015-04-31', errors='raise', format="%Y-%m-%d")
+
+ def test_day_not_in_month_ignore(self):
+ self.assertEqual(to_datetime('2015-02-29', errors='ignore'), '2015-02-29')
+ self.assertEqual(to_datetime('2015-02-29', errors='ignore', format="%Y-%m-%d"), '2015-02-29')
+ self.assertEqual(to_datetime('2015-02-32', errors='ignore', format="%Y-%m-%d"), '2015-02-32')
+ self.assertEqual(to_datetime('2015-04-31', errors='ignore', format="%Y-%m-%d"), '2015-04-31')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 397d3f7d2656f..85bae42e7a492 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -652,10 +652,10 @@ def test_number_looking_strings_not_into_datetime(self):
# These strings don't look like datetimes so they shouldn't be
# attempted to be converted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
- self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)
+ self.assert_numpy_array_equal(tslib.array_to_datetime(arr, errors='ignore'), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
- self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)
+ self.assert_numpy_array_equal(tslib.array_to_datetime(arr, errors='ignore'), arr)
def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
invalid_dates = [
@@ -671,13 +671,12 @@ def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
ValueError,
tslib.array_to_datetime,
np.array([invalid_date], dtype='object'),
- coerce=False,
- raise_=True,
+ errors='raise',
)
self.assertTrue(
np.array_equal(
tslib.array_to_datetime(
- np.array([invalid_date], dtype='object'), coerce=True
+ np.array([invalid_date], dtype='object'), errors='coerce',
),
np.array([tslib.iNaT], dtype='M8[ns]')
)
@@ -685,7 +684,7 @@ def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
self.assert_numpy_array_equal(
- tslib.array_to_datetime(arr, coerce=True),
+ tslib.array_to_datetime(arr, errors='coerce'),
np.array(
[
tslib.iNaT,
@@ -700,11 +699,11 @@ def test_coerce_of_invalid_datetimes(self):
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
- self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)
+ self.assert_numpy_array_equal(tslib.array_to_datetime(arr,errors='ignore'), arr)
# With coercing, the invalid dates becomes iNaT
self.assert_numpy_array_equal(
- tslib.array_to_datetime(arr, coerce=True),
+ tslib.array_to_datetime(arr, errors='coerce'),
np.array(
[
'2013-01-01T00:00:00.000000000-0000',
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 60005ef6f2d6f..886d6ff42ced6 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -9,8 +9,11 @@
from pandas.core.common import (ABCSeries, is_integer_dtype,
is_timedelta64_dtype, is_list_like,
isnull, _ensure_object)
+from pandas.util.decorators import deprecate_kwarg
-def to_timedelta(arg, unit='ns', box=True, coerce=False):
+@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
+ mapping={True: 'coerce', False: 'raise'})
+def to_timedelta(arg, unit='ns', box=True, errors='raise', coerce=None):
"""
Convert argument to timedelta
@@ -19,9 +22,12 @@ def to_timedelta(arg, unit='ns', box=True, coerce=False):
arg : string, timedelta, array of strings (with possible NAs)
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an integer/float number
box : boolean, default True
- If True returns a Timedelta/TimedeltaIndex of the results
- if False returns a np.timedelta64 or ndarray of values of dtype timedelta64[ns]
- coerce : force errors to NaT (False by default)
+ - If True returns a Timedelta/TimedeltaIndex of the results
+ - if False returns a np.timedelta64 or ndarray of values of dtype timedelta64[ns]
+ errors : {'ignore', 'raise', 'coerce'}, default 'raise'
+ - If 'raise', then invalid parsing will raise an exception
+ - If 'coerce', then invalid parsing will be set as NaT
+ - If 'ignore', then invalid parsing will return the input
Returns
-------
@@ -40,7 +46,7 @@ def _convert_listlike(arg, box, unit):
elif is_integer_dtype(arg):
value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]', copy=False)
else:
- value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit, coerce=coerce)
+ value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit, errors=errors)
value = value.astype('timedelta64[ns]', copy=False)
if box:
@@ -58,7 +64,7 @@ def _convert_listlike(arg, box, unit):
return _convert_listlike(arg, box=box, unit=unit)
# ...so it must be a scalar value. Return scalar.
- return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, coerce=coerce)
+ return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, errors=errors)
_unit_map = {
'Y' : 'Y',
@@ -96,10 +102,10 @@ def _validate_timedelta_unit(arg):
return 'ns'
raise ValueError("invalid timedelta unit {0} provided".format(arg))
-def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, coerce=False):
+def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
""" convert strings to timedelta; coerce to Timedelta (if box), else np.timedelta64"""
- result = tslib.convert_to_timedelta(r,unit,coerce)
+ result = tslib.convert_to_timedelta(r,unit,errors)
if box:
result = tslib.Timedelta(result)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 6a1dd934d6bce..6f08448b47b1e 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -9,6 +9,7 @@
import pandas.core.common as com
from pandas.compat import StringIO, callable
import pandas.compat as compat
+from pandas.util.decorators import deprecate_kwarg
try:
import dateutil
@@ -171,8 +172,10 @@ def _guess_datetime_format_for_array(arr, **kwargs):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
-def to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
- utc=None, box=True, format=None, exact=True, coerce=False,
+@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
+ mapping={True: 'coerce', False: 'raise'})
+def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
+ utc=None, box=True, format=None, exact=True, coerce=None,
unit='ns', infer_datetime_format=False):
"""
Convert argument to datetime.
@@ -180,8 +183,10 @@ def to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
Parameters
----------
arg : string, datetime, array of strings (with possible NAs)
- errors : {'ignore', 'raise'}, default 'ignore'
- Errors are ignored by default (values left untouched).
+ errors : {'ignore', 'raise', 'coerce'}, default 'raise'
+ - If 'raise', then invalid parsing will raise an exception
+ - If 'coerce', then invalid parsing will be set as NaT
+ - If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10.
@@ -189,24 +194,22 @@ def to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
- If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil).
+ - If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
+ - If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
with year first (this is a known bug, based on dateutil beahavior).
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex, if False returns ndarray of values.
+ - If True returns a DatetimeIndex
+ - If False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
- coerce : force errors to NaT (False by default)
- Timestamps outside the interval between Timestamp.min and Timestamp.max
- (approximately 1677-09-22 to 2262-04-11) will be also forced to NaT.
+ - If True, require an exact format match.
+ - If False, allow the format to match anywhere in the target string.
unit : unit of the arg (D,s,ms,us,ns) denote the unit in epoch
(e.g. a unix timestamp), which is an integer/float number.
infer_datetime_format : boolean, default False
@@ -256,16 +259,16 @@ def to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
>>> pd.to_datetime('13000101', format='%Y%m%d')
datetime.datetime(1300, 1, 1, 0, 0)
- >>> pd.to_datetime('13000101', format='%Y%m%d', coerce=True)
+ >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
"""
return _to_datetime(arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst,
- utc=utc, box=box, format=format, exact=exact, coerce=coerce,
+ utc=utc, box=box, format=format, exact=exact,
unit=unit, infer_datetime_format=infer_datetime_format)
-def _to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
- utc=None, box=True, format=None, exact=True, coerce=False,
+def _to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
+ utc=None, box=True, format=None, exact=True,
unit='ns', freq=None, infer_datetime_format=False):
"""
Same as to_datetime, but accept freq for
@@ -322,7 +325,7 @@ def _convert_listlike(arg, box, format):
# shortcut formatting here
if format == '%Y%m%d':
try:
- result = _attempt_YYYYMMDD(arg, coerce=coerce)
+ result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
@@ -330,8 +333,7 @@ def _convert_listlike(arg, box, format):
if result is None:
try:
result = tslib.array_strptime(
- arg, format, exact=exact, coerce=coerce
- )
+ arg, format, exact=exact, errors=errors)
except (tslib.OutOfBoundsDatetime):
if errors == 'raise':
raise
@@ -346,10 +348,10 @@ def _convert_listlike(arg, box, format):
result = arg
if result is None and (format is None or infer_datetime_format):
- result = tslib.array_to_datetime(arg, raise_=errors=='raise',
+ result = tslib.array_to_datetime(arg, errors=errors,
utc=utc, dayfirst=dayfirst,
yearfirst=yearfirst, freq=freq,
- coerce=coerce, unit=unit,
+ unit=unit,
require_iso8601=require_iso8601)
if com.is_datetime64_dtype(result) and box:
@@ -376,14 +378,20 @@ def _convert_listlike(arg, box, format):
return _convert_listlike(np.array([ arg ]), box, format)[0]
-def _attempt_YYYYMMDD(arg, coerce):
+def _attempt_YYYYMMDD(arg, errors):
""" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
- arg is a passed in as an object dtype, but could really be ints/strings with nan-like/or floats (e.g. with nan) """
+ arg is a passed in as an object dtype, but could really be ints/strings with nan-like/or floats (e.g. with nan)
+
+ Parameters
+ ----------
+ arg : passed value
+ errors : 'raise','ignore','coerce'
+ """
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
- return tslib.array_to_datetime(lib.try_parse_year_month_day(carg/10000,carg/100 % 100, carg % 100), coerce=coerce)
+ return tslib.array_to_datetime(lib.try_parse_year_month_day(carg/10000,carg/100 % 100, carg % 100), errors=errors)
def calc_with_mask(carg,mask):
result = np.empty(carg.shape, dtype='M8[ns]')
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index da7cc05621775..bf134a0a6d996 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -1806,9 +1806,9 @@ cpdef object _get_rule_month(object source, object default='DEC'):
return source.split('-')[1]
-cpdef array_to_datetime(ndarray[object] values, raise_=False,
+cpdef array_to_datetime(ndarray[object] values, errors='raise',
dayfirst=False, yearfirst=False, freq=None,
- format=None, utc=None, coerce=False, unit=None,
+ format=None, utc=None, unit=None,
require_iso8601=False):
cdef:
Py_ssize_t i, n = len(values)
@@ -1817,10 +1817,14 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
ndarray[object] oresult
pandas_datetimestruct dts
bint utc_convert = bool(utc), seen_integer=0, seen_datetime=0
+ bint is_raise=errors=='raise', is_ignore=errors=='ignore', is_coerce=errors=='coerce'
_TSObject _ts
int64_t m = cast_from_unit(None,unit)
int out_local = 0, out_tzoffset = 0
+ # specify error conditions
+ assert is_raise or is_ignore or is_coerce
+
try:
result = np.empty(n, dtype='M8[ns]')
iresult = result.view('i8')
@@ -1837,7 +1841,7 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
try:
_check_dts_bounds(&_ts.dts)
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
@@ -1852,7 +1856,7 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
try:
_check_dts_bounds(&dts)
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
@@ -1862,7 +1866,7 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
_check_dts_bounds(&dts)
seen_datetime=1
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
@@ -1874,19 +1878,19 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
iresult[i] = _get_datetime64_nanos(val)
seen_datetime=1
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
# if we are coercing, dont' allow integers
- elif is_integer_object(val) and not coerce:
+ elif is_integer_object(val) and not is_coerce:
if val == iNaT:
iresult[i] = iNaT
else:
iresult[i] = val*m
seen_integer=1
- elif is_float_object(val) and not coerce:
+ elif is_float_object(val) and not is_coerce:
if val != val or val == iNaT:
iresult[i] = iNaT
else:
@@ -1911,10 +1915,10 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
except ValueError:
# if requiring iso8601 strings, skip trying other formats
if require_iso8601:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
- elif raise_:
+ elif is_raise:
raise ValueError("time data %r does match format specified" %
(val,))
else:
@@ -1924,34 +1928,34 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
py_dt = parse_datetime_string(val, dayfirst=dayfirst,
yearfirst=yearfirst, freq=freq)
except Exception:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
- raise TypeError
+ raise TypeError("invalid string coercion to datetime")
try:
_ts = convert_to_tsobject(py_dt, None, None)
iresult[i] = _ts.value
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
except:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
# don't allow mixed integers and datetime like
- # higher levels can catch and coerce to object, for
+ # higher levels can catch and is_coerce to object, for
# example
if seen_integer and seen_datetime:
raise ValueError("mixed datetimes and integers in passed array")
return result
except OutOfBoundsDatetime:
- if raise_:
+ if is_raise:
raise
oresult = np.empty(n, dtype=object)
@@ -1987,12 +1991,12 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
_pydatetime_to_dts(oresult[i], &dts)
_check_dts_bounds(&dts)
except Exception:
- if raise_:
+ if is_raise:
raise
return values
# oresult[i] = val
else:
- if raise_:
+ if is_raise:
raise
return values
@@ -2548,13 +2552,16 @@ cdef PyTypeObject* td_type = <PyTypeObject*> Timedelta
cdef inline bint is_timedelta(object o):
return Py_TYPE(o) == td_type # isinstance(o, Timedelta)
-def array_to_timedelta64(ndarray[object] values, unit='ns', coerce=False):
+def array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
""" convert an ndarray to an array of ints that are timedeltas
force conversion if coerce = True,
else will raise if cannot convert """
cdef:
Py_ssize_t i, n
ndarray[int64_t] iresult
+ bint is_raise=errors=='raise', is_ignore=errors=='ignore', is_coerce=errors=='coerce'
+
+ assert is_raise or is_ignore or is_coerce
n = values.shape[0]
result = np.empty(n, dtype='m8[ns]')
@@ -2564,15 +2571,18 @@ def array_to_timedelta64(ndarray[object] values, unit='ns', coerce=False):
# if so then we hit the fast path
try:
for i in range(n):
- result[i] = parse_timedelta_string(values[i], coerce)
+ result[i] = parse_timedelta_string(values[i], is_coerce)
except:
for i in range(n):
- result[i] = convert_to_timedelta64(values[i], unit, coerce)
+ result[i] = convert_to_timedelta64(values[i], unit, is_coerce)
return iresult
-def convert_to_timedelta(object ts, object unit='ns', coerce=False):
- return convert_to_timedelta64(ts, unit, coerce)
+def convert_to_timedelta(object ts, object unit='ns', errors='raise'):
+ cdef bint is_raise=errors=='raise', is_ignore=errors=='ignore', is_coerce=errors=='coerce'
+
+ assert is_raise or is_ignore or is_coerce
+ return convert_to_timedelta64(ts, unit, is_coerce)
cdef dict timedelta_abbrevs = { 'd' : 'd',
'days' : 'd',
@@ -2892,7 +2902,7 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
raise ValueError("Invalid type for timedelta scalar: %s" % type(ts))
return ts.astype('timedelta64[ns]')
-def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coerce=False):
+def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='raise'):
"""
Parameters
----------
@@ -2911,6 +2921,9 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coe
int64_t us, ns
object val, group_key, ampm, found
dict found_key
+ bint is_raise=errors=='raise', is_ignore=errors=='ignore', is_coerce=errors=='coerce'
+
+ assert is_raise or is_ignore or is_coerce
global _TimeRE_cache, _regex_cache
with _cache_lock:
@@ -2983,13 +2996,13 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coe
if exact:
found = format_regex.match(val)
if not found:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise ValueError("time data %r does not match format %r (match)" %
(values[i], fmt))
if len(val) != found.end():
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise ValueError("unconverted data remains: %s" %
@@ -2999,7 +3012,7 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coe
else:
found = format_regex.search(val)
if not found:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise ValueError("time data %r does not match format %r (search)" %
@@ -3134,7 +3147,7 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coe
month = datetime_result.month
day = datetime_result.day
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
@@ -3154,7 +3167,7 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coe
try:
_check_dts_bounds(&dts)
except ValueError:
- if coerce:
+ if is_coerce:
iresult[i] = iNaT
continue
raise
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 9cd538511e946..4544c3cdb8919 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -43,7 +43,7 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
- >>> f(cols='should error', columns="can't pass do both")
+ >>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
@@ -78,6 +78,7 @@ def wrapper(*args, **kwargs):
new_arg_value = old_arg_value
msg = "the '%s' keyword is deprecated, " \
"use '%s' instead" % (old_arg_name, new_arg_name)
+
warnings.warn(msg, FutureWarning)
if kwargs.get(new_arg_name, None) is not None:
msg = "Can only specify '%s' or '%s', not both" % \
@@ -287,4 +288,3 @@ def make_signature(func) :
if spec.keywords:
args.append('**' + spec.keywords)
return args, spec.args
-
| closes #10636
changes `to_timedelta` API as well to similar effect.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10674 | 2015-07-26T01:31:52Z | 2015-07-31T22:35:22Z | 2015-07-31T22:35:22Z | 2015-08-01T00:23:50Z |
read_sql/to_sql can accept database URI as con parameter | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index face3a1002bae..ff6759730da8f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -36,6 +36,8 @@ New features
Other enhancements
^^^^^^^^^^^^^^^^^^
+- `read_sql` and `to_sql` can accept database URI as con parameter (:issue:`10214`)
+
- Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`)
- ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index ef8360f0ff459..6cc4b73ed7bbe 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -289,7 +289,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
----------
table_name : string
Name of SQL table in database
- con : SQLAlchemy connectable
+ con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
@@ -328,6 +328,8 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
read_sql
"""
+
+ con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
@@ -362,7 +364,8 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
----------
sql : string
SQL query to be executed
- con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
+ con : SQLAlchemy connectable(engine/connection) or database string URI
+ or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
@@ -420,7 +423,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
----------
sql : string
SQL query to be executed or database table name.
- con : SQLAlchemy connectable(engine/connection) or DBAPI2 connection (fallback mode)
+ con : SQLAlchemy connectable(engine/connection) or database string URI
+ or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
@@ -504,7 +508,8 @@ def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
frame : DataFrame
name : string
Name of SQL table
- con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
+ con : SQLAlchemy connectable(engine/connection) or database string URI
+ or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
@@ -584,6 +589,22 @@ def has_table(table_name, con, flavor='sqlite', schema=None):
"MySQL will be further supported with SQLAlchemy connectables.")
+def _engine_builder(con):
+ """
+ Returns a SQLAlchemy engine from a URI (if con is a string)
+ else it just return con without modifying it
+ """
+ if isinstance(con, string_types):
+ try:
+ import sqlalchemy
+ con = sqlalchemy.create_engine(con)
+ return con
+
+ except ImportError:
+ _SQLALCHEMY_INSTALLED = False
+
+ return con
+
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
@@ -592,6 +613,7 @@ def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
+ con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
else:
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index d95babff2653b..18dd13f9b896e 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -918,6 +918,23 @@ def test_sqlalchemy_type_mapping(self):
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime))
+ def test_to_sql_read_sql_with_database_uri(self):
+
+ # Test read_sql and .to_sql method with a database URI (GH10654)
+ test_frame1 = self.test_frame1
+ #db_uri = 'sqlite:///:memory:' # raises sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near "iris": syntax error [SQL: 'iris']
+ with tm.ensure_clean() as name:
+ db_uri = 'sqlite:///' + name
+ table = 'iris'
+ test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
+ test_frame2 = sql.read_sql(table, db_uri)
+ test_frame3 = sql.read_sql_table(table, db_uri)
+ query = 'SELECT * FROM iris'
+ test_frame4 = sql.read_sql_query(query, db_uri)
+ tm.assert_frame_equal(test_frame1, test_frame2)
+ tm.assert_frame_equal(test_frame1, test_frame3)
+ tm.assert_frame_equal(test_frame1, test_frame4)
+
class _EngineToConnMixin(object):
"""
| It should fix https://github.com/pydata/pandas/issues/10654
| https://api.github.com/repos/pandas-dev/pandas/pulls/10666 | 2015-07-24T11:58:54Z | 2015-07-27T08:32:22Z | 2015-07-27T08:32:22Z | 2015-07-27T08:32:23Z |
Small doc update for Stata 118 support | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d8b6a43f684f4..65f887288cc6d 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3989,8 +3989,11 @@ missing values are represented as ``np.nan``. If ``True``, missing values are
represented using ``StataMissingValue`` objects, and columns containing missing
values will have ``object`` data type.
-:func:`~pandas.read_stata` and :class:`~pandas.io.stata.StataReader` supports .dta
-formats 104, 105, 108, 113-115 (Stata 10-12) and 117 (Stata 13+).
+.. note::
+
+ :func:`~pandas.read_stata` and
+ :class:`~pandas.io.stata.StataReader` support .dta formats 113-115
+ (Stata 10-12), 117 (Stata 13), and 118 (Stata 14).
.. note::
| https://api.github.com/repos/pandas-dev/pandas/pulls/10662 | 2015-07-23T14:13:10Z | 2015-07-24T23:09:30Z | 2015-07-24T23:09:30Z | 2015-11-12T23:44:01Z | |
BUG: #10565 Series.name lost in rolling_* funcions | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d59b6120163ff..3c89bb3b78c07 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -348,8 +348,7 @@ Bug Fixes
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`)
-
-
+- Bug in ``pd.rolling_*`` where ``Series.name`` would be lost in the output (:issue:`10565`)
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 41a768783b1cb..586d507b27493 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -426,7 +426,7 @@ def _process_data_structure(arg, kill_inf=True):
values = arg.values
elif isinstance(arg, Series):
values = arg.values
- return_hook = lambda v: Series(v, arg.index)
+ return_hook = lambda v: Series(v, arg.index, name=arg.name)
else:
return_hook = lambda v: v
values = arg
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index a0e4d5663fde9..1741676abf773 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -725,6 +725,14 @@ def test_ewma_halflife_arg(self):
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr)
+ def test_moment_preserve_series_name(self):
+ # GH 10565
+ s = Series(np.arange(100), name='foo')
+ s2 = mom.rolling_mean(s, 30)
+ s3 = mom.rolling_sum(s, 20)
+ self.assertEqual(s2.name, 'foo')
+ self.assertEqual(s3.name, 'foo')
+
def test_ew_empty_arrays(self):
arr = np.array([], dtype=np.float64)
| Addresses #10565
| https://api.github.com/repos/pandas-dev/pandas/pulls/10659 | 2015-07-23T00:21:58Z | 2015-07-23T11:16:39Z | 2015-07-23T11:16:39Z | 2015-08-17T23:42:53Z |
BUG: GH9428 promote string dtype to object dtype for empty DataFrame | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 8048b5fea9e66..47b331adea362 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -395,4 +395,5 @@ Bug Fixes
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
+- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a7ecb74a67485..d52a859086aae 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -322,6 +322,8 @@ def _init_dict(self, data, index, columns, dtype=None):
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
+ elif np.issubdtype(dtype, np.flexible):
+ v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 4e78e1549fb0e..2a7022da4fdc4 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3617,6 +3617,20 @@ def test_constructor_column_duplicates(self):
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
+ def test_constructor_empty_with_string_dtype(self):
+ # GH 9428
+ expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
+
+ df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
+ assert_frame_equal(df, expected)
+ df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
+ assert_frame_equal(df, expected)
+ df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
+ assert_frame_equal(df, expected)
+ df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
+ assert_frame_equal(df, expected)
+
+
def test_column_dups_operations(self):
def check(result, expected=None):
| Addresses [GH9428](https://github.com/pydata/pandas/issues/9428).
When constructing an empty DataFrame with a string dtype (e.g. `str`, `np.unicode_`, `U5`), the dtype is now promoted to the `object` dtype. This is now consistent with Series and avoids the confusing behaviour described in the original issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10658 | 2015-07-22T22:13:01Z | 2015-07-25T14:52:12Z | 2015-07-25T14:52:12Z | 2015-07-27T20:45:02Z |
BUG: #10652 google-api-python-client minimum version check | diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt
index 951c8798bef15..d2a3a8ea3cf03 100644
--- a/ci/requirements-2.7.txt
+++ b/ci/requirements-2.7.txt
@@ -22,4 +22,4 @@ html5lib=1.0b2
beautiful-soup=4.2.1
httplib2=0.8
python-gflags=2.0
-google-api-python-client=1.2
+google-api-python-client=1.2.0
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 06ad8827a5642..fb19ba0ee766e 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -1,4 +1,5 @@
from datetime import datetime
+import re
import json
import logging
import sys
@@ -25,8 +26,10 @@ def _check_google_client_version():
raise ImportError('Could not import pkg_resources (setuptools).')
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version
+ _GOOGLE_API_CLIENT_VERSION = re.sub(
+ r'(\.0+)*$', '', _GOOGLE_API_CLIENT_VERSION)
- if LooseVersion(_GOOGLE_API_CLIENT_VERSION) < '1.2.0':
+ if LooseVersion(_GOOGLE_API_CLIENT_VERSION) < '1.2':
raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google "
"BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION)
@@ -121,7 +124,7 @@ def get_service(self, credentials):
try:
from apiclient.discovery import build
-
+
except ImportError:
raise ImportError('Could not import Google API Client.')
| Fixes [bug](https://github.com/pydata/pandas/issues/10652) with version check for google-api-python-client
| https://api.github.com/repos/pandas-dev/pandas/pulls/10656 | 2015-07-22T19:20:42Z | 2015-09-13T16:16:13Z | null | 2015-09-13T16:16:13Z |
BUG: to_dense now preserves dtype in SparseArray | diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index b765fdb8d67be..a5a911700577d 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -242,7 +242,7 @@ def values(self):
"""
Dense values
"""
- output = np.empty(len(self), dtype=np.float64)
+ output = np.empty(len(self), dtype=self.dtype)
int_index = self.sp_index.to_int_index()
output.fill(self.fill_value)
output.put(int_index.indices, self)
@@ -266,7 +266,8 @@ def to_dense(self, fill=None):
# fill the nans
if fill is None:
fill = self.fill_value
- if not np.isnan(fill):
+ # nans can only occur arrays of floating scalars
+ if np.issubdtype(self.dtype, np.inexact) and not np.isnan(fill):
values[np.isnan(values)] = fill
return values
diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py
index 4ffc0b98ebc71..6f48b52c868ca 100644
--- a/pandas/sparse/tests/test_array.py
+++ b/pandas/sparse/tests/test_array.py
@@ -27,6 +27,10 @@ class TestSparseArray(tm.TestCase):
def setUp(self):
self.arr_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
self.arr = SparseArray(self.arr_data)
+
+ self.barr_data = np.array([False, False, True, True, False, False])
+ self.barr = SparseArray(self.barr_data, fill_value=False, dtype=bool)
+
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_get_item(self):
@@ -62,6 +66,10 @@ def test_constructor_copy(self):
not_copy.sp_values[:3] = 0
self.assertTrue((self.arr.sp_values[:3] == 0).all())
+ def test_constructor_match_dtype(self):
+ res = SparseArray(self.barr_data, dtype=bool)
+ self.assertEqual(res.dtype, bool)
+
def test_astype(self):
res = self.arr.astype('f8')
res.sp_values[:3] = 27
@@ -81,9 +89,19 @@ def _get_base(values):
assert(_get_base(arr2) is _get_base(self.arr))
def test_values_asarray(self):
- assert_almost_equal(self.arr.values, self.arr_data)
- assert_almost_equal(self.arr.to_dense(), self.arr_data)
- assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
+ for arr, arr_data in ((self.arr, self.arr_data),
+ (self.barr, self.barr_data)):
+ vals = arr.values
+ assert_almost_equal(vals, arr_data)
+ self.assertEqual(vals.dtype, arr_data.dtype)
+
+ dense = arr.to_dense()
+ assert_almost_equal(dense, arr_data)
+ self.assertEqual(dense.dtype, arr_data.dtype)
+
+ sp_vals = arr.sp_values
+ assert_almost_equal(sp_vals, np.asarray(arr))
+ self.assertEqual(sp_vals.dtype, arr_data.dtype)
def test_getitem(self):
def _checkit(i):
| Also fixes values and get_values.
fixes #10648
| https://api.github.com/repos/pandas-dev/pandas/pulls/10655 | 2015-07-22T17:23:09Z | 2015-11-10T01:21:15Z | null | 2022-10-13T00:16:43Z |
ENH: allow gzip de-compression for files specified by a url | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index b2a1e10469a0f..f0dd787654e67 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -29,6 +29,7 @@ New features
- SQL io functions now accept a SQLAlchemy connectable. (:issue:`7877`)
- Enable writing complex values to HDF stores when using table format (:issue:`10447`)
+- Enable reading gzip compressed files via URL, either by explicitly setting the compression parameter or by inferring from the presence of the HTTP Content-Encoding header in the response (:issue:`8685`)
.. _whatsnew_0170.enhancements.other:
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 65cfdff1df14b..b7b663ba61a55 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -73,7 +73,7 @@ def _is_s3_url(url):
return False
-def maybe_read_encoded_stream(reader, encoding=None):
+def maybe_read_encoded_stream(reader, encoding=None, compression=None):
"""read an encoded stream from the reader and transform the bytes to
unicode if required based on the encoding
@@ -94,8 +94,14 @@ def maybe_read_encoded_stream(reader, encoding=None):
else:
errors = 'replace'
encoding = 'utf-8'
- reader = StringIO(reader.read().decode(encoding, errors))
+
+ if compression == 'gzip':
+ reader = BytesIO(reader.read())
+ else:
+ reader = StringIO(reader.read().decode(encoding, errors))
else:
+ if compression == 'gzip':
+ reader = BytesIO(reader.read())
encoding = None
return reader, encoding
@@ -118,7 +124,8 @@ def _expand_user(filepath_or_buffer):
return filepath_or_buffer
-def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
+def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
+ compression=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer
passthru otherwise.
@@ -130,12 +137,19 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
Returns
-------
- a filepath_or_buffer, the encoding
+ a filepath_or_buffer, the encoding, the compression
"""
if _is_url(filepath_or_buffer):
req = _urlopen(str(filepath_or_buffer))
- return maybe_read_encoded_stream(req, encoding)
+ if compression == 'infer':
+ content_encoding = req.headers.get('Content-Encoding', None)
+ if content_encoding == 'gzip':
+ compression = 'gzip'
+ # cat on the compression to the tuple returned by the function
+ to_return = list(maybe_read_encoded_stream(req, encoding, compression)) + \
+ [compression]
+ return tuple(to_return)
if _is_s3_url(filepath_or_buffer):
try:
@@ -156,10 +170,9 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
k.key = parsed_url.path
filepath_or_buffer = BytesIO(k.get_contents_as_string(
encoding=encoding))
- return filepath_or_buffer, None
-
+ return filepath_or_buffer, None, compression
- return _expand_user(filepath_or_buffer), None
+ return _expand_user(filepath_or_buffer), None, compression
def file_path_to_url(path):
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 2c1333326b701..81a916e058b3d 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -172,7 +172,7 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
result : Series or DataFrame
"""
- filepath_or_buffer, _ = get_filepath_or_buffer(path_or_buf)
+ filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 847a7c4f90216..f761ea6bf62e3 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -126,7 +126,7 @@ def read_msgpack(path_or_buf, iterator=False, **kwargs):
obj : type of object stored in file
"""
- path_or_buf, _ = get_filepath_or_buffer(path_or_buf)
+ path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 62d51fc510f97..73ffefd089647 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -26,6 +26,7 @@
import pandas.tslib as tslib
import pandas.parser as _parser
+
class ParserWarning(Warning):
pass
@@ -234,8 +235,10 @@ def _read(filepath_or_buffer, kwds):
if skipfooter is not None:
kwds['skip_footer'] = skipfooter
- filepath_or_buffer, _ = get_filepath_or_buffer(filepath_or_buffer,
- encoding)
+ filepath_or_buffer, _, compression = get_filepath_or_buffer(filepath_or_buffer,
+ encoding,
+ compression=kwds.get('compression', None))
+ kwds['compression'] = compression
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
@@ -402,8 +405,9 @@ def parser_f(filepath_or_buffer,
delimiter = sep
if delim_whitespace and delimiter is not default_sep:
- raise ValueError("Specified a delimiter with both sep and"\
- " delim_whitespace=True; you can only specify one.")
+ raise ValueError("Specified a delimiter with both sep and"
+ " delim_whitespace=True; you can only"
+ " specify one.")
if engine is not None:
engine_specified = True
@@ -1711,7 +1715,7 @@ def _infer_columns(self):
num_original_columns = ncols
if not names:
if self.prefix:
- columns = [['%s%d' % (self.prefix,i) for i in range(ncols)]]
+ columns = [['%s%d' % (self.prefix, i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
@@ -2233,8 +2237,8 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None):
if index_col is None or index_col is False:
index = Index([])
else:
- index = [ np.empty(0, dtype=dtype.get(index_name, np.object))
- for index_name in index_names ]
+ index = [np.empty(0, dtype=dtype.get(index_name, np.object))
+ for index_name in index_names]
index = MultiIndex.from_arrays(index, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index db9362c5c821e..6e72c1c2f0cc0 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -932,7 +932,7 @@ def __init__(self, path_or_buf, convert_dates=True,
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
- path_or_buf, encoding = get_filepath_or_buffer(
+ path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
diff --git a/pandas/io/tests/data/salary.table.gz b/pandas/io/tests/data/salary.table.gz
new file mode 100644
index 0000000000000..629de9703d345
Binary files /dev/null and b/pandas/io/tests/data/salary.table.gz differ
diff --git a/pandas/io/tests/test_common.py b/pandas/io/tests/test_common.py
index fe163cc13c5da..34e7c94b64bcb 100644
--- a/pandas/io/tests/test_common.py
+++ b/pandas/io/tests/test_common.py
@@ -29,12 +29,12 @@ def test_expand_user_normal_path(self):
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
- filepath_or_buffer, _ = common.get_filepath_or_buffer(filename)
+ filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
self.assertNotEqual(filepath_or_buffer, filename)
self.assertNotIn('~', filepath_or_buffer)
self.assertEqual(os.path.expanduser(filename), filepath_or_buffer)
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
- filepath_or_buffer, _ = common.get_filepath_or_buffer(input_buffer)
+ filepath_or_buffer, _, _ = common.get_filepath_or_buffer(input_buffer)
self.assertEqual(filepath_or_buffer, input_buffer)
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 0f0486e8ea596..b9e9ec3a391ec 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -3070,6 +3070,7 @@ def test_whitespace_lines(self):
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
+
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
@@ -4060,6 +4061,26 @@ def test_convert_sql_column_decimals(self):
assert_same_values_and_dtype(result, expected)
+class TestUrlGz(tm.TestCase):
+ def setUp(self):
+ dirpath = tm.get_data_path()
+ localtable = os.path.join(dirpath, 'salary.table')
+ self.local_table = read_table(localtable)
+
+ @tm.network
+ def test_url_gz(self):
+ url = ('https://raw.github.com/mdagost/pandas/url_gzip_fix/'
+ 'pandas/io/tests/data/salary.table.gz')
+ url_table = read_table(url, compression="gzip", engine="python")
+ tm.assert_frame_equal(url_table, self.local_table)
+
+ @tm.network
+ def test_url_gz_infer(self):
+ url = ('https://s3.amazonaws.com/pandas-url-test/salary.table.gz')
+ url_table = read_table(url, compression="infer", engine="python")
+ tm.assert_frame_equal(url_table, self.local_table)
+
+
class TestS3(tm.TestCase):
def setUp(self):
try:
| Address #8685 . The file in the test `test_url_gz` currently points to my fork to demonstrate that the tests pass, but should be changed to point to the real pydata master after merging.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10649 | 2015-07-21T22:01:53Z | 2015-07-24T18:43:40Z | 2015-07-24T18:43:40Z | 2015-07-24T19:27:54Z |
BUG: (GH10408, GH10412) in vectorised setting of timestamp columns | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 8e03fe02bcc97..447a42c43d24e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -393,5 +393,6 @@ Bug Fixes
- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
-
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
+- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
+
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6a87f5a0b08e0..5953e783f6c4d 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2,7 +2,7 @@
import itertools
import re
import operator
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, date
from collections import defaultdict
import numpy as np
@@ -1839,7 +1839,7 @@ def _try_coerce_args(self, values, other):
if is_null_datelike_scalar(other):
other = tslib.iNaT
- elif isinstance(other, datetime):
+ elif isinstance(other, (datetime, np.datetime64, date)):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index aeb28524e5cc1..4e78e1549fb0e 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3,7 +3,7 @@
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
-from datetime import datetime, timedelta, time
+from datetime import datetime, timedelta, time, date
import sys
import operator
import re
@@ -4248,6 +4248,16 @@ def test_datetimelike_setitem_with_inference(self):
expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))
assert_series_equal(result,expected)
+ def test_setitem_datetime_coercion(self):
+ # GH 1048
+ df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')]*3})
+ df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
+ self.assertEqual(pd.Timestamp('2008-08-08'), df.loc[0, 'c'])
+ self.assertEqual(pd.Timestamp('2008-08-08'), df.loc[1, 'c'])
+ df.loc[2, 'c'] = date(2005, 5, 5)
+ self.assertEqual(pd.Timestamp('2005-05-05'), df.loc[2, 'c'])
+
+
def test_new_empty_index(self):
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 6d2c87a187995..5a1eb719270c4 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# pylint: disable=W0102
+from datetime import datetime, date
+
import nose
import numpy as np
@@ -286,6 +288,26 @@ def test_repr(self):
pass
+class TestDatetimeBlock(tm.TestCase):
+ _multiprocess_can_split_ = True
+
+ def test_try_coerce_arg(self):
+ block = create_block('datetime', [0])
+
+ # coerce None
+ none_coerced = block._try_coerce_args(block.values, None)[1]
+ self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
+
+ # coerce different types of date bojects
+ vals = (np.datetime64('2010-10-10'),
+ datetime(2010, 10, 10),
+ date(2010, 10, 10))
+ for val in vals:
+ coerced = block._try_coerce_args(block.values, val)[1]
+ self.assertEqual(np.int64, type(coerced))
+ self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
+
+
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
| closes #10408
closes #10412
Fix setting values with python datetime.date and numpy datetime64.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10644 | 2015-07-21T07:26:53Z | 2015-07-24T13:50:18Z | 2015-07-24T13:50:18Z | 2015-07-24T13:50:22Z |
CLN: Remove duplicate implementations of bind_method; typo in compat | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 20d71de28d2e2..2ac81f15a6d6c 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -14,7 +14,7 @@
* Uses the original method if available, otherwise uses items, keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
- * binary_type: str in Python 2, bythes in Python 3
+ * binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bind_method: binds functions to classes
* add_metaclass(metaclass) - class decorator that recreates class with with the
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 49db94c3bfa86..796038f0b54c4 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -113,30 +113,6 @@ def __instancecheck__(cls, inst):
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
-def bind_method(cls, name, func):
- """Bind a method to class, python 2 and python 3 compatible.
-
- Parameters
- ----------
-
- cls : type
- class to receive bound method
- name : basestring
- name of method on class instance
- func : function
- function to be bound as method
-
-
- Returns
- -------
- None
- """
- # only python 2 has bound/unbound method issue
- if not compat.PY3:
- setattr(cls, name, types.MethodType(func, None, cls))
- else:
- setattr(cls, name, func)
-
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 089ca21cb0ef3..2c6a23e492ab2 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -15,7 +15,8 @@
import pandas.computation.expressions as expressions
from pandas.lib import isscalar
from pandas.tslib import iNaT
-from pandas.core.common import(bind_method, is_list_like, notnull, isnull,
+from pandas.compat import bind_method
+from pandas.core.common import(is_list_like, notnull, isnull,
_values_from_object, _maybe_match_name,
needs_i8_conversion, is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype, is_object_dtype,
| #10566 removed one of the implementations of `bind_method` and fixed a typo
| https://api.github.com/repos/pandas-dev/pandas/pulls/10643 | 2015-07-21T06:17:08Z | 2015-07-21T11:37:51Z | 2015-07-21T11:37:51Z | 2015-07-21T11:37:54Z |
BUG: Fixed typo-related bug to resolve #9266 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 206c5e2e22711..8b8ea0c38f613 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -390,3 +390,5 @@ Bug Fixes
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
+
+- Bug in `_convert_to_ndarrays` which caused an `AttributeError` to be raised at times (:issue:`9266`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 62d51fc510f97..ad9823da5851b 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -995,7 +995,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
try:
values = lib.map_infer(values, conv_f)
except ValueError:
- mask = lib.ismember(values, na_values).view(np.uin8)
+ mask = lib.ismember(values, na_values).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
coerce_type = False
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 0f0486e8ea596..baee082f16abd 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2654,6 +2654,27 @@ def test_fwf_regression(self):
res = df.loc[:,c]
self.assertTrue(len(res))
+ def test_fwf_for_uint8(self):
+ data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
+1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
+ df = read_fwf(StringIO(data),
+ colspecs=[(0,17),(25,26),(33,37),(49,51),(58,62),(63,1000)],
+ names=['time','pri','pgn','dst','src','data'],
+ converters={
+ 'pgn':lambda x: int(x,16),
+ 'src':lambda x: int(x,16),
+ 'dst':lambda x: int(x,16),
+ 'data':lambda x: len(x.split(' '))})
+
+ expected = DataFrame([[1421302965.213420,3,61184,23,40,8],
+ [1421302964.226776,6,61442,None, 71,8]],
+ columns = ["time", "pri", "pgn", "dst", "src","data"])
+
+ # Hacky fix for dst column dtype
+ expected["dst"] = expected["dst"].astype(object)
+
+ tm.assert_frame_equal(df, expected)
+
def test_fwf_compression(self):
try:
import gzip
| Resubmitting pull request #10576 to resolve issue #9266 .
@jreback — This should be good to merge.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10642 | 2015-07-21T06:13:46Z | 2015-07-21T10:53:01Z | null | 2015-07-21T17:33:46Z |
ENH: allow duplicate column names if they are not merged upon | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index b2a1e10469a0f..66edd6216e83e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -80,6 +80,8 @@ Other enhancements
- ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`).
+- ``pd.merge`` will now allow duplicate column names if they are not merged upon (:issue:`10639`).
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index c7c578232cd0f..430828a3db31b 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -402,19 +402,14 @@ def _validate_specification(self):
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
- if not self.left.columns.is_unique:
- raise MergeError("Left data columns not unique: %s"
- % repr(self.left.columns))
-
- if not self.right.columns.is_unique:
- raise MergeError("Right data columns not unique: %s"
- % repr(self.right.columns))
-
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
+ if not common_cols.is_unique:
+ raise MergeError("Data columns not unique: %s"
+ % repr(common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index d357182a60b1f..cd3581273b74d 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -843,7 +843,6 @@ def test_join_append_timedeltas(self):
assert_frame_equal(result, expected)
def test_overlapping_columns_error_message(self):
- # #2649
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
@@ -853,7 +852,16 @@ def test_overlapping_columns_error_message(self):
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
+ expected = DataFrame({'key': [1, 2, 3],
+ 'v1': [4, 5, 6],
+ 'v2': [7, 8, 9],
+ 'v3': [4, 5, 6],
+ 'v4': [7, 8, 9]})
+ expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']
+ assert_frame_equal(merge(df, df2), expected)
+ # #2649
+ df2.columns = ['key1', 'foo', 'foo']
self.assertRaises(ValueError, merge, df, df2)
def _check_merge(x, y):
| Currently merge columns are not automatically inferred when there are duplicate column names. Instead a `MergeError` is raised. This pull request enables merging with duplicate column names as long as they are not merged upon.
```
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2.columns = ['key', 'bar', 'bar']
# worked before
merge(df, df2, on=['key'])
# now works as well since 'foo' and 'bar' don't interfere
merge(df, df2)
```
If the common cols are not unique a `MergeError` is still raised as in #2649
| https://api.github.com/repos/pandas-dev/pandas/pulls/10639 | 2015-07-20T13:20:42Z | 2015-07-28T10:08:42Z | null | 2015-07-28T10:08:42Z |
CLN: Make ufunc works for Index | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index c0b26ad0c03d7..f88e5c0a11f9f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -309,6 +309,8 @@ Other enhancements
- ``DataFrame.apply`` will return a Series of dicts if the passed function returns a dict and ``reduce=True`` (:issue:`8735`).
+- ``PeriodIndex`` now supports arithmetic with ``np.ndarray`` (:issue:`10638`)
+
- ``concat`` will now use existing Series names if provided (:issue:`10698`).
.. ipython:: python
@@ -333,6 +335,7 @@ Other enhancements
pd.concat([foo, bar, baz], 1)
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
@@ -1005,3 +1008,5 @@ Bug Fixes
- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`)
- Bug in ``.var()`` causing roundoff errors for highly similar values (:issue:`10242`)
- Bug in ``DataFrame.plot(subplots=True)`` with duplicated columns outputs incorrect result (:issue:`10962`)
+- Bug in ``Index`` arithmetic may result in incorrect class (:issue:`10638`)
+
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ef167489435b3..c64e181f4c721 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -273,7 +273,12 @@ def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
- return self._shallow_copy(result)
+ if is_bool_dtype(result):
+ return result
+
+ attrs = self._get_attributes_dict()
+ attrs = self._maybe_update_attributes(attrs)
+ return Index(result, **attrs)
@cache_readonly
def dtype(self):
@@ -2809,6 +2814,10 @@ def invalid_op(self, other=None):
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
+ def _maybe_update_attributes(self, attrs):
+ """ Update Index attributes (e.g. freq) depending on op """
+ return attrs
+
@classmethod
def _add_numeric_methods(cls):
""" add in numeric methods """
@@ -2849,7 +2858,9 @@ def _evaluate_numeric_binop(self, other):
if reversed:
values, other = other, values
- return self._shallow_copy(op(values, other))
+ attrs = self._get_attributes_dict()
+ attrs = self._maybe_update_attributes(attrs)
+ return Index(op(values, other), **attrs)
return _evaluate_numeric_binop
@@ -2861,8 +2872,9 @@ def _evaluate_numeric_unary(self):
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
-
- return self._shallow_copy(op(self.values))
+ attrs = self._get_attributes_dict()
+ attrs = self._maybe_update_attributes(attrs)
+ return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 9b0d6e9db1106..8a879a4de248b 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -613,7 +613,8 @@ def wrapper(left, right, name=name, na_op=na_op):
else:
# scalars
if hasattr(lvalues, 'values') and not isinstance(lvalues, pd.DatetimeIndex):
- lvalues = lvalues.values
+ lvalues = lvalues.values
+
return left._constructor(wrap_results(na_op(lvalues, rvalues)),
index=left.index, name=left.name,
dtype=dtype)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 6e7a72360ab67..36bc0755f9a6a 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -509,6 +509,56 @@ def test_equals_op(self):
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_numpy_array_equal(series_a == item, expected3)
+ def test_numpy_ufuncs(self):
+ # test ufuncs of numpy 1.9.2. see:
+ # http://docs.scipy.org/doc/numpy/reference/ufuncs.html
+
+ # some functions are skipped because it may return different result
+ # for unicode input depending on numpy version
+
+ for name, idx in compat.iteritems(self.indices):
+ for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
+ np.log1p, np.sqrt, np.sin, np.cos,
+ np.tan, np.arcsin, np.arccos, np.arctan,
+ np.sinh, np.cosh, np.tanh, np.arcsinh, np.arccosh,
+ np.arctanh, np.deg2rad, np.rad2deg]:
+ if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):
+ # raise TypeError or ValueError (PeriodIndex)
+ # PeriodIndex behavior should be changed in future version
+ with tm.assertRaises(Exception):
+ func(idx)
+ elif isinstance(idx, (Float64Index, Int64Index)):
+ # coerces to float (e.g. np.sin)
+ result = func(idx)
+ exp = Index(func(idx.values), name=idx.name)
+ self.assert_index_equal(result, exp)
+ self.assertIsInstance(result, pd.Float64Index)
+ else:
+ # raise AttributeError or TypeError
+ if len(idx) == 0:
+ continue
+ else:
+ with tm.assertRaises(Exception):
+ func(idx)
+
+ for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
+ if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):
+ # raise TypeError or ValueError (PeriodIndex)
+ with tm.assertRaises(Exception):
+ func(idx)
+ elif isinstance(idx, (Float64Index, Int64Index)):
+ # results in bool array
+ result = func(idx)
+ exp = func(idx.values)
+ self.assertIsInstance(result, np.ndarray)
+ tm.assertNotIsInstance(result, Index)
+ else:
+ if len(idx) == 0:
+ continue
+ else:
+ with tm.assertRaises(Exception):
+ func(idx)
+
class TestIndex(Base, tm.TestCase):
_holder = Index
@@ -2848,6 +2898,41 @@ def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
+ def test_ufunc_coercions(self):
+ idx = pd.Int64Index([1, 2, 3, 4, 5], name='x')
+
+ result = np.sqrt(idx)
+ tm.assertIsInstance(result, Float64Index)
+ exp = pd.Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')
+ tm.assert_index_equal(result, exp)
+
+ result = np.divide(idx, 2.)
+ tm.assertIsInstance(result, Float64Index)
+ exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
+ tm.assert_index_equal(result, exp)
+
+ # _evaluate_numeric_binop
+ result = idx + 2.
+ tm.assertIsInstance(result, Float64Index)
+ exp = pd.Float64Index([3., 4., 5., 6., 7.], name='x')
+ tm.assert_index_equal(result, exp)
+
+ result = idx - 2.
+ tm.assertIsInstance(result, Float64Index)
+ exp = pd.Float64Index([-1., 0., 1., 2., 3.], name='x')
+ tm.assert_index_equal(result, exp)
+
+ result = idx * 1.
+ tm.assertIsInstance(result, Float64Index)
+ exp = pd.Float64Index([1., 2., 3., 4., 5.], name='x')
+ tm.assert_index_equal(result, exp)
+
+ result = idx / 2.
+ tm.assertIsInstance(result, Float64Index)
+ exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
+ tm.assert_index_equal(result, exp)
+
+
class DatetimeLike(Base):
def test_str(self):
@@ -3101,7 +3186,9 @@ def test_get_loc(self):
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
- with tm.assertRaisesRegexp(ValueError, 'different freq'):
+
+ msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
@@ -3119,7 +3206,8 @@ def test_get_indexer(self):
idx.get_indexer(target, 'nearest', tolerance='1 hour'),
[0, -1, 1])
- with self.assertRaisesRegexp(ValueError, 'different freq'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
+ with self.assertRaisesRegexp(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(
@@ -3215,6 +3303,44 @@ def test_numeric_compat(self):
def test_pickle_compat_construction(self):
pass
+ def test_ufunc_coercions(self):
+ # normal ops are also tested in tseries/test_timedeltas.py
+ idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
+ freq='2H', name='x')
+
+ for result in [idx * 2, np.multiply(idx, 2)]:
+ tm.assertIsInstance(result, TimedeltaIndex)
+ exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
+ freq='4H', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, '4H')
+
+ for result in [idx / 2, np.divide(idx, 2)]:
+ tm.assertIsInstance(result, TimedeltaIndex)
+ exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
+ freq='H', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, 'H')
+
+ idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
+ freq='2H', name='x')
+ for result in [ - idx, np.negative(idx)]:
+ tm.assertIsInstance(result, TimedeltaIndex)
+ exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
+ freq='-2H', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, None)
+
+ idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
+ freq='H', name='x')
+ for result in [ abs(idx), np.absolute(idx)]:
+ tm.assertIsInstance(result, TimedeltaIndex)
+ exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
+ freq=None, name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, None)
+
+
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 4ba15d319dc62..966bd5c8d0ab5 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1077,15 +1077,6 @@ def _fast_union(self, other):
end=max(left_end, right_end),
freq=left.offset)
- def __array_finalize__(self, obj):
- if self.ndim == 0: # pragma: no cover
- return self.item()
-
- self.offset = getattr(obj, 'offset', None)
- self.tz = getattr(obj, 'tz', None)
- self.name = getattr(obj, 'name', None)
- self._reset_identity()
-
def __iter__(self):
"""
Return an iterator over the boxed values
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 832791fc6933c..888c50e86b7b2 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -19,7 +19,8 @@
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
- is_integer, is_float, is_object_dtype)
+ is_integer, is_float, is_object_dtype,
+ is_float_dtype)
from pandas import compat
from pandas.util.decorators import cache_readonly
@@ -307,6 +308,30 @@ def __contains__(self, key):
return False
return key.ordinal in self._engine
+ def __array_wrap__(self, result, context=None):
+ """
+ Gets called after a ufunc. Needs additional handling as
+ PeriodIndex stores internal data as int dtype
+
+ Replace this to __numpy_ufunc__ in future version
+ """
+ if isinstance(context, tuple) and len(context) > 0:
+ func = context[0]
+ if (func is np.add):
+ return self._add_delta(context[1][1])
+ elif (func is np.subtract):
+ return self._add_delta(-context[1][1])
+ elif isinstance(func, np.ufunc):
+ if 'M->M' not in func.types:
+ msg = "ufunc '{0}' not supported for the PeriodIndex"
+ # This should be TypeError, but TypeError cannot be raised
+ # from here because numpy catches.
+ raise ValueError(msg.format(func.__name__))
+
+ if com.is_bool_dtype(result):
+ return result
+ return PeriodIndex(result, freq=self.freq, name=self.name)
+
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
@@ -522,7 +547,18 @@ def _maybe_convert_timedelta(self, other):
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
- raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq))
+ elif isinstance(other, np.ndarray):
+ if com.is_integer_dtype(other):
+ return other
+ elif com.is_timedelta64_dtype(other):
+ offset = frequencies.to_offset(self.freq)
+ if isinstance(offset, offsets.Tick):
+ nanos = tslib._delta_to_nanoseconds(other)
+ offset_nanos = tslib._delta_to_nanoseconds(offset)
+ if (nanos % offset_nanos).all() == 0:
+ return nanos // offset_nanos
+ msg = "Input has different freq from PeriodIndex(freq={0})"
+ raise ValueError(msg.format(self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
@@ -775,14 +811,6 @@ def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs):
values[imask] = np.array([formatter(dt) for dt in values[imask]])
return values
- def __array_finalize__(self, obj):
- if not self.ndim: # pragma: no cover
- return self.item()
-
- self.freq = getattr(obj, 'freq', None)
- self.name = getattr(obj, 'name', None)
- self._reset_identity()
-
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index 984f2a1cec706..0f6355ec93554 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -278,6 +278,14 @@ def __setstate__(self, state):
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
+ def _maybe_update_attributes(self, attrs):
+ """ Update Index attributes (e.g. freq) depending on op """
+ freq = attrs.get('freq', None)
+ if freq is not None:
+ # no need to infer if freq is None
+ attrs['freq'] = 'infer'
+ return attrs
+
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
@@ -560,14 +568,6 @@ def _fast_union(self, other):
else:
return left
- def __array_finalize__(self, obj):
- if self.ndim == 0: # pragma: no cover
- return self.item()
-
- self.name = getattr(obj, 'name', None)
- self.freq = getattr(obj, 'freq', None)
- self._reset_identity()
-
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 4c9726bbcf80d..4a72b094917b5 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -1391,6 +1391,7 @@ def test_add_iadd(self):
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
+ msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
@@ -1404,7 +1405,8 @@ def test_add_iadd(self):
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
rng + o
# Tick
@@ -1422,7 +1424,8 @@ def test_add_iadd(self):
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
@@ -1439,9 +1442,10 @@ def test_add_iadd(self):
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
result = rng + delta
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ with tm.assertRaisesRegexp(ValueError, msg):
rng += delta
# int
@@ -1502,7 +1506,8 @@ def test_sub_isub(self):
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
@@ -1515,7 +1520,8 @@ def test_sub_isub(self):
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
rng - o
# Tick
@@ -1532,7 +1538,8 @@ def test_sub_isub(self):
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
@@ -1547,9 +1554,10 @@ def test_sub_isub(self):
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
+ with tm.assertRaisesRegexp(ValueError, msg):
result = rng + delta
- with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ with tm.assertRaisesRegexp(ValueError, msg):
rng += delta
# int
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 4b5d5dfedeee7..951bb803ef793 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -2321,6 +2321,17 @@ def test_shift_nat(self):
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
+ def test_shift_ndarray(self):
+ idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
+ result = idx.shift(np.array([1, 2, 3, 4]))
+ expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
+ self.assertTrue(result.equals(expected))
+
+ idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
+ result = idx.shift(np.array([1, -2, 3, -4]))
+ expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
+ self.assertTrue(result.equals(expected))
+
def test_asfreq(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
@@ -3337,6 +3348,53 @@ def test_pi_ops_nat(self):
with tm.assertRaisesRegexp(TypeError, msg):
idx + "str"
+ def test_pi_ops_array(self):
+ idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
+ result = idx + np.array([1, 2, 3, 4])
+ exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
+ self.assert_index_equal(result, exp)
+
+ result = np.add(idx, np.array([4, -1, 1, 2]))
+ exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'], freq='M', name='idx')
+ self.assert_index_equal(result, exp)
+
+ result = idx - np.array([1, 2, 3, 4])
+ exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
+ self.assert_index_equal(result, exp)
+
+ result = np.subtract(idx, np.array([3, 2, 3, -2]))
+ exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'], freq='M', name='idx')
+ self.assert_index_equal(result, exp)
+
+ # incompatible freq
+ msg = "Input has different freq from PeriodIndex\(freq=M\)"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ idx + np.array([np.timedelta64(1, 'D')] * 4)
+
+ idx = PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00', 'NaT',
+ '2011-01-01 12:00'], freq='H', name='idx')
+ result = idx + np.array([np.timedelta64(1, 'D')] * 4)
+ exp = PeriodIndex(['2011-01-02 09:00', '2011-01-02 10:00', 'NaT',
+ '2011-01-02 12:00'], freq='H', name='idx')
+ self.assert_index_equal(result, exp)
+
+ result = idx - np.array([np.timedelta64(1, 'h')] * 4)
+ exp = PeriodIndex(['2011-01-01 08:00', '2011-01-01 09:00', 'NaT',
+ '2011-01-01 11:00'], freq='H', name='idx')
+ self.assert_index_equal(result, exp)
+
+ msg = "Input has different freq from PeriodIndex\(freq=H\)"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ idx + np.array([np.timedelta64(1, 's')] * 4)
+
+ idx = PeriodIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', 'NaT',
+ '2011-01-01 12:00:00'], freq='S', name='idx')
+ result = idx + np.array([np.timedelta64(1, 'h'), np.timedelta64(30, 's'),
+ np.timedelta64(2, 'h'), np.timedelta64(15, 'm')])
+ exp = PeriodIndex(['2011-01-01 10:00:00', '2011-01-01 10:00:30', 'NaT',
+ '2011-01-01 12:15:00'], freq='S', name='idx')
+ self.assert_index_equal(result, exp)
+
class TestPeriodRepresentation(tm.TestCase):
"""
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index a2a8f1484f70e..d3d09356648b0 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -960,7 +960,7 @@ def test_total_seconds(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, freq='s')
expt = [1*86400+10*3600+11*60+12+100123456./1e9,1*86400+10*3600+11*60+13+100123456./1e9]
assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0)
-
+
# test Series
s = Series(rng)
s_expt = Series(expt,index=[0,1])
@@ -970,7 +970,7 @@ def test_total_seconds(self):
s[1] = np.nan
s_expt = Series([1*86400+10*3600+11*60+12+100123456./1e9,np.nan],index=[0,1])
tm.assert_series_equal(s.dt.total_seconds(),s_expt)
-
+
# with both nat
s = Series([np.nan,np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(),Series([np.nan,np.nan],index=[0,1]))
@@ -980,7 +980,7 @@ def test_total_seconds_scalar(self):
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1*86400+10*3600+11*60+12+100123456./1e9
assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0)
-
+
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
@@ -1513,6 +1513,44 @@ def test_slice_with_zero_step_raises(self):
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
+ def test_tdi_ops_attributes(self):
+ rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
+
+ result = rng + 1
+ exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, '2D')
+
+ result = rng -2
+ exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, '2D')
+
+ result = rng * 2
+ exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, '4D')
+
+ result = rng / 2
+ exp = timedelta_range('1 days', periods=5, freq='D', name='x')
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.freq, 'D')
+
+ result = - rng
+ exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
+ tm.assert_index_equal(result, exp)
+ # tdi doesn't infer negative freq
+ self.assertEqual(result.freq, None)
+
+ rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
+
+ result = abs(rng)
+ exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
+ '2 days'], name='x')
+ tm.assert_index_equal(result, exp)
+ # tdi doesn't infer negative freq
+ self.assertEqual(result.freq, None)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 7741747103c55..def3764c1113c 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -1088,6 +1088,8 @@ cdef class _NaT(_Timestamp):
def _delta_to_nanoseconds(delta):
+ if isinstance(delta, np.ndarray):
+ return delta.astype('m8[ns]').astype('int64')
if hasattr(delta, 'nanos'):
return delta.nanos
if hasattr(delta, 'delta'):
| closes #9966, #9974 (PR)
I understand these are never used because `Index` is no longer the subclass of `np.array`. Correct?
- [x] Add tests for ~~all~~ almost ufuncs
- http://docs.scipy.org/doc/numpy/reference/ufuncs.html
- ~~CategoricalIndex~~: Needs to be done separately to fix `Categorical`, because number of categories can be changed.
```
np.sin(pd.Categorical([1, 2, 3]))
array([ 0.84147098, 0.90929743, 0.14112001])
```
- [x] MultiIndex: Raise `TypeError` or `AttributeError`, as ufuncs are performed to array of tuples.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10638 | 2015-07-20T13:16:06Z | 2015-09-06T16:31:07Z | 2015-09-06T16:31:07Z | 2015-09-06T17:31:30Z |
BUG: made behavior of operator equal for CategoricalIndex consistent,… | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d59b6120163ff..d311e9e8c7338 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -155,7 +155,7 @@ in the method call.
Changes to Index Comparisons
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Operator equal on Index should behavior similarly to Series (:issue:`9947`)
+Operator equal on Index should behavior similarly to Series (:issue:`9947`, :issue:`10637`)
Starting in v0.17.0, comparing ``Index`` objects of different lengths will raise
a ``ValueError``. This is to be consistent with the behavior of ``Series``.
@@ -390,7 +390,6 @@ Bug Fixes
-- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 98e0214dbf073..f45170bf4f396 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -3260,8 +3260,12 @@ def _evaluate_compare(self, other):
elif isinstance(other, Index):
other = self._create_categorical(self, other.values, categories=self.categories, ordered=self.ordered)
+ if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)):
+ if len(self.values) != len(other):
+ raise ValueError("Lengths must match to compare")
+
if isinstance(other, ABCCategorical):
- if not (self.values.is_dtype_equal(other) and len(self.values) == len(other)):
+ if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisions must have the same categories and ordered attributes")
return getattr(self.values, op)(other)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 00f120289601f..0b592368e2a1c 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -396,6 +396,66 @@ def test_symmetric_diff(self):
with tm.assertRaisesRegexp(TypeError, msg):
result = first.sym_diff([1, 2, 3])
+ def test_equals_op(self):
+ # GH9947, GH10637
+ index_a = self.create_index()
+ if isinstance(index_a, PeriodIndex):
+ return
+
+ n = len(index_a)
+ index_b = index_a[0:-1]
+ index_c = index_a[0:-1].append(index_a[-2:-1])
+ index_d = index_a[0:1]
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ index_a == index_b
+ expected1 = np.array([True] * n)
+ expected2 = np.array([True] * (n - 1) + [False])
+ assert_numpy_array_equivalent(index_a == index_a, expected1)
+ assert_numpy_array_equivalent(index_a == index_c, expected2)
+
+ # test comparisons with numpy arrays
+ array_a = np.array(index_a)
+ array_b = np.array(index_a[0:-1])
+ array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
+ array_d = np.array(index_a[0:1])
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ index_a == array_b
+ assert_numpy_array_equivalent(index_a == array_a, expected1)
+ assert_numpy_array_equivalent(index_a == array_c, expected2)
+
+ # test comparisons with Series
+ series_a = Series(array_a)
+ series_b = Series(array_b)
+ series_c = Series(array_c)
+ series_d = Series(array_d)
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ index_a == series_b
+ assert_numpy_array_equivalent(index_a == series_a, expected1)
+ assert_numpy_array_equivalent(index_a == series_c, expected2)
+
+ # cases where length is 1 for one of them
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ index_a == index_d
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ index_a == series_d
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ index_a == array_d
+ with tm.assertRaisesRegexp(ValueError, "Series lengths must match"):
+ series_a == series_d
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ series_a == array_d
+
+ # comparing with a scalar should broadcast; note that we are excluding
+ # MultiIndex because in this case each item in the index is a tuple of
+ # length 2, and therefore is considered an array of length 2 in the
+ # comparison instead of a scalar
+ if not isinstance(index_a, MultiIndex):
+ expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
+ # assuming the 2nd to last item is unique in the data
+ item = index_a[-2]
+ assert_numpy_array_equivalent(index_a == item, expected3)
+ assert_numpy_array_equivalent(series_a == item, expected3)
+
class TestIndex(Base, tm.TestCase):
_holder = Index
@@ -1548,54 +1608,7 @@ def test_groupby(self):
exp = {1: [0, 1], 2: [2, 3, 4]}
tm.assert_dict_equal(groups, exp)
- def test_equals_op(self):
- # GH9947
- index_a = Index(['foo', 'bar', 'baz'])
- index_b = Index(['foo', 'bar', 'baz', 'qux'])
- index_c = Index(['foo', 'bar', 'qux'])
- index_d = Index(['foo'])
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- index_a == index_b
- assert_numpy_array_equivalent(index_a == index_a, np.array([True, True, True]))
- assert_numpy_array_equivalent(index_a == index_c, np.array([True, True, False]))
-
- # test comparisons with numpy arrays
- array_a = np.array(['foo', 'bar', 'baz'])
- array_b = np.array(['foo', 'bar', 'baz', 'qux'])
- array_c = np.array(['foo', 'bar', 'qux'])
- array_d = np.array(['foo'])
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- index_a == array_b
- assert_numpy_array_equivalent(index_a == array_a, np.array([True, True, True]))
- assert_numpy_array_equivalent(index_a == array_c, np.array([True, True, False]))
-
- # test comparisons with Series
- series_a = Series(['foo', 'bar', 'baz'])
- series_b = Series(['foo', 'bar', 'baz', 'qux'])
- series_c = Series(['foo', 'bar', 'qux'])
- series_d = Series(['foo'])
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- index_a == series_b
- assert_numpy_array_equivalent(index_a == series_a, np.array([True, True, True]))
- assert_numpy_array_equivalent(index_a == series_c, np.array([True, True, False]))
-
- # cases where length is 1 for one of them
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- index_a == index_d
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- index_a == series_d
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- index_a == array_d
- with tm.assertRaisesRegexp(ValueError, "Series lengths must match"):
- series_a == series_d
- with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
- series_a == array_d
-
- # comparing with scalar should broadcast
- assert_numpy_array_equivalent(index_a == 'foo', np.array([True, False, False]))
- assert_numpy_array_equivalent(series_a == 'foo', np.array([True, False, False]))
- assert_numpy_array_equivalent(array_a == 'foo', np.array([True, False, False]))
-
+ def test_equals_op_multiindex(self):
# GH9785
# test comparisons of multiindex
from pandas.compat import StringIO
@@ -1609,6 +1622,8 @@ def test_equals_op(self):
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == mi3
+
+ index_a = Index(['foo', 'bar', 'baz'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == index_a
assert_numpy_array_equivalent(index_a == mi3, np.array([False, False, False]))
@@ -1966,7 +1981,8 @@ def test_equals(self):
self.assertTrue((ci1 == ci1.values).all())
# invalid comparisons
- self.assertRaises(TypeError, lambda : ci1 == Index(['a','b','c']))
+ with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
+ ci1 == Index(['a','b','c'])
self.assertRaises(TypeError, lambda : ci1 == ci2)
self.assertRaises(TypeError, lambda : ci1 == Categorical(ci1.values, ordered=False))
self.assertRaises(TypeError, lambda : ci1 == Categorical(ci1.values, categories=list('abc')))
@@ -2082,7 +2098,7 @@ def setUp(self):
self.setup_indices()
def create_index(self):
- return Float64Index(np.arange(5,dtype='float64'))
+ return Float64Index(np.arange(5, dtype='float64'))
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
@@ -2253,7 +2269,7 @@ def setUp(self):
self.setup_indices()
def create_index(self):
- return Int64Index(np.arange(5,dtype='int64'))
+ return Int64Index(np.arange(5, dtype='int64'))
def test_too_many_names(self):
def testit():
@@ -2743,7 +2759,7 @@ def setUp(self):
self.setup_indices()
def create_index(self):
- return date_range('20130101',periods=5)
+ return date_range('20130101', periods=5)
def test_pickle_compat_construction(self):
pass
@@ -2936,7 +2952,7 @@ def setUp(self):
self.setup_indices()
def create_index(self):
- return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
+ return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
| … improved unit tests
this is a follow-up to https://github.com/pydata/pandas/pull/9947
@jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/10637 | 2015-07-20T13:09:31Z | 2015-07-24T13:44:14Z | 2015-07-24T13:44:14Z | 2015-07-24T13:46:23Z |
Remove Categorical.name | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index b2a1e10469a0f..75314debe4c63 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -270,6 +270,7 @@ Other API Changes
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`).
+- ``Categorical.name`` was removed to make `Categorical` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
- ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`)
=========================== ==============================================================
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 264e7aea3fa0a..1d1f0d7da80e4 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -147,9 +147,6 @@ class Categorical(PandasObject):
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical. If not given,
the resulting categorical will not be ordered.
- name : str, optional
- Name for the Categorical variable. If name is None, will attempt
- to infer from values.
Attributes
----------
@@ -159,8 +156,6 @@ class Categorical(PandasObject):
The codes (integer positions, which point to the categories) of this categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
- name : string
- The name of this Categorical.
Raises
------
@@ -205,7 +200,6 @@ class Categorical(PandasObject):
# For comparisons, so that numpy uses our implementation if the compare ops, which raise
__array_priority__ = 1000
_typ = 'categorical'
- name = None
def __init__(self, values, categories=None, ordered=False, name=None, fastpath=False,
levels=None):
@@ -213,23 +207,24 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
if fastpath:
# fast path
self._codes = _coerce_indexer_dtype(values, categories)
- self.name = name
self.categories = categories
self._ordered = ordered
return
- if name is None:
- name = getattr(values, 'name', None)
+ if not name is None:
+ msg = "the 'name' keyword is removed, use 'name' with consumers of the " \
+ "categorical instead (e.g. 'Series(cat, name=\"something\")'"
+ warn(msg, UserWarning, stacklevel=2)
# TODO: Remove after deprecation period in 2017/ after 0.18
if not levels is None:
warn("Creating a 'Categorical' with 'levels' is deprecated, use 'categories' instead",
- FutureWarning)
+ FutureWarning, stacklevel=2)
if categories is None:
categories = levels
else:
raise ValueError("Cannot pass in both 'categories' and (deprecated) 'levels', "
- "use only 'categories'")
+ "use only 'categories'", stacklevel=2)
# sanitize input
if is_categorical_dtype(values):
@@ -293,21 +288,20 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
# TODO: check for old style usage. These warnings should be removes after 0.18/ in 2016
if is_integer_dtype(values) and not is_integer_dtype(categories):
warn("Values and categories have different dtypes. Did you mean to use\n"
- "'Categorical.from_codes(codes, categories)'?", RuntimeWarning)
+ "'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
if len(values) and is_integer_dtype(values) and (codes == -1).all():
warn("None of the categories were found in values. Did you mean to use\n"
- "'Categorical.from_codes(codes, categories)'?", RuntimeWarning)
+ "'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
self.set_ordered(ordered or False, inplace=True)
self.categories = categories
- self.name = name
self._codes = _coerce_indexer_dtype(codes, categories)
def copy(self):
""" Copy constructor. """
return Categorical(values=self._codes.copy(),categories=self.categories,
- name=self.name, ordered=self.ordered, fastpath=True)
+ ordered=self.ordered, fastpath=True)
def astype(self, dtype):
""" coerce this type to another dtype """
@@ -373,9 +367,12 @@ def from_codes(cls, codes, categories, ordered=False, name=None):
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical. If not given,
the resulting categorical will be unordered.
- name : str, optional
- Name for the Categorical variable.
"""
+ if not name is None:
+ msg = "the 'name' keyword is removed, use 'name' with consumers of the " \
+ "categorical instead (e.g. 'Series(cat, name=\"something\")'"
+ warn(msg, UserWarning, stacklevel=2)
+
try:
codes = np.asarray(codes, np.int64)
except:
@@ -386,7 +383,7 @@ def from_codes(cls, codes, categories, ordered=False, name=None):
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
- return Categorical(codes, categories=categories, ordered=ordered, name=name, fastpath=True)
+ return Categorical(codes, categories=categories, ordered=ordered, fastpath=True)
_codes = None
@@ -416,8 +413,7 @@ def _get_labels(self):
Deprecated, use .codes!
"""
- import warnings
- warnings.warn("'labels' is deprecated. Use 'codes' instead", FutureWarning)
+ warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=3)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
@@ -464,12 +460,12 @@ def _get_categories(self):
def _set_levels(self, levels):
""" set new levels (deprecated, use "categories") """
- warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning)
+ warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=3)
self.categories = levels
def _get_levels(self):
""" Gets the levels (deprecated, use "categories") """
- warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning)
+ warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=3)
return self.categories
# TODO: Remove after deprecation period in 2017/ after 0.18
@@ -479,7 +475,8 @@ def _get_levels(self):
def _set_ordered(self, value):
""" Sets the ordered attribute to the boolean value """
- warn("Setting 'ordered' directly is deprecated, use 'set_ordered'", FutureWarning)
+ warn("Setting 'ordered' directly is deprecated, use 'set_ordered'", FutureWarning,
+ stacklevel=3)
self.set_ordered(value, inplace=True)
def set_ordered(self, value, inplace=False):
@@ -1140,7 +1137,7 @@ def order(self, inplace=False, ascending=True, na_position='last'):
return
else:
return Categorical(values=codes,categories=self.categories, ordered=self.ordered,
- name=self.name, fastpath=True)
+ fastpath=True)
def sort(self, inplace=True, ascending=True, na_position='last'):
@@ -1266,7 +1263,7 @@ def fillna(self, value=None, method=None, limit=None):
values[mask] = self.categories.get_loc(value)
return Categorical(values, categories=self.categories, ordered=self.ordered,
- name=self.name, fastpath=True)
+ fastpath=True)
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value.
@@ -1280,7 +1277,7 @@ def take_nd(self, indexer, allow_fill=True, fill_value=None):
codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
result = Categorical(codes, categories=self.categories, ordered=self.ordered,
- name=self.name, fastpath=True)
+ fastpath=True)
return result
take = take_nd
@@ -1300,7 +1297,7 @@ def _slice(self, slicer):
_codes = self._codes[slicer]
return Categorical(values=_codes,categories=self.categories, ordered=self.ordered,
- name=self.name, fastpath=True)
+ fastpath=True)
def __len__(self):
"""The length of this Categorical."""
@@ -1313,9 +1310,8 @@ def __iter__(self):
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default footer) """
num = max_vals // 2
- head = self[:num]._get_repr(length=False, name=False, footer=False)
+ head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False,
- name=False,
footer=False)
result = '%s, ..., %s' % (head[:-1], tail[1:])
@@ -1369,14 +1365,11 @@ def _repr_categories_info(self):
def _repr_footer(self):
- namestr = "Name: %s, " % self.name if self.name is not None else ""
- return u('%sLength: %d\n%s') % (namestr,
- len(self), self._repr_categories_info())
+ return u('Length: %d\n%s') % (len(self), self._repr_categories_info())
- def _get_repr(self, name=False, length=True, na_rep='NaN', footer=True):
+ def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.core import format as fmt
formatter = fmt.CategoricalFormatter(self,
- name=name,
length=length,
na_rep=na_rep,
footer=footer)
@@ -1389,11 +1382,9 @@ def __unicode__(self):
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
- result = self._get_repr(length=len(self) > _maxlen,
- name=True)
+ result = self._get_repr(length=len(self) > _maxlen)
else:
- result = '[], %s' % self._get_repr(name=True,
- length=False,
+ result = '[], %s' % self._get_repr(length=False,
footer=True,
).replace("\n",", ")
@@ -1562,8 +1553,7 @@ def mode(self):
import pandas.hashtable as htable
good = self._codes != -1
result = Categorical(sorted(htable.mode_int64(_ensure_int64(self._codes[good]))),
- categories=self.categories,ordered=self.ordered, name=self.name,
- fastpath=True)
+ categories=self.categories,ordered=self.ordered, fastpath=True)
return result
def unique(self):
@@ -1586,8 +1576,6 @@ def equals(self, other):
"""
Returns True if categorical arrays are equal.
- The name of the `Categorical` is not compared!
-
Parameters
----------
other : `Categorical`
@@ -1596,7 +1584,6 @@ def equals(self, other):
-------
are_equal : boolean
"""
- # TODO: should this also test if name is equal?
return self.is_dtype_equal(other) and np.array_equal(self._codes, other._codes)
def is_dtype_equal(self, other):
@@ -1647,7 +1634,7 @@ def repeat(self, repeats):
"""
codes = self._codes.repeat(repeats)
return Categorical(values=codes, categories=self.categories,
- ordered=self.ordered, name=self.name, fastpath=True)
+ ordered=self.ordered, fastpath=True)
##### The Series.cat accessor #####
@@ -1696,7 +1683,6 @@ def _delegate_method(self, name, *args, **kwargs):
if not res is None:
return Series(res, index=self.index)
-# TODO: remove levels after the deprecation period
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
accessors=["categories", "ordered"],
typ='property')
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 4f0e57130006b..6a05f819908af 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -68,10 +68,9 @@
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True,
- na_rep='NaN', name=False, footer=True):
+ na_rep='NaN', footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
- self.name = name
self.na_rep = na_rep
self.length = length
self.footer = footer
@@ -79,12 +78,6 @@ def __init__(self, categorical, buf=None, length=True,
def _get_footer(self):
footer = ''
- if self.name:
- name = com.pprint_thing(self.categorical.name,
- escape_chars=('\t', '\r', '\n'))
- footer += ('Name: %s' % name if self.categorical.name is not None
- else '')
-
if self.length:
if footer:
footer += ', '
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index df788f806eda6..8e1a18006bbaf 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1960,8 +1960,6 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
self._group_index = CategoricalIndex(Categorical.from_codes(np.arange(len(c)),
categories=c,
ordered=self.grouper.ordered))
- if self.name is None:
- self.name = self.grouper.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 98e0214dbf073..442f188267a58 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -4414,7 +4414,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None):
levels = [c.categories for c in cats]
labels = [c.codes for c in cats]
if names is None:
- names = [c.name for c in cats]
+ names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, labels=labels,
sortorder=sortorder, names=names,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 062a32413286f..506aa1a6eb51e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -189,8 +189,6 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
elif isinstance(data, Categorical):
if dtype is not None:
raise ValueError("cannot specify a dtype with a Categorical")
- if name is None:
- name = data.name
elif (isinstance(data, types.GeneratorType) or
(compat.PY3 and isinstance(data, map))):
data = list(data)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 5f3ff794b4900..fdd20af6ab6ce 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -492,7 +492,7 @@ def test_print(self):
def test_big_print(self):
factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]",
- "Name: cat, Length: 600",
+ "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
@@ -501,15 +501,11 @@ def test_big_print(self):
self.assertEqual(actual, expected)
def test_empty_print(self):
- factor = Categorical([], ["a","b","c"], name="cat")
- expected = ("[], Name: cat, Categories (3, object): [a, b, c]")
- # hack because array_repr changed in numpy > 1.6.x
- actual = repr(factor)
- self.assertEqual(actual, expected)
-
factor = Categorical([], ["a","b","c"])
expected = ("[], Categories (3, object): [a, b, c]")
+ # hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
+ self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a","b","c"], ordered=True)
@@ -523,9 +519,9 @@ def test_empty_print(self):
def test_print_none_width(self):
# GH10087
- a = pd.Series(pd.Categorical([1,2,3,4], name="a"))
+ a = pd.Series(pd.Categorical([1,2,3,4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
- "Name: a, dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
+ "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
@@ -1170,6 +1166,13 @@ def test_deprecated_levels(self):
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
+ def test_removed_names_produces_warning(self):
+ with tm.assert_produces_warning(UserWarning):
+ Categorical([0,1], name="a")
+
+ with tm.assert_produces_warning(UserWarning):
+ Categorical.from_codes([1,2], ["a","b","c"], name="a")
+
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
@@ -1673,23 +1676,23 @@ def test_describe(self):
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
- a = pd.Series(pd.Categorical([1,2,3,4], name="a"))
+ a = pd.Series(pd.Categorical([1,2,3,4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
- "Name: a, dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
+ "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
- a = pd.Series(pd.Categorical(["a","b"] *25, name="a"))
+ a = pd.Series(pd.Categorical(["a","b"] *25))
exp = u("0 a\n1 b\n" + " ..\n" +
"48 a\n49 b\n" +
- "Name: a, dtype: category\nCategories (2, object): [a, b]")
+ "dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
- a = pd.Series(pd.Categorical(["a","b"], name="a", categories=levs, ordered=True))
+ a = pd.Series(pd.Categorical(["a","b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" +
- "Name: a, dtype: category\n"
+ "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp,a.__unicode__())
@@ -2202,8 +2205,8 @@ def test_slicing_doc_examples(self):
tm.assert_series_equal(result, expected)
result = df.loc["h":"j","cats"]
- expected = Series(Categorical(['a','b','b'], name='cats',
- categories=['a','b','c']), index=['h','i','j'])
+ expected = Series(Categorical(['a','b','b'],
+ categories=['a','b','c']), index=['h','i','j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j",0:1]
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 91902aae3c835..a73f4e2939578 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3453,7 +3453,7 @@ def test_groupby_categorical(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=100)
- cats = Categorical.from_codes(codes, levels, name='myfactor', ordered=True)
+ cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
@@ -3461,10 +3461,8 @@ def test_groupby_categorical(self):
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
- expected.index.name = 'myfactor'
assert_frame_equal(result, expected)
- self.assertEqual(result.index.name, cats.name)
grouped = data.groupby(cats)
desc_result = grouped.describe()
@@ -3473,12 +3471,12 @@ def test_groupby_categorical(self):
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, sort=False).describe()
- expected.index.names = ['myfactor', None]
+ expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
# GH 10460
- expc = Categorical.from_codes(np.arange(4).repeat(8), levels, name='myfactor', ordered=True)
- exp = CategoricalIndex(expc, name='myfactor')
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
+ exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
@@ -3488,7 +3486,7 @@ def test_groupby_datetime_categorical(self):
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
- cats = Categorical.from_codes(codes, levels, name='myfactor', ordered=True)
+ cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
@@ -3496,10 +3494,9 @@ def test_groupby_datetime_categorical(self):
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(expected.index, categories=expected.index,
- name='myfactor', ordered=True)
+ ordered=True)
assert_frame_equal(result, expected)
- self.assertEqual(result.index.name, cats.name)
grouped = data.groupby(cats)
desc_result = grouped.describe()
@@ -3508,14 +3505,14 @@ def test_groupby_datetime_categorical(self):
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels).describe()
- expected.index.names = ['myfactor', None]
+ expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(desc_result.index.get_level_values(0), expected.index.get_level_values(0))
# GH 10460
- expc = Categorical.from_codes(np.arange(4).repeat(8), levels, name='myfactor', ordered=True)
- exp = CategoricalIndex(expc, name='myfactor')
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
+ exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index becea04c5dc98..cb6659af9eca5 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -692,9 +692,7 @@ def test_constructor_map(self):
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)
- cat.name = 'foo'
res = Series(cat)
- self.assertEqual(res.name, cat.name)
self.assertTrue(res.values.equals(cat))
def test_constructor_maskedarray(self):
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index 6830919d9c09f..416addfcf2ad5 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -217,7 +217,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,
levels = np.asarray(levels, dtype=object)
np.putmask(ids, na_mask, 0)
- fac = Categorical(ids - 1, levels, ordered=True, name=name, fastpath=True)
+ fac = Categorical(ids - 1, levels, ordered=True, fastpath=True)
else:
fac = ids - 1
if has_nas:
@@ -225,7 +225,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,
np.putmask(fac, na_mask, np.nan)
if x_is_series:
- fac = Series(fac, index=series_index)
+ fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 9f75e42a8676a..97bae51b18248 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -630,9 +630,6 @@ def assert_categorical_equal(res, exp):
if res.ordered != exp.ordered:
raise AssertionError("ordered not the same")
- if res.name != exp.name:
- raise AssertionError("name not the same")
-
def assert_numpy_array_equal(np_array, assert_equal, err_msg=None):
"""Checks that 'np_array' is equal to 'assert_equal'
| Probably work in progress... Up to now only tests locally with 'not slow' (whoever wrote `nosetests --with-id --failed` should be praised! :-) )
See here: https://github.com/pydata/pandas/issues/10482
| https://api.github.com/repos/pandas-dev/pandas/pulls/10632 | 2015-07-19T21:29:41Z | 2015-07-26T01:54:21Z | 2015-07-26T01:54:21Z | 2015-07-26T01:54:28Z |
TST: windows compat for testing / msgpack | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 8c582f2618882..d703fa7bb54a1 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -370,7 +370,7 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in `pd.eval` using ``numexpr`` engine coerces 1 element numpy array to scalar (:issue:`10546`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
-- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
+- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`, :issue:`10630`)
- Bug in `pandas.read_csv` with kwargs ``index_col=False``, ``index_col=['a', 'b']`` or ``dtype``
(:issue:`10413`, :issue:`10467`, :issue:`10577`)
- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 847a7c4f90216..d7655e9e052c0 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -169,10 +169,16 @@ def read(fh):
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
- u('timedelta64[us]'): np.dtype('m8[us]')}
+ u('timedelta64[us]'): np.dtype('m8[us]'),
+
+ # this is platform int, which we need to remap to np.int64
+ # for compat on windows platforms
+ 7: np.dtype('int64'),
+}
def dtype_for(t):
+ """ return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict[t]
@@ -266,7 +272,7 @@ def encode(obj):
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'freq': getattr(obj, 'freqstr', None),
- 'dtype': obj.dtype.num,
+ 'dtype': obj.dtype.name,
'data': convert(obj.asi8),
'compress': compressor}
elif isinstance(obj, DatetimeIndex):
@@ -279,7 +285,7 @@ def encode(obj):
return {'typ': 'datetime_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
- 'dtype': obj.dtype.num,
+ 'dtype': obj.dtype.name,
'data': convert(obj.asi8),
'freq': getattr(obj, 'freqstr', None),
'tz': tz,
@@ -288,14 +294,14 @@ def encode(obj):
return {'typ': 'multi_index',
'klass': obj.__class__.__name__,
'names': getattr(obj, 'names', None),
- 'dtype': obj.dtype.num,
+ 'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
else:
return {'typ': 'index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
- 'dtype': obj.dtype.num,
+ 'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
elif isinstance(obj, Series):
@@ -305,7 +311,7 @@ def encode(obj):
)
#d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
- # 'dtype': obj.dtype.num,
+ # 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
@@ -318,7 +324,7 @@ def encode(obj):
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'index': obj.index,
- 'dtype': obj.dtype.num,
+ 'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
elif issubclass(tobj, NDFrame):
@@ -360,7 +366,7 @@ def encode(obj):
'locs': b.mgr_locs.as_array,
'values': convert(b.values),
'shape': b.values.shape,
- 'dtype': b.dtype.num,
+ 'dtype': b.dtype.name,
'klass': b.__class__.__name__,
'compress': compressor
} for b in data.blocks]}
@@ -413,7 +419,7 @@ def encode(obj):
return {'typ': 'ndarray',
'shape': obj.shape,
'ndim': obj.ndim,
- 'dtype': obj.dtype.num,
+ 'dtype': obj.dtype.name,
'data': convert(obj),
'compress': compressor}
elif isinstance(obj, np.number):
@@ -449,11 +455,12 @@ def decode(obj):
return Period(ordinal=obj['ordinal'], freq=obj['freq'])
elif typ == 'index':
dtype = dtype_for(obj['dtype'])
- data = unconvert(obj['data'], np.typeDict[obj['dtype']],
+ data = unconvert(obj['data'], dtype,
obj.get('compress'))
return globals()[obj['klass']](data, dtype=dtype, name=obj['name'])
elif typ == 'multi_index':
- data = unconvert(obj['data'], np.typeDict[obj['dtype']],
+ dtype = dtype_for(obj['dtype'])
+ data = unconvert(obj['data'], dtype,
obj.get('compress'))
data = [tuple(x) for x in data]
return globals()[obj['klass']].from_tuples(data, names=obj['names'])
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_AMD64_windows_2.7.10.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_AMD64_windows_2.7.10.msgpack
new file mode 100644
index 0000000000000..1e128f42a37a6
Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_AMD64_windows_2.7.10.msgpack differ
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_AMD64_windows_3.4.3.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_AMD64_windows_3.4.3.msgpack
new file mode 100644
index 0000000000000..156905faece90
Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_AMD64_windows_3.4.3.msgpack differ
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack
new file mode 100644
index 0000000000000..6bf1b9b9afaaa
Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_AMD64_windows_2.7.10.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_AMD64_windows_2.7.10.pickle
new file mode 100644
index 0000000000000..a2a3ffa044013
Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_AMD64_windows_2.7.10.pickle differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_AMD64_windows_3.4.3.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_AMD64_windows_3.4.3.pickle
new file mode 100644
index 0000000000000..6b8fdaa21badc
Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_AMD64_windows_3.4.3.pickle differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle
new file mode 100644
index 0000000000000..60101c2f1e95e
Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle differ
diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py
index 93d55c654de90..ceb845073e2c3 100644
--- a/pandas/io/tests/test_cparser.py
+++ b/pandas/io/tests/test_cparser.py
@@ -186,6 +186,30 @@ def test_header_not_enough_lines(self):
'1,2,3\n'
'4,5,6')
+ reader = TextReader(StringIO(data), delimiter=',', header=2)
+ header = reader.header
+ expected = [['a', 'b', 'c']]
+ self.assertEqual(header, expected)
+
+ recs = reader.read()
+ expected = {0 : [1, 4], 1 : [2, 5], 2 : [3, 6]}
+ assert_array_dicts_equal(expected, recs)
+
+ # not enough rows
+ self.assertRaises(parser.CParserError, TextReader, StringIO(data),
+ delimiter=',', header=5, as_recarray=True)
+
+ def test_header_not_enough_lines_as_recarray(self):
+
+ if compat.is_platform_windows():
+ raise nose.SkipTest("segfaults on win-64, only when all tests are run")
+
+ data = ('skip this\n'
+ 'skip this\n'
+ 'a,b,c\n'
+ '1,2,3\n'
+ '4,5,6')
+
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
@@ -246,6 +270,21 @@ def _make_reader(**kwds):
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'S4')
+ def test_numpy_string_dtype_as_recarray(self):
+ data = """\
+a,1
+aa,2
+aaa,3
+aaaa,4
+aaaaa,5"""
+
+ if compat.is_platform_windows():
+ raise nose.SkipTest("segfaults on win-64, only when all tests are run")
+
+ def _make_reader(**kwds):
+ return TextReader(StringIO(data), delimiter=',', header=None,
+ **kwds)
+
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index 303ecdbf0ec6e..cb99c1705c5eb 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -114,6 +114,9 @@ def test_decimalDecodeTestPrecise(self):
self.assertEqual(sut, decoded)
def test_encodeDoubleTinyExponential(self):
+ if compat.is_platform_windows() and not compat.PY3:
+ raise nose.SkipTest("buggy on win-64 for py2")
+
num = 1e-40
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = 1e-100
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 62dbb0090aac5..724dcf1de14d2 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -3204,6 +3204,9 @@ def read_table(self, *args, **kwds):
return read_table(*args, **kwds)
def test_compact_ints(self):
+ if compat.is_platform_windows():
+ raise nose.SkipTest("segfaults on win-64, only when all tests are run")
+
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
@@ -3515,6 +3518,25 @@ def test_compact_ints(self):
'1,1,0,0\n'
'0,1,0,1')
+ result = read_csv(StringIO(data), delimiter=',', header=None,
+ compact_ints=True)
+ ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
+ self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
+
+ result = read_csv(StringIO(data), delimiter=',', header=None,
+ compact_ints=True,
+ use_unsigned=True)
+ ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
+ self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
+
+ def test_compact_ints_as_recarray(self):
+ if compat.is_platform_windows():
+ raise nose.SkipTest("segfaults on win-64, only when all tests are run")
+
+ data = ('0,1,0,0\n'
+ '1,1,0,0\n'
+ '0,1,0,1')
+
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
@@ -3554,6 +3576,21 @@ def test_pass_dtype(self):
3,4.5
4,5.5"""
+ result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
+ self.assertEqual(result['one'].dtype, 'u1')
+ self.assertEqual(result['two'].dtype, 'object')
+
+ def test_pass_dtype_as_recarray(self):
+ data = """\
+one,two
+1,2.5
+2,3.5
+3,4.5
+4,5.5"""
+
+ if compat.is_platform_windows():
+ raise nose.SkipTest("segfaults on win-64, only when all tests are run")
+
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
@@ -3623,6 +3660,7 @@ def test_usecols_dtypes(self):
4,5,6
7,8,9
10,11,12"""
+
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
diff --git a/pandas/sparse/tests/test_libsparse.py b/pandas/sparse/tests/test_libsparse.py
index 440f4ffb46cb5..7f9e61571ebfc 100644
--- a/pandas/sparse/tests/test_libsparse.py
+++ b/pandas/sparse/tests/test_libsparse.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
from pandas.core.sparse import SparseSeries
-from pandas import DataFrame
+from pandas import DataFrame, compat
from pandas._sparse import IntIndex, BlockIndex
import pandas._sparse as splib
@@ -230,6 +230,8 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
_check_length_exc(xindex.to_int_index(),
longer_index.to_int_index())
+ if compat.is_platform_windows():
+ raise nose.SkipTest("segfaults on win-64 when all tests are run")
check_cases(_check_case)
| - skipping a couple of parsing routines that cause odd test failures on windows
- msgpack compat for windows (not tested before, prob not working correctly). Slight internal API change where writing the `dtype.name` rather than `dtype.num`, but back-compat is ok on this. (and forward compat is better).
- skipping a buggy test for ujson on windows on 2.7 only
| https://api.github.com/repos/pandas-dev/pandas/pulls/10630 | 2015-07-19T20:26:50Z | 2015-07-21T12:39:44Z | 2015-07-21T12:39:43Z | 2015-07-21T12:39:44Z |
DOC Add more examples to Series.Resample docstring | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 273c444b30b80..ce6e66c4bc209 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3274,7 +3274,107 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
+
+
+ Examples
+ --------
+
+ Start by creating a series with 9 one minute timestamps.
+
+ >>> index = pd.date_range('1/1/2000', periods=9, freq='T')
+ >>> series = pd.Series(range(9), index=index)
+ >>> series
+ 2000-01-01 00:00:00 0
+ 2000-01-01 00:01:00 1
+ 2000-01-01 00:02:00 2
+ 2000-01-01 00:03:00 3
+ 2000-01-01 00:04:00 4
+ 2000-01-01 00:05:00 5
+ 2000-01-01 00:06:00 6
+ 2000-01-01 00:07:00 7
+ 2000-01-01 00:08:00 8
+ Freq: T, dtype: int64
+
+ Downsample the series into 3 minute bins and sum the values
+ of the timestamps falling into a bin.
+
+ >>> series.resample('3T', how='sum')
+ 2000-01-01 00:00:00 3
+ 2000-01-01 00:03:00 12
+ 2000-01-01 00:06:00 21
+ Freq: 3T, dtype: int64
+
+ Downsample the series into 3 minute bins as above, but label each
+ bin using the right edge instead of the left. Please note that the
+ value in the bucket used as the label is not included in the bucket,
+ which it labels. For example, in the original series the
+ bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
+ value in the resampled bucket with the label``2000-01-01 00:03:00``
+ does not include 3 (if it did, the summed value would be 6, not 3).
+ To include this value close the right side of the bin interval as
+ illustrated in the example below this one.
+
+ >>> series.resample('3T', how='sum', label='right')
+ 2000-01-01 00:03:00 3
+ 2000-01-01 00:06:00 12
+ 2000-01-01 00:09:00 21
+ Freq: 3T, dtype: int64
+
+ Downsample the series into 3 minute bins as above, but close the right
+ side of the bin interval.
+
+ >>> series.resample('3T', how='sum', label='right', closed='right')
+ 2000-01-01 00:00:00 0
+ 2000-01-01 00:03:00 6
+ 2000-01-01 00:06:00 15
+ 2000-01-01 00:09:00 15
+ Freq: 3T, dtype: int64
+
+ Upsample the series into 30 second bins.
+
+ >>> series.resample('30S')[0:5] #select first 5 rows
+ 2000-01-01 00:00:00 0
+ 2000-01-01 00:00:30 NaN
+ 2000-01-01 00:01:00 1
+ 2000-01-01 00:01:30 NaN
+ 2000-01-01 00:02:00 2
+ Freq: 30S, dtype: float64
+
+ Upsample the series into 30 second bins and fill the ``NaN``
+ values using the ``pad`` method.
+
+ >>> series.resample('30S', fill_method='pad')[0:5]
+ 2000-01-01 00:00:00 0
+ 2000-01-01 00:00:30 0
+ 2000-01-01 00:01:00 1
+ 2000-01-01 00:01:30 1
+ 2000-01-01 00:02:00 2
+ Freq: 30S, dtype: int64
+
+ Upsample the series into 30 second bins and fill the
+ ``NaN`` values using the ``bfill`` method.
+
+ >>> series.resample('30S', fill_method='bfill')[0:5]
+ 2000-01-01 00:00:00 0
+ 2000-01-01 00:00:30 1
+ 2000-01-01 00:01:00 1
+ 2000-01-01 00:01:30 2
+ 2000-01-01 00:02:00 2
+ Freq: 30S, dtype: int64
+
+ Pass a custom function to ``how``.
+
+ >>> def custom_resampler(array_like):
+ ... return np.sum(array_like)+5
+
+ >>> series.resample('3T', how=custom_resampler)
+ 2000-01-01 00:00:00 8
+ 2000-01-01 00:03:00 17
+ 2000-01-01 00:06:00 26
+ Freq: 3T, dtype: int64
+
"""
+
from pandas.tseries.resample import TimeGrouper
axis = self._get_axis_number(axis)
sampler = TimeGrouper(rule, label=label, closed=closed, how=how,
| Partial fix for https://github.com/pydata/pandas/issues/5023
flake8 doesn't do a very good job at checking code in the docstrings for PEP8 compliance, so I'm just eyeballing it at the moment and my PEP8 knowledge leaves something to be desired. Apologies if I missed something really obvious.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10628 | 2015-07-19T10:16:42Z | 2015-09-01T12:07:44Z | 2015-09-01T12:07:44Z | 2015-09-01T12:08:42Z |
BUG: GH10536 in concat for SparseSeries | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index b2a1e10469a0f..52e4c1f4e7d03 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -388,3 +388,5 @@ Bug Fixes
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
+
+- Bug in ``concat`` with ``SparseSeries`` (:issue:`10536`)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index c7c578232cd0f..a6533ad600f45 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -18,7 +18,8 @@
from pandas.util.decorators import Appender, Substitution
from pandas.core.common import ABCSeries
from pandas.io.parsers import TextFileReader
-
+from pandas.sparse.series import SparseSeries
+from pandas.sparse.frame import SparseDataFrame
import pandas.core.common as com
import pandas.lib as lib
@@ -838,6 +839,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, ABCSeries)
+ self._is_sp_series = isinstance(sample, SparseSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
@@ -894,13 +896,21 @@ def get_result(self):
if self.axis == 0:
new_data = com._concat_compat([x.values for x in self.objs])
name = com._consensus_name_attr(self.objs)
- return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
+ if self._is_sp_series:
+ klass = SparseSeries
+ else:
+ klass = Series
+ return klass(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
index, columns = self.new_axes
- tmpdf = DataFrame(data, index=index)
+ if self._is_sp_series:
+ klass = SparseDataFrame
+ else:
+ klass = DataFrame
+ tmpdf = klass(data, index=index)
if columns is not None:
tmpdf.columns = columns
return tmpdf.__finalize__(self, method='concat')
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index d357182a60b1f..61fbce8c2c7de 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -17,9 +17,11 @@
assert_almost_equal,
makeCustomDataframe as mkdf,
assertRaisesRegexp)
-from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table, read_csv
+from pandas import (isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range,
+ read_table, read_csv, SparseSeries, SparseDataFrame)
import pandas.algos as algos
import pandas.util.testing as tm
+from pandas.sparse.tests.test_sparse import assert_sp_series_equal, assert_sp_frame_equal
a_ = np.array
@@ -2476,6 +2478,24 @@ def test_concat_invalid_first_argument(self):
expected = read_csv(StringIO(data))
assert_frame_equal(result,expected)
+ def test_concat_sp_series(self):
+ # GH10536
+ data = [0, 1, 1, 2, 3, 0, np.nan]
+ index = [1, 2, 3, 4, 5, 6, 7]
+ sp = SparseSeries(data, index=index)
+ result = concat([sp, sp], axis=0)
+ expected = SparseSeries(data * 2, index=index * 2, kind='integer')
+ assert_sp_series_equal(result, expected)
+
+ def test_concat_sp_dataframe(self):
+ # GH10536
+ data = [0, 1, 1, 2, 3, 0, np.nan]
+ sp = SparseDataFrame(data)
+ result = concat([sp, sp], axis=1, ignore_index=True)
+ expected = SparseDataFrame({0: data, 1: data})
+ assert_sp_frame_equal(result, expected)
+
+
class TestOrderedMerge(tm.TestCase):
def setUp(self):
| To address #10536, but it's clearly not enough. What should be done for `SparseSeries` of different `kind`s and different `fill` values?
| https://api.github.com/repos/pandas-dev/pandas/pulls/10626 | 2015-07-19T06:54:19Z | 2015-11-10T01:27:48Z | null | 2023-05-11T01:13:05Z |
BUG: Fix issue with incorrect groupby handling of NaT | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 31b6bb0d5575d..4acd77222efd7 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -865,4 +865,5 @@ Bug Fixes
- Bug in ``to_json`` which was causing segmentation fault when serializing 0-rank ndarray (:issue:`9576`)
- Bug in plotting functions may raise ``IndexError`` when plotted on ``GridSpec`` (:issue:`10819`)
- Bug in plot result may show unnecessary minor ticklabels (:issue:`10657`)
-- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`)
+- Bug in ``groupby`` incorrect computation for aggregation on ``DataFrame`` with ``NaT`` (E.g ``first``, ``last``, ``min``). (:issue:`10590`)
+- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`)
\ No newline at end of file
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 534117b8e9249..1a40b73de8dd2 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1532,6 +1532,7 @@ def aggregate(self, values, how, axis=0):
if is_datetime_or_timedelta_dtype(values.dtype):
values = values.view('int64')
+ values[values == tslib.iNaT] = np.nan
# GH 7754
is_numeric = True
elif is_bool_dtype(values.dtype):
@@ -2761,9 +2762,7 @@ def _cython_agg_blocks(self, how, numeric_only=True):
for block in data.blocks:
- values = block._try_operate(block.values)
-
- result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
+ result, _ = self.grouper.aggregate(block.values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 41703b3b5a3b7..ec6ab4e0d2ab1 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -5413,6 +5413,24 @@ def test_func(x):
expected = DataFrame()
tm.assert_frame_equal(result, expected)
+ def test_first_last_max_min_on_time_data(self):
+ # GH 10295
+ # Verify that NaT is not in the result of max, min, first and last on
+ # Dataframe with datetime or timedelta values.
+ from datetime import timedelta as td
+ df_test=DataFrame({'dt':[nan,'2015-07-24 10:10','2015-07-25 11:11','2015-07-23 12:12',nan],
+ 'td':[nan,td(days=1),td(days=2),td(days=3),nan]})
+ df_test.dt=pd.to_datetime(df_test.dt)
+ df_test['group']='A'
+ df_ref=df_test[df_test.dt.notnull()]
+
+ grouped_test=df_test.groupby('group')
+ grouped_ref=df_ref.groupby('group')
+
+ assert_frame_equal(grouped_ref.max(),grouped_test.max())
+ assert_frame_equal(grouped_ref.min(),grouped_test.min())
+ assert_frame_equal(grouped_ref.first(),grouped_test.first())
+ assert_frame_equal(grouped_ref.last(),grouped_test.last())
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
| closes #10590
For groupby the time stamps gets converted to integervalue `tslib.iNaT`
which is -9223372036854775808. The aggregation is then done using this
value with incorrect result as a consequence. The solution proposed here
is to replace its value by np.nan in case it is a `datetime64[ns]`
| https://api.github.com/repos/pandas-dev/pandas/pulls/10625 | 2015-07-19T06:51:30Z | 2015-09-03T13:09:57Z | 2015-09-03T13:09:57Z | 2016-02-14T22:15:10Z |
Very minor refactoring and edits to code comments | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 08fc8483762ab..57c1667dca0c3 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -52,7 +52,7 @@
with open("index.rst") as f:
- lines = f.readlines()
+ index_rst_lines = f.readlines()
# only include the slow autosummary feature if we're building the API section
# of the docs
@@ -60,20 +60,21 @@
# JP: added from sphinxdocs
autosummary_generate = False
-if any([re.match("\s*api\s*",l) for l in lines]):
+if any([re.match("\s*api\s*",l) for l in index_rst_lines]):
autosummary_generate = True
-ds = []
+files_to_delete = []
for f in os.listdir(os.path.dirname(__file__)):
- if (not f.endswith(('.rst'))) or (f.startswith('.')) or os.path.basename(f) == 'index.rst':
+ if not f.endswith('.rst') or f.startswith('.') or os.path.basename(f) == 'index.rst':
continue
- _f = f.split('.rst')[0]
- if not any([re.match("\s*%s\s*$" % _f,l) for l in lines]):
- ds.append(f)
+ _file_basename = f.split('.rst')[0]
+ _regex_to_match = "\s*{}\s*$".format(_file_basename)
+ if not any([re.match(_regex_to_match, line) for line in index_rst_lines]):
+ files_to_delete.append(f)
-if ds:
- print("I'm about to DELETE the following:\n%s\n" % list(sorted(ds)))
+if files_to_delete:
+ print("I'm about to DELETE the following:\n%s\n" % list(sorted(files_to_delete)))
sys.stdout.write("WARNING: I'd like to delete those to speed up processing (yes/no)? ")
if PY3:
answer = input()
@@ -81,7 +82,7 @@
answer = raw_input()
if answer.lower().strip() in ('y','yes'):
- for f in ds:
+ for f in files_to_delete:
f = os.path.join(os.path.join(os.path.dirname(__file__),f))
f= os.path.abspath(f)
try:
diff --git a/doc/source/internals.rst b/doc/source/internals.rst
index 5899c3089cdac..3d96b93de4cc9 100644
--- a/doc/source/internals.rst
+++ b/doc/source/internals.rst
@@ -35,7 +35,7 @@ containers for the axis labels:
- ``TimedeltaIndex``: An Index object with ``Timedelta`` boxed elements (impl are the in64 values)
- ``PeriodIndex``: An Index object with Period elements
-These are range generates to make the creation of a regular index easy:
+There are functions that make the creation of a regular index easy:
- ``date_range``: fixed frequency date range generated from a time rule or
DateOffset. An ndarray of Python datetime objects
@@ -193,7 +193,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
Define Original Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~
-To let original data structures have additional properties, you should let ``pandas`` knows what properties are added. ``pandas`` maps unknown properties to data names overriding ``__getattribute__``. Defining original properties can be done in one of 2 ways:
+To let original data structures have additional properties, you should let ``pandas`` know what properties are added. ``pandas`` maps unknown properties to data names overriding ``__getattribute__``. Defining original properties can be done in one of 2 ways:
1. Define ``_internal_names`` and ``_internal_names_set`` for temporary properties which WILL NOT be passed to manipulation results.
2. Define ``_metadata`` for normal properties which will be passed to manipulation results.
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 49db94c3bfa86..4c9a97bd68fbc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -47,7 +47,7 @@ def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
- return "This method must be defined on the concrete class of " \
+ return "This method must be defined in the concrete class of " \
+ self.class_instance.__class__.__name__
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a7ecb74a67485..af2ccb8917e52 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -184,7 +184,6 @@ class DataFrame(NDFrame):
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
- _auto_consolidate = True
@property
def _constructor(self):
@@ -2169,16 +2168,11 @@ def _ensure_valid_index(self, value):
ensure that if we don't have an index, that we can create one from the
passed value
"""
- if not len(self.index):
-
- # GH5632, make sure that we are a Series convertible
- if is_list_like(value):
+ # GH5632, make sure that we are a Series convertible
+ if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
- pass
-
- if not isinstance(value, Series):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
@@ -2186,11 +2180,6 @@ def _ensure_valid_index(self, value):
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
- # we are a scalar
- # noop
- else:
-
- pass
def _set_item(self, key, value):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 273c444b30b80..292871000cafb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -138,6 +138,9 @@ def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
@property
def _constructor(self):
+ """Used when a manipulation result has the same dimesions as the
+ original.
+ """
raise AbstractMethodError(self)
def __unicode__(self):
@@ -153,10 +156,16 @@ def _dir_additions(self):
@property
def _constructor_sliced(self):
+ """Used when a manipulation result has one lower dimension(s) as the
+ original, such as DataFrame single columns slicing.
+ """
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
+ """Used when a manipulation result has one higher dimension as the
+ original, such as Series.to_frame() and DataFrame.to_panel()
+ """
raise NotImplementedError
#----------------------------------------------------------------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 84cf0256d7ef0..7fbc6736db4bd 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -431,8 +431,8 @@ def can_do_equal_len():
return False
- # we need an interable, with a ndim of at least 1
- # eg. don't pass thru np.array(0)
+ # we need an iterable, with a ndim of at least 1
+ # eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value,'ndim',1) > 0:
# we have an equal len Frame
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6a87f5a0b08e0..e66467f31f37e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2121,7 +2121,7 @@ def make_block(values, placement, klass=None, ndim=None,
class BlockManager(PandasObject):
"""
- Core internal data structure to implement DataFrame
+ Core internal data structure to implement DataFrame, Series, Panel, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 53fcdb61bd1ae..48e828af826c1 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -2613,13 +2613,7 @@ def generate_take_cython_file():
print(generate_put_selection_template(template, use_ints=True,
use_datelikes=True,
use_objects=True),
- file=f)
-
- # for template in templates_1d_datetime:
- # print >> f, generate_from_template_datetime(template)
-
- # for template in templates_2d_datetime:
- # print >> f, generate_from_template_datetime(template, ndim=2)
+ file=f)
for template in nobool_1d_templates:
print(generate_from_template(template, exclude=['bool']), file=f)
| Just some miscellaneous improvements on things that tripped me up a bit.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10622 | 2015-07-19T03:10:37Z | 2015-07-28T11:03:50Z | null | 2015-07-28T17:42:19Z |
Drop & insert on subtypes of index return their subtypes | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index c947758b8e8b1..f9888137d096b 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -401,4 +401,5 @@ Bug Fixes
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
+- Bug in `Index` subtypes (such as `PeriodIndex`) not returning their own type for `.drop` and `.insert` methods
diff --git a/pandas/core/index.py b/pandas/core/index.py
index e62fd646ede56..1866ea274e325 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -89,7 +89,6 @@ class Index(IndexOpsMixin, PandasObject):
_left_indexer = _algos.left_join_indexer_object
_inner_indexer = _algos.inner_join_indexer_object
_outer_indexer = _algos.outer_join_indexer_object
-
_box_scalars = False
_typ = 'index'
@@ -204,6 +203,17 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
@classmethod
def _simple_new(cls, values, name=None, **kwargs):
+ """
+ we require the we have a dtype compat for the values
+ if we are passed a non-dtype compat, then coerce using the constructor
+
+ Must be careful not to recurse.
+ """
+ if not hasattr(values, 'dtype'):
+ values = np.array(values,copy=False)
+ if is_object_dtype(values):
+ values = cls(values, name=name, **kwargs).values
+
result = object.__new__(cls)
result._data = values
result.name = name
@@ -341,15 +351,41 @@ def view(self, cls=None):
result._id = self._id
return result
- def _shallow_copy(self, values=None, **kwargs):
- """ create a new Index, don't copy the data, use the same object attributes
- with passed in attributes taking precedence """
+ def _shallow_copy(self, values=None, infer=False, **kwargs):
+ """
+ create a new Index, don't copy the data, use the same object attributes
+ with passed in attributes taking precedence
+
+ *this is an internal non-public method*
+
+ Parameters
+ ----------
+ values : the values to create the new Index, optional
+ infer : boolean, default False
+ if True, infer the new type of the passed values
+ kwargs : updates the default attributes for this Index
+ """
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
+
+ if infer:
+ attributes['copy'] = False
+ return Index(values, **attributes)
+
return self.__class__._simple_new(values,**attributes)
+ def _coerce_scalar_to_index(self, item):
+ """
+ we need to coerce a scalar to a compat for our index type
+
+ Parameters
+ ----------
+ item : scalar item to coerce
+ """
+ return Index([item], dtype=self.dtype, **self._get_attributes_dict())
+
def copy(self, names=None, name=None, dtype=None, deep=False):
"""
Make a copy of this object. Name and dtype sets those attributes on
@@ -1132,7 +1168,9 @@ def append(self, other):
appended : Index
"""
to_concat, name = self._ensure_compat_append(other)
- return Index(np.concatenate(to_concat), name=name)
+ attribs = self._get_attributes_dict()
+ attribs['name'] = name
+ return self._shallow_copy(np.concatenate(to_concat), infer=True, **attribs)
@staticmethod
def _ensure_compat_concat(indexes):
@@ -1549,7 +1587,11 @@ def sym_diff(self, other, result_name=None):
if result_name is None:
result_name = result_name_update
the_diff = sorted(set((self.difference(other)).union(other.difference(self))))
- return Index(the_diff, name=result_name)
+ attribs = self._get_attributes_dict()
+ attribs['name'] = result_name
+ if 'freq' in attribs:
+ attribs['freq'] = None
+ return self._shallow_copy(the_diff, infer=True, **attribs)
def get_loc(self, key, method=None):
"""
@@ -2527,7 +2569,8 @@ def delete(self, loc):
-------
new_index : Index
"""
- return Index(np.delete(self._data, loc), name=self.name)
+ attribs = self._get_attributes_dict()
+ return self._shallow_copy(np.delete(self._data, loc), **attribs)
def insert(self, loc, item):
"""
@@ -2544,10 +2587,12 @@ def insert(self, loc, item):
new_index : Index
"""
_self = np.asarray(self)
- item_idx = Index([item], dtype=self.dtype).values
+ item = self._coerce_scalar_to_index(item).values
+
idx = np.concatenate(
- (_self[:loc], item_idx, _self[loc:]))
- return Index(idx, name=self.name)
+ (_self[:loc], item, _self[loc:]))
+ attribs = self._get_attributes_dict()
+ return self._shallow_copy(idx, infer=True, **attribs)
def drop(self, labels, errors='raise'):
"""
@@ -3678,7 +3723,7 @@ class MultiIndex(Index):
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
- Names for each of the index levels.
+ Names for each of the index levels. (name is accepted for compat)
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
@@ -3694,8 +3739,11 @@ class MultiIndex(Index):
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
- copy=False, verify_integrity=True, _set_identity=True, **kwargs):
+ copy=False, verify_integrity=True, _set_identity=True, name=None, **kwargs):
+ # compat with Index
+ if name is not None:
+ names = name
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
@@ -4004,7 +4052,12 @@ def view(self, cls=None):
result._id = self._id
return result
- _shallow_copy = view
+ def _shallow_copy(self, values=None, infer=False, **kwargs):
+ if values is not None:
+ if 'name' in kwargs:
+ kwargs['names'] = kwargs.pop('name',None)
+ return MultiIndex.from_tuples(values, **kwargs)
+ return self.view()
@cache_readonly
def dtype(self):
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 0b592368e2a1c..7a35123960a06 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -396,6 +396,40 @@ def test_symmetric_diff(self):
with tm.assertRaisesRegexp(TypeError, msg):
result = first.sym_diff([1, 2, 3])
+ def test_insert_base(self):
+
+ for name, idx in compat.iteritems(self.indices):
+ result = idx[1:4]
+
+ if not len(idx):
+ continue
+
+ #test 0th element
+ self.assertTrue(idx[0:4].equals(
+ result.insert(0, idx[0])))
+
+ def test_delete_base(self):
+
+ for name, idx in compat.iteritems(self.indices):
+
+ if not len(idx):
+ continue
+
+ expected = idx[1:]
+ result = idx.delete(0)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+
+ expected = idx[:-1]
+ result = idx.delete(-1)
+ self.assertTrue(result.equals(expected))
+ self.assertEqual(result.name, expected.name)
+
+ with tm.assertRaises((IndexError, ValueError)):
+ # either depending on numpy version
+ result = idx.delete(len(idx))
+
+
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 6cf972d4d7a8a..ec60edb6a78d6 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -5,7 +5,8 @@
import numpy as np
from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE,
_values_from_object, _maybe_box,
- ABCSeries, is_integer, is_float)
+ ABCSeries, is_integer, is_float,
+ is_object_dtype, is_datetime64_dtype)
from pandas.core.index import Index, Int64Index, Float64Index
import pandas.compat as compat
from pandas.compat import u
@@ -494,9 +495,16 @@ def _local_timestamps(self):
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None, **kwargs):
+ """
+ we require the we have a dtype compat for the values
+ if we are passed a non-dtype compat, then coerce using the constructor
+ """
+
if not getattr(values,'dtype',None):
values = np.array(values,copy=False)
- if values.dtype != _NS_DTYPE:
+ if is_object_dtype(values):
+ return cls(values, name=name, freq=freq, tz=tz, **kwargs).values
+ elif not is_datetime64_dtype(values):
values = com._ensure_int64(values).view(_NS_DTYPE)
result = object.__new__(cls)
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 7606bd0bd86b8..242d9a7757556 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -19,7 +19,7 @@
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
- is_integer, is_float)
+ is_integer, is_float, is_object_dtype)
from pandas import compat
from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
@@ -259,6 +259,11 @@ def _from_arraylike(cls, data, freq, tz):
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
+ if not getattr(values,'dtype',None):
+ values = np.array(values,copy=False)
+ if is_object_dtype(values):
+ return PeriodIndex(values, name=name, freq=freq, **kwargs)
+
result = object.__new__(cls)
result._data = values
result.name = name
@@ -266,6 +271,20 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
result._reset_identity()
return result
+ def _shallow_copy(self, values=None, infer=False, **kwargs):
+ """ we always want to return a PeriodIndex """
+ return super(PeriodIndex, self)._shallow_copy(values=values, infer=False, **kwargs)
+
+ def _coerce_scalar_to_index(self, item):
+ """
+ we need to coerce a scalar to a compat for our index type
+
+ Parameters
+ ----------
+ item : scalar item to coerce
+ """
+ return PeriodIndex([item], **self._get_attributes_dict())
+
@property
def _na_value(self):
return self._box_func(tslib.iNaT)
| Current behavior:
``` python
In [32]:
period_index=pd.PeriodIndex(start='2015-01-01',end='2015-01-07',freq='B')
period_index
Out[32]:
PeriodIndex(['2015-01-01', '2015-01-02', '2015-01-05', '2015-01-06',
'2015-01-07'],
dtype='int64', freq='B')
In [33]:
period_index.drop(period_index[:2])
Out[33]:
Int64Index([11742, 11743, 11744], dtype='int64')
```
New behavior:
``` python
In [32]: period_index.drop(period_index[:2])
Out[32]: PeriodIndex(['2015-01-05', '2015-01-06', '2015-01-07'], dtype='int64', freq='B')
```
I could combine with #10599, but am having a couple of issues with this (will post below) that I need help with - I'll take direction on whether to combine or not.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10620 | 2015-07-19T01:46:06Z | 2015-07-28T10:05:34Z | null | 2015-07-28T14:47:17Z |
Fix bug in outer_indexer where the special case of an empty right array resulted in bogus return data. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 83e5ec5b1d107..610a1dbc65c6e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -389,6 +389,7 @@ Bug Fixes
- Bug in ``MultiIndex.get_level_values`` including ``Categorical`` raises ``AttributeError`` (:issue:`10460`)
- Bug in ``pd.get_dummies`` with `sparse=True` not returning ``SparseDataFrame`` (:issue:`10531`)
- Bug in ``Index`` subtypes (such as ``PeriodIndex``) not returning their own type for ``.drop`` and ``.insert`` methods (:issue:`10620`)
+- Bug in ``algos.outer_join_indexer`` when ``right`` array is empty (:issue:`10618`)
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 48e828af826c1..29a991a9acfd3 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -2131,7 +2131,7 @@ def outer_join_indexer_%(name)s(ndarray[%(c_type)s] left,
rindexer[j] = j
result[j] = right[j]
elif nright == 0:
- for i in range(nright):
+ for i in range(nleft):
lindexer[i] = i
rindexer[i] = -1
result[i] = left[i]
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index db0e96d158f0c..d4cf7824c8911 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -10345,7 +10345,7 @@ def outer_join_indexer_float64(ndarray[float64_t] left,
rindexer[j] = j
result[j] = right[j]
elif nright == 0:
- for i in range(nright):
+ for i in range(nleft):
lindexer[i] = i
rindexer[i] = -1
result[i] = left[i]
@@ -10474,7 +10474,7 @@ def outer_join_indexer_float32(ndarray[float32_t] left,
rindexer[j] = j
result[j] = right[j]
elif nright == 0:
- for i in range(nright):
+ for i in range(nleft):
lindexer[i] = i
rindexer[i] = -1
result[i] = left[i]
@@ -10603,7 +10603,7 @@ def outer_join_indexer_object(ndarray[object] left,
rindexer[j] = j
result[j] = right[j]
elif nright == 0:
- for i in range(nright):
+ for i in range(nleft):
lindexer[i] = i
rindexer[i] = -1
result[i] = left[i]
@@ -10732,7 +10732,7 @@ def outer_join_indexer_int32(ndarray[int32_t] left,
rindexer[j] = j
result[j] = right[j]
elif nright == 0:
- for i in range(nright):
+ for i in range(nleft):
lindexer[i] = i
rindexer[i] = -1
result[i] = left[i]
@@ -10861,7 +10861,7 @@ def outer_join_indexer_int64(ndarray[int64_t] left,
rindexer[j] = j
result[j] = right[j]
elif nright == 0:
- for i in range(nright):
+ for i in range(nleft):
lindexer[i] = i
rindexer[i] = -1
result[i] = left[i]
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index cb5687acf3a34..30dcd8631f13a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -168,6 +168,38 @@ def _test_vector_resize(htable, uniques, dtype, nvals):
_test_vector_resize(tbl(), vect(), dtype, 0)
_test_vector_resize(tbl(), vect(), dtype, 10)
+class TestIndexer(tm.TestCase):
+ _multiprocess_can_split_ = True
+
+ def test_outer_join_indexer(self):
+ typemap = [('int32', algos.algos.outer_join_indexer_int32),
+ ('int64', algos.algos.outer_join_indexer_int64),
+ ('float32', algos.algos.outer_join_indexer_float32),
+ ('float64', algos.algos.outer_join_indexer_float64),
+ ('object', algos.algos.outer_join_indexer_object)]
+
+ for dtype, indexer in typemap:
+ left = np.arange(3, dtype = dtype)
+ right = np.arange(2,5, dtype = dtype)
+ empty = np.array([], dtype = dtype)
+
+ result, lindexer, rindexer = indexer(left, right)
+ tm.assertIsInstance(result, np.ndarray)
+ tm.assertIsInstance(lindexer, np.ndarray)
+ tm.assertIsInstance(rindexer, np.ndarray)
+ tm.assert_numpy_array_equal(result, np.arange(5, dtype = dtype))
+ tm.assert_numpy_array_equal(lindexer, np.array([0, 1, 2, -1, -1]))
+ tm.assert_numpy_array_equal(rindexer, np.array([-1, -1, 0, 1, 2]))
+
+ result, lindexer, rindexer = indexer(empty, right)
+ tm.assert_numpy_array_equal(result, right)
+ tm.assert_numpy_array_equal(lindexer, np.array([-1, -1, -1]))
+ tm.assert_numpy_array_equal(rindexer, np.array([0, 1, 2]))
+
+ result, lindexer, rindexer = indexer(left, empty)
+ tm.assert_numpy_array_equal(result, left)
+ tm.assert_numpy_array_equal(lindexer, np.array([0, 1, 2]))
+ tm.assert_numpy_array_equal(rindexer, np.array([-1, -1, -1]))
class TestUnique(tm.TestCase):
_multiprocess_can_split_ = True
| Fixes bug described in issue #10618.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10619 | 2015-07-19T00:11:26Z | 2015-07-30T15:49:15Z | 2015-07-30T15:49:15Z | 2015-07-31T06:42:06Z |
support sql transactions | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index ef8360f0ff459..f0f8897db3f26 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -62,7 +62,11 @@ def compile_big_int_sqlite(type_, compiler, **kw):
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
- return isinstance(con, sqlalchemy.engine.Connectable)
+ is_connectable = isinstance(con, sqlalchemy.engine.Connectable)
+ if sqlalchemy.__version__ >= '1.0.0':
+ # support sessions, if sqlalchemy version has them
+ is_connectable |= isinstance(con, sqlalchemy.orm.session.Session)
+ return is_connectable
else:
return False
@@ -362,7 +366,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
----------
sql : string
SQL query to be executed
- con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
+ con : SQLAlchemy connectable(engine/connection/session) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
@@ -420,7 +424,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
----------
sql : string
SQL query to be executed or database table name.
- con : SQLAlchemy connectable(engine/connection) or DBAPI2 connection (fallback mode)
+ con : SQLAlchemy connectable(engine/connection/session) or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index d95babff2653b..43f6e65adf35d 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -918,6 +918,34 @@ def test_sqlalchemy_type_mapping(self):
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime))
+ def test_session(self):
+ """
+ read_sql_query should work within a session.
+ a temporary table created within the session should be able to be queried.
+ """
+ if sqlalchemy.__version__ < '1.0.0':
+ nose.SkipTest('session requires sqlalchemy>=1.0.0')
+ from sqlalchemy.orm import sessionmaker
+ session = sessionmaker(bind=self.conn)()
+ # create a temporary table within a session
+ # this is contrived example or a temporary tables but they can be really useful
+ session.execute("""CREATE TEMPORARY TABLE temp_iris AS SELECT * FROM iris LIMIT 5""")
+ # read_sql_query can read from the temporary table
+ iris_frame = sql.read_sql_query("SELECT * FROM temp_iris", session)
+ assert(len(iris_frame) == 5)
+
+ def test_session_close(self):
+ """read_sql_query shouldn't close the session"""
+ if sqlalchemy.__version__ < '1.0.0':
+ nose.SkipTest('session requires sqlalchemy>=1.0.0')
+ from sqlalchemy.orm import sessionmaker
+ session = sessionmaker(bind=self.conn)()
+ session.execute("""CREATE TEMPORARY TABLE temp_iris AS SELECT * FROM iris LIMIT 5""")
+ sql.read_sql_query("SELECT count(1) FROM temp_iris", session)
+ # run again to test that the session hasn't been closed by the last call
+ iris_frame = sql.read_sql_query("SELECT * FROM temp_iris", session)
+ assert(len(iris_frame) == 5)
+
class _EngineToConnMixin(object):
"""
| this allows the `con` argument of `pd.read_sql`/`pd.read_sql_query` to be a sqlalchemy [`Session`](http://docs.sqlalchemy.org/en/rel_1_0/orm/session.html) object.
this allows for the use of temporary tables.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10617 | 2015-07-18T20:59:43Z | 2015-07-20T03:21:57Z | null | 2015-07-20T05:37:35Z |
PERF: Improve perf of to_datetime with ISO format | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 6ab299eb70eb5..206c5e2e22711 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -328,6 +328,7 @@ Performance Improvements
- Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`)
- Improved performance of ``Series.isin`` for datetimelike/integer Series (:issue:`10287`)
- 20x improvement in ``concat`` of Categoricals when categories are identical (:issue:`10587`)
+- Improved performance of ``to_datetime`` when specified format string is ISO8601 (:issue:`10178`)
.. _whatsnew_0170.bug_fixes:
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 166760678f3ab..9703accc42695 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -919,8 +919,8 @@ def test_to_datetime_with_apply(self):
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
- self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
- self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
+ self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y', errors='raise'))
+ self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
@@ -4197,6 +4197,20 @@ def test_to_datetime_format_YYYYMMDD(self):
expected = Series(['20121231','20141231','NaT'],dtype='M8[ns]')
assert_series_equal(result, expected)
+ # GH 10178
+ def test_to_datetime_format_integer(self):
+ s = Series([2000, 2001, 2002])
+ expected = Series([ Timestamp(x) for x in s.apply(str) ])
+
+ result = to_datetime(s,format='%Y')
+ assert_series_equal(result, expected)
+
+ s = Series([200001, 200105, 200206])
+ expected = Series([ Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str) ])
+
+ result = to_datetime(s,format='%Y%m')
+ assert_series_equal(result, expected)
+
def test_to_datetime_format_microsecond(self):
val = '01-Apr-2011 00:00:01.978'
format = '%d-%b-%Y %H:%M:%S.%f'
@@ -4524,9 +4538,9 @@ def test_day_not_in_month_coerce_false_raise(self):
def test_day_not_in_month_coerce_false_ignore(self):
self.assertEqual(to_datetime('2015-02-29', errors='ignore', coerce=False), '2015-02-29')
- self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='ignore', format="%Y-%m-%d", coerce=False)
- self.assertRaises(ValueError, to_datetime, '2015-02-32', errors='ignore', format="%Y-%m-%d", coerce=False)
- self.assertRaises(ValueError, to_datetime, '2015-04-31', errors='ignore', format="%Y-%m-%d", coerce=False)
+ self.assertEqual(to_datetime('2015-02-29', errors='ignore', format="%Y-%m-%d", coerce=False), '2015-02-29')
+ self.assertEqual(to_datetime('2015-02-32', errors='ignore', format="%Y-%m-%d", coerce=False), '2015-02-32')
+ self.assertEqual(to_datetime('2015-04-31', errors='ignore', format="%Y-%m-%d", coerce=False), '2015-04-31')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 5ff6a48981ceb..6a1dd934d6bce 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -296,21 +296,24 @@ def _convert_listlike(arg, box, format):
return result
arg = com._ensure_object(arg)
+ require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
- if format is not None:
- # There is a special fast-path for iso8601 formatted
- # datetime strings, so in those cases don't use the inferred
- # format because this path makes process slower in this
- # special case
- format_is_iso8601 = (
- '%Y-%m-%dT%H:%M:%S.%f'.startswith(format) or
- '%Y-%m-%d %H:%M:%S.%f'.startswith(format)
- )
- if format_is_iso8601:
- format = None
+ if format is not None:
+ # There is a special fast-path for iso8601 formatted
+ # datetime strings, so in those cases don't use the inferred
+ # format because this path makes process slower in this
+ # special case
+ format_is_iso8601 = (
+ ('%Y-%m-%dT%H:%M:%S.%f'.startswith(format) or
+ '%Y-%m-%d %H:%M:%S.%f'.startswith(format)) and
+ format != '%Y'
+ )
+ if format_is_iso8601:
+ require_iso8601 = not infer_datetime_format
+ format = None
try:
result = None
@@ -334,16 +337,20 @@ def _convert_listlike(arg, box, format):
raise
result = arg
except ValueError:
- # Only raise this error if the user provided the
- # datetime format, and not when it was inferred
+ # if format was inferred, try falling back
+ # to array_to_datetime - terminate here
+ # for specified formats
if not infer_datetime_format:
- raise
+ if errors == 'raise':
+ raise
+ result = arg
if result is None and (format is None or infer_datetime_format):
- result = tslib.array_to_datetime(arg, raise_=errors == 'raise',
+ result = tslib.array_to_datetime(arg, raise_=errors=='raise',
utc=utc, dayfirst=dayfirst,
yearfirst=yearfirst, freq=freq,
- coerce=coerce, unit=unit)
+ coerce=coerce, unit=unit,
+ require_iso8601=require_iso8601)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz='utc' if utc else None)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index a2fc9b07b16a1..da7cc05621775 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -1808,7 +1808,8 @@ cpdef object _get_rule_month(object source, object default='DEC'):
cpdef array_to_datetime(ndarray[object] values, raise_=False,
dayfirst=False, yearfirst=False, freq=None,
- format=None, utc=None, coerce=False, unit=None):
+ format=None, utc=None, coerce=False, unit=None,
+ require_iso8601=False):
cdef:
Py_ssize_t i, n = len(values)
object val, py_dt
@@ -1908,6 +1909,17 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
iresult[i] = value
_check_dts_bounds(&dts)
except ValueError:
+ # if requiring iso8601 strings, skip trying other formats
+ if require_iso8601:
+ if coerce:
+ iresult[i] = iNaT
+ continue
+ elif raise_:
+ raise ValueError("time data %r does match format specified" %
+ (val,))
+ else:
+ return values
+
try:
py_dt = parse_datetime_string(val, dayfirst=dayfirst,
yearfirst=yearfirst, freq=freq)
@@ -1971,7 +1983,7 @@ cpdef array_to_datetime(ndarray[object] values, raise_=False,
continue
try:
oresult[i] = parse_datetime_string(val, dayfirst=dayfirst,
- yearfirst=yearfirst, freq=freq)
+ yearfirst=yearfirst, freq=freq)
_pydatetime_to_dts(oresult[i], &dts)
_check_dts_bounds(&dts)
except Exception:
diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py
index 57fb1ada78691..75147e079bb65 100644
--- a/vb_suite/timeseries.py
+++ b/vb_suite/timeseries.py
@@ -157,6 +157,10 @@ def date_range(start=None, end=None, periods=None, freq=None):
Benchmark('to_datetime(strings)', setup,
start_date=datetime(2012, 7, 11))
+timeseries_to_datetime_iso8601_format = \
+ Benchmark("to_datetime(strings, format='%Y-%m-%d %H:%M:%S')", setup,
+ start_date=datetime(2012, 7, 11))
+
setup = common_setup + """
rng = date_range('1/1/2000', periods=10000, freq='D')
strings = Series(rng.year*10000+rng.month*100+rng.day,dtype=np.int64).apply(str)
| Closes #10178
Closes #8154
Using the example data from the issue:
```
In [2]: df = DataFrame({'date_text':["2015-05-18" for i in range(10**6)]})
```
Before:
```
In [3]: %timeit pd.to_datetime(df['date_text'],format="%Y-%m-%d", box=False).values.view('i8')/10**9
1 loops, best of 3: 3.14 s per loop
In [4]: %timeit pd.to_datetime(df['date_text'],infer_datetime_format=True, box=False).values.view('i8')/10**9
1 loops, best of 3: 253 ms per lo
```
After:
```
In [6]: %timeit pd.to_datetime(df['date_text'],format="%Y-%m-%d", box=False).values.view('i8')/10**9
1 loops, best of 3: 217 ms per loop
In [7]: %timeit pd.to_datetime(df['date_text'],infer_datetime_format=True, box=False).values.view('i8')/10**9
1 loops, best of 3: 243 ms per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10615 | 2015-07-18T17:34:33Z | 2015-07-20T23:24:49Z | 2015-07-20T23:24:49Z | 2015-10-06T03:19:45Z |
Extended docs on numba | diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 4ada4d4bbdfe5..456ac5e79ac4b 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -307,6 +307,10 @@ Numba works by generating optimized machine code using the LLVM compiler infrast
You will need to install ``numba``. This is easy with ``conda``, by using: ``conda install numba``, see :ref:`installing using miniconda<install.miniconda>`.
+.. note::
+
+ As of ``numba`` version 0.20, pandas objects cannot be passed directly to numba-compiled functions. Instead, one must pass the ``numpy`` array underlying the ``pandas`` object to the numba-compiled function as demonstrated below.
+
We simply take the plain python code from above and annotate with the ``@jit`` decorator.
.. code-block:: python
@@ -338,14 +342,49 @@ We simply take the plain python code from above and annotate with the ``@jit`` d
result = apply_integrate_f_numba(df['a'].values, df['b'].values, df['N'].values)
return pd.Series(result, index=df.index, name='result')
-Similar to above, we directly pass ``numpy`` arrays directly to the numba function. Further
-we are wrapping the results to provide a nice interface by passing/returning pandas objects.
+Note that we directly pass ``numpy`` arrays to the numba function. ``compute_numba`` is just a wrapper that provides a nicer interface by passing/returning pandas objects.
.. code-block:: python
In [4]: %timeit compute_numba(df)
1000 loops, best of 3: 798 us per loop
+``numba`` can also be used to write vectorized functions that do not require the user to explicitly
+loop over the observations of a vector; a vectorized function will be applied to each row automatically.
+Consider the following toy example of doubling each observation:
+
+.. code-block:: python
+
+ import numba
+
+ def double_every_value_nonumba(x):
+ return x*2
+
+ @numba.vectorize
+ def double_every_value_withnumba(x):
+ return x*2
+
+
+ # Custom function without numba
+ In [5]: %timeit df['col1_doubled'] = df.a.apply(double_every_value_nonumba)
+ 1000 loops, best of 3: 797 us per loop
+
+ # Standard implementation (faster than a custom function)
+ In [6]: %timeit df['col1_doubled'] = df.a*2
+ 1000 loops, best of 3: 233 us per loop
+
+ # Custom function with numba
+ In [7]: %timeit df['col1_doubled'] = double_every_value_withnumba(df.a.values)
+ 1000 loops, best of 3: 145 us per loop
+
+.. note::
+
+ ``numba`` will execute on any function, but can only accelerate certain classes of functions.
+
+``numba`` is best at accelerating functions that apply numerical functions to numpy arrays. When passed a function that only uses operations it knows how to accelerate, it will execute in ``nopython`` mode.
+
+If ``numba`` is passed a function that includes something it doesn't know how to work with -- a category that currently includes sets, lists, dictionaries, or string functions -- it will revert to ``object mode``. In ``object mode``, numba will execute but your code will not speed up significantly. If you would prefer that ``numba`` throw an error if it cannot compile a function in a way that speeds up your code, pass numba the argument ``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on troubleshooting ``numba`` modes, see the `numba troubleshooting page <http://numba.pydata.org/numba-doc/0.20.0/user/troubleshoot.html#the-compiled-code-is-too-slow>`__.
+
Read more in the `numba docs <http://numba.pydata.org/>`__.
.. _enhancingperf.eval:
| Added more explicit note about the fact numba does not yet recognize pandas objects so one must always use `.values`, and added example of `numba.vectorize`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10614 | 2015-07-18T04:40:51Z | 2015-07-22T10:52:51Z | 2015-07-22T10:52:51Z | 2015-07-22T10:52:57Z |
ENH: add StataReader context manager to ensure closing of the path | diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 53f2ff455d32e..db9362c5c821e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -949,6 +949,21 @@ def __init__(self, path_or_buf, convert_dates=True,
self._read_header()
+ def __enter__(self):
+ """ enter context manager """
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """ exit context manager """
+ self.close()
+
+ def close(self):
+ """ close the handle if its open """
+ try:
+ self.path_or_buf.close()
+ except IOError:
+ pass
+
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index a06c4384d72c5..4b2781c9dceb6 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -430,10 +430,11 @@ def test_timestamp_and_label(self):
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
- reader = StataReader(path)
- parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
- assert parsed_time_stamp == time_stamp
- assert reader.data_label == data_label
+
+ with StataReader(path) as reader:
+ parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
+ assert parsed_time_stamp == time_stamp
+ assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
@@ -599,13 +600,14 @@ def test_minimal_size_col(self):
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
- sr = StataReader(path)
- typlist = sr.typlist
- variables = sr.varlist
- formats = sr.fmtlist
- for variable, fmt, typ in zip(variables, formats, typlist):
- self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
- self.assertTrue(int(variable[1:]) == typ)
+
+ with StataReader(path) as sr:
+ typlist = sr.typlist
+ variables = sr.varlist
+ formats = sr.fmtlist
+ for variable, fmt, typ in zip(variables, formats, typlist):
+ self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
+ self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
| tests were not closing on windows properly
| https://api.github.com/repos/pandas-dev/pandas/pulls/10613 | 2015-07-17T23:51:25Z | 2015-07-18T15:00:58Z | 2015-07-18T15:00:58Z | 2015-07-18T15:00:58Z |
TST: test_read_famafrench fails with HTTP 404 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e77532b2fe432..5e5b906be3433 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -327,3 +327,5 @@ Bug Fixes
- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`)
- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
+
+- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
\ No newline at end of file
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 3e077bf526ab9..1556f6b00e981 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -504,7 +504,7 @@ def fetch_data(url, name):
def get_data_famafrench(name):
# path of zip files
- zip_file_path = '{0}/{1}.zip'.format(_FAMAFRENCH_URL, name)
+ zip_file_path = '{0}/{1}_TXT.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index 848a775c32f32..63ed26ea7d931 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -481,8 +481,6 @@ def test_read_famafrench(self):
for name in ("F-F_Research_Data_Factors",
"F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3",
"F-F_ST_Reversal_Factor", "F-F_Momentum_Factor"):
- raise nose.SkipTest('getting 404 errors as of 7/15/15')
-
ff = DataReader(name, "famafrench")
self.assertTrue(ff is not None)
self.assertIsInstance(ff, dict)
| Closes #10591. The same fix as pydata/pandas-datareader#53.
Though it is not a bug, added it to the bug section because users likely to check it when they meet 404.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10609 | 2015-07-17T16:40:37Z | 2015-07-17T21:03:49Z | 2015-07-17T21:03:49Z | 2015-07-17T21:03:53Z |
DOC: 9789 Added missing letter, fixed link and Examples formatting. | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index df788f806eda6..3e86527c3ec63 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -167,9 +167,10 @@ class Grouper(object):
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
- freq : string / freqency object, defaults to None
+ freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
- a datetime-like object
+ a datetime-like object. For full specification of available frequencies, please see
+ `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
@@ -187,11 +188,19 @@ class Grouper(object):
Examples
--------
- >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
- >>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
- >>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
- specify a resample on the level 'date' on the columns axis with a frequency of 60s
+
+ Syntactic sugar for ``df.groupby('A')``
+ >>> df.groupby(Grouper(key='A'))
+
+ Specify a resample operation on the column 'date'
+
+ >>> df.groupby(Grouper(key='date', freq='60s'))
+
+ Specify a resample operation on the level 'date' on the columns axis
+ with a frequency of 60s
+
+ >>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
| Fix for https://github.com/pydata/pandas/issues/9789.
How do you feel about direct URLs in the doctrings?
I saw them in a few other docstrings. Pandas maintainers, any preferences?
Still need to fix the Examples at the bottom of the page.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10605 | 2015-07-16T20:38:02Z | 2015-07-21T11:40:02Z | 2015-07-21T11:40:02Z | 2015-07-21T11:40:06Z |
ENH: More permissive S3 reading | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 6ab299eb70eb5..b6e166619c1e8 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -389,3 +389,5 @@ Bug Fixes
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
+
+- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 65cfdff1df14b..b341679176256 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -151,7 +151,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
except boto.exception.NoAuthHandlerFound:
conn = boto.connect_s3(anon=True)
- b = conn.get_bucket(parsed_url.netloc)
+ b = conn.get_bucket(parsed_url.netloc, validate=False)
k = boto.s3.key.Key(b)
k.key = parsed_url.path
filepath_or_buffer = BytesIO(k.get_contents_as_string(
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 0f0486e8ea596..a4940ebdd6079 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -4075,6 +4075,12 @@ def test_parse_public_s3_bucket(self):
nt.assert_false(df.empty)
tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)
+ # Read public file from bucket with not-public contents
+ df = pd.read_csv('s3://cant_get_it/tips.csv')
+ nt.assert_true(isinstance(df, pd.DataFrame))
+ nt.assert_false(df.empty)
+ tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)
+
@tm.network
def test_s3_fails(self):
import boto
@@ -4082,9 +4088,11 @@ def test_s3_fails(self):
'S3ResponseError: 404 Not Found'):
pd.read_csv('s3://nyqpug/asdf.csv')
+ # Receive a permission error when trying to read a private bucket.
+ # It's irrelevant here that this isn't actually a table.
with tm.assertRaisesRegexp(boto.exception.S3ResponseError,
- 'S3ResponseError: 403 Forbidden'):
- pd.read_csv('s3://cant_get_it/tips.csv')
+ 'S3ResponseError: 403 Forbidden'):
+ pd.read_csv('s3://cant_get_it/')
def assert_same_values_and_dtype(res, exp):
| When calling `get_bucket`, boto will by default try to establish that the S3 bucket exists by listing all of the keys that exist in it. This behavior is controlled by the "validate" keyword, which defaults to True. If your access key doesn't have permission to read everything in a bucket (even if you do have permission to read the file you're trying to access), this generates an uninformative exception.
This PR sets "validate=False". This means that boto will trust you that the bucket exists, and not try to check immediately. If the bucket actually doesn't exist, the `get_contents_as_string` call a couple of lines later will generate the exception "S3ResponseError: S3ResponseError: 404 Not Found".
| https://api.github.com/repos/pandas-dev/pandas/pulls/10604 | 2015-07-16T19:33:45Z | 2015-07-21T12:59:38Z | 2015-07-21T12:59:38Z | 2015-07-21T12:59:42Z |
BUG: Fix issue with old-style usage in convert_objects | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ba4e8b0c88358..e363ab8e68408 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3,6 +3,7 @@
import operator
import weakref
import gc
+
import numpy as np
import pandas.lib as lib
@@ -27,6 +28,7 @@
from pandas.util.decorators import Appender, Substitution, deprecate_kwarg
from pandas.core import config
+
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict()
@@ -2473,6 +2475,26 @@ def convert_objects(self, datetime=False, numeric=False,
-------
converted : same as input object
"""
+
+ # Deprecation code to handle usage change
+ issue_warning = False
+ if datetime == 'coerce':
+ datetime = coerce = True
+ numeric = timedelta = False
+ issue_warning = True
+ elif numeric == 'coerce':
+ numeric = coerce = True
+ datetime = timedelta = False
+ issue_warning = True
+ elif timedelta == 'coerce':
+ timedelta = coerce = True
+ datetime = numeric = False
+ issue_warning = True
+ if issue_warning:
+ warnings.warn("The use of 'coerce' as an input is deprecated. "
+ "Instead set coerce=True.",
+ FutureWarning)
+
return self._constructor(
self._data.convert(datetime=datetime,
numeric=numeric,
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 6f128b2e4ff4d..58a5bf4a39000 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -6209,6 +6209,34 @@ def test_convert_objects(self):
result = s.convert_objects(datetime=True, coerce=True)
assert_series_equal(result, expected)
+ # GH 10601
+ # Remove test after deprecation to convert_objects is final
+ def test_convert_objects_old_style_deprecation(self):
+ s = Series(['foo', 'bar', 1, 1.0], dtype='O')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always', FutureWarning)
+ new_style = s.convert_objects(datetime=True, coerce=True)
+ old_style = s.convert_objects(convert_dates='coerce')
+ self.assertEqual(len(w), 2)
+ assert_series_equal(new_style, old_style)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always', FutureWarning)
+ new_style = s.convert_objects(numeric=True, coerce=True)
+ old_style = s.convert_objects(convert_numeric='coerce')
+ self.assertEqual(len(w), 2)
+ assert_series_equal(new_style, old_style)
+
+ dt = datetime(2001, 1, 1, 0, 0)
+ td = dt - datetime(2000, 1, 1, 0, 0)
+ s = Series(['a', '3.1415', dt, td])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always', FutureWarning)
+ new_style = s.convert_objects(timedelta=True, coerce=True)
+ old_style = s.convert_objects(convert_timedeltas='coerce')
+ self.assertEqual(len(w), 2)
+ assert_series_equal(new_style, old_style)
+
def test_convert_objects_no_arg_warning(self):
s = Series(['1.0','2'])
with warnings.catch_warnings(record=True) as w:
| Fix to temporary allow passing 'coerce' to variables
closes #10601
| https://api.github.com/repos/pandas-dev/pandas/pulls/10602 | 2015-07-16T15:00:18Z | 2015-09-06T17:14:50Z | 2015-09-06T17:14:50Z | 2016-02-16T16:30:00Z |
DOC: some formatting fixes in whatsnew | diff --git a/doc/source/whatsnew/v0.11.0.txt b/doc/source/whatsnew/v0.11.0.txt
index befdf848ad23b..50b74fc5af090 100644
--- a/doc/source/whatsnew/v0.11.0.txt
+++ b/doc/source/whatsnew/v0.11.0.txt
@@ -103,6 +103,7 @@ Conversion
Mixed Conversion
.. ipython:: python
+ :okwarning:
df3['D'] = '1.'
df3['E'] = '1'
@@ -116,6 +117,7 @@ Mixed Conversion
Forcing Date coercion (and setting ``NaT`` when not datelike)
.. ipython:: python
+ :okwarning:
from datetime import datetime
s = Series([datetime(2001,1,1,0,0), 'foo', 1.0, 1,
@@ -328,4 +330,3 @@ Enhancements
See the :ref:`full release notes
<release>` or issue tracker
on GitHub for a complete list.
-
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e77532b2fe432..42b8d03ac5863 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -86,20 +86,20 @@ Other enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. _whatsnew_0170.api_breaking.convert_objects:
+
Changes to convert_objects
^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. _whatsnew_0170.api_breaking.convert_objects:
-
- ``DataFrame.convert_objects`` keyword arguments have been shortened. (:issue:`10265`)
-===================== =============
-Old New
-===================== =============
-``convert_dates`` ``datetime``
-``convert_numeric`` ``numeric``
-``convert_timedelta`` ``timedelta``
-===================== =============
+ ===================== =============
+ Old New
+ ===================== =============
+ ``convert_dates`` ``datetime``
+ ``convert_numeric`` ``numeric``
+ ``convert_timedelta`` ``timedelta``
+ ===================== =============
- Coercing types with ``DataFrame.convert_objects`` is now implemented using the
keyword argument ``coerce=True``. Previously types were coerced by setting a
@@ -152,14 +152,19 @@ Old New
to do nothing, and so it is necessary to pass at least one conversion target
in the method call.
+.. _whatsnew_0170.api_breaking.other:
+
+Other API Changes
+^^^^^^^^^^^^^^^^^
+
- Operator equal on Index should behavior similarly to Series (:issue:`9947`)
-Starting in v0.17.0, comparing ``Index`` objects of different lengths will raise
-a ``ValueError``. This is to be consistent with the behavior of ``Series``.
+ Starting in v0.17.0, comparing ``Index`` objects of different lengths will raise
+ a ``ValueError``. This is to be consistent with the behavior of ``Series``.
-Previous behavior:
+ Previous behavior:
-.. code-block:: python
+ .. code-block:: python
In [2]: pd.Index([1, 2, 3]) == pd.Index([1, 4, 5])
Out[2]: array([ True, False, False], dtype=bool)
@@ -183,9 +188,9 @@ Previous behavior:
In [7]: pd.Series([1, 2, 3]) == pd.Series([1, 2])
ValueError: Series lengths must match to compare
-New behavior:
+ New behavior:
-.. code-block:: python
+ .. code-block:: python
In [8]: pd.Index([1, 2, 3]) == pd.Index([1, 4, 5])
Out[8]: array([ True, False, False], dtype=bool)
@@ -209,23 +214,19 @@ New behavior:
In [13]: pd.Series([1, 2, 3]) == pd.Series([1, 2])
ValueError: Series lengths must match to compare
-Note that this is different from the ``numpy`` behavior where a comparison can
-be broadcast:
+ Note that this is different from the ``numpy`` behavior where a comparison can
+ be broadcast:
-.. ipython:: python
+ .. ipython:: python
np.array([1, 2, 3]) == np.array([1])
-or it can return False if broadcasting can not be done:
+ or it can return False if broadcasting can not be done:
-.. ipython:: python
+ .. ipython:: python
np.array([1, 2, 3]) == np.array([1, 2])
-.. _whatsnew_0170.api_breaking.other:
-
-Other API Changes
-^^^^^^^^^^^^^^^^^
- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
@@ -265,8 +266,8 @@ as well as the ``.sum()`` operation.
Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. ``QT``), or performaning multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
-.. _whatsnew_0170.performance:
+.. _whatsnew_0170.performance:
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -277,6 +278,7 @@ Performance Improvements
- Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`)
- Improved performance of ``Series.isin`` for datetimelike/integer Series (:issue:`10287`)
+
.. _whatsnew_0170.bug_fixes:
Bug Fixes
| https://api.github.com/repos/pandas-dev/pandas/pulls/10600 | 2015-07-16T09:02:32Z | 2015-07-17T13:46:29Z | 2015-07-17T13:46:29Z | 2015-07-18T14:51:28Z | |
Empty subtypes of Index return their type, rather than Index | Resolves #10596
| https://api.github.com/repos/pandas-dev/pandas/pulls/10599 | 2015-07-16T00:25:13Z | 2015-07-24T07:34:42Z | null | 2015-07-24T07:34:42Z | |
Improve categorical concat speed by ~20x | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e77532b2fe432..39ed153376e66 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -276,6 +276,7 @@ Performance Improvements
- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
- Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`)
- Improved performance of ``Series.isin`` for datetimelike/integer Series (:issue:`10287`)
+- 20x improvement in ``concat`` of Categoricals when categories are identical (:issue:`10587`)
.. _whatsnew_0170.bug_fixes:
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index edd4a532cf8f5..7aa9145d8aed8 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1715,18 +1715,20 @@ def _convert_to_list_like(list_like):
return [list_like]
def _concat_compat(to_concat, axis=0):
- """
- provide concatenation of an object/categorical array of arrays each of which is a single dtype
+ """Concatenate an object/categorical array of arrays, each of which is a
+ single dtype
Parameters
----------
to_concat : array of arrays
- axis : axis to provide concatenation
- in the current impl this is always 0, e.g. we only have 1-d categoricals
+ axis : int
+ Axis to provide concatenation in the current implementation this is
+ always 0, e.g. we only have 1D categoricals
Returns
-------
- a single array, preserving the combined dtypes
+ Categorical
+ A single array, preserving the combined dtypes
"""
def convert_categorical(x):
@@ -1735,31 +1737,34 @@ def convert_categorical(x):
return x.get_values()
return x.ravel()
- typs = get_dtype_kinds(to_concat)
- if not len(typs-set(['object','category'])):
-
- # we only can deal with object & category types
- pass
-
- else:
-
+ if get_dtype_kinds(to_concat) - set(['object', 'category']):
# convert to object type and perform a regular concat
from pandas.core.common import _concat_compat
- return _concat_compat([ np.array(x,copy=False).astype('object') for x in to_concat ],axis=0)
+ return _concat_compat([np.array(x, copy=False, dtype=object)
+ for x in to_concat], axis=0)
- # we could have object blocks and categorical's here
- # if we only have a single cateogoricals then combine everything
+ # we could have object blocks and categoricals here
+ # if we only have a single categoricals then combine everything
# else its a non-compat categorical
- categoricals = [ x for x in to_concat if is_categorical_dtype(x.dtype) ]
- objects = [ x for x in to_concat if is_object_dtype(x.dtype) ]
+ categoricals = [x for x in to_concat if is_categorical_dtype(x.dtype)]
# validate the categories
- categories = None
- for x in categoricals:
- if categories is None:
- categories = x.categories
- if not categories.equals(x.categories):
+ categories = categoricals[0]
+ rawcats = categories.categories
+ for x in categoricals[1:]:
+ if not categories.is_dtype_equal(x):
raise ValueError("incompatible categories in categorical concat")
- # concat them
- return Categorical(np.concatenate([ convert_categorical(x) for x in to_concat ],axis=0), categories=categories)
+ # we've already checked that all categoricals are the same, so if their
+ # length is equal to the input then we have all the same categories
+ if len(categoricals) == len(to_concat):
+ # concating numeric types is much faster than concating object types
+ # and fastpath takes a shorter path through the constructor
+ return Categorical(np.concatenate([x.codes for x in to_concat], axis=0),
+ rawcats,
+ ordered=categoricals[0].ordered,
+ fastpath=True)
+ else:
+ concatted = np.concatenate(list(map(convert_categorical, to_concat)),
+ axis=0)
+ return Categorical(concatted, rawcats)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6b7909086403e..37d6cb9c0d5b6 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4388,7 +4388,11 @@ def is_null(self):
# Usually it's enough to check but a small fraction of values to see if
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
- values_flat = self.block.values.ravel()
+ values = self.block.values
+ if self.block.is_categorical:
+ values_flat = values.categories
+ else:
+ values_flat = values.ravel()
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 27ba6f953306d..44be74b78d6bb 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -21,6 +21,7 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
PyTuple_SetItem,
PyTuple_New,
PyObject_SetAttrString,
+ PyObject_RichCompareBool,
PyBytes_GET_SIZE,
PyUnicode_GET_SIZE)
@@ -372,19 +373,19 @@ def isnullobj2d_old(ndarray[object, ndim=2] arr):
result[i, j] = 1
return result.view(np.bool_)
-def list_to_object_array(list obj):
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+cpdef ndarray[object] list_to_object_array(list obj):
'''
Convert list to object ndarray. Seriously can't believe I had to write this
function
'''
cdef:
- Py_ssize_t i, n
- ndarray[object] arr
-
- n = len(obj)
- arr = np.empty(n, dtype=object)
+ Py_ssize_t i, n = len(obj)
+ ndarray[object] arr = np.empty(n, dtype=object)
- for i from 0 <= i < n:
+ for i in range(n):
arr[i] = obj[i]
return arr
@@ -732,28 +733,25 @@ def scalar_compare(ndarray[object] values, object val, object op):
return result.view(bool)
+
@cython.wraparound(False)
@cython.boundscheck(False)
-def array_equivalent_object(ndarray[object] left, ndarray[object] right):
+cpdef bint array_equivalent_object(object[:] left, object[:] right):
""" perform an element by element comparion on 1-d object arrays
taking into account nan positions """
- cdef Py_ssize_t i, n
- cdef object x, y
+ cdef:
+ Py_ssize_t i, n = left.shape[0]
+ object x, y
- n = len(left)
- for i from 0 <= i < n:
+ for i in range(n):
x = left[i]
y = right[i]
# we are either not equal or both nan
# I think None == None will be true here
- if cpython.PyObject_RichCompareBool(x, y, cpython.Py_EQ):
- continue
- elif _checknull(x) and _checknull(y):
- continue
- else:
+ if not (PyObject_RichCompareBool(x, y, cpython.Py_EQ) or
+ _checknull(x) and _checknull(y)):
return False
-
return True
diff --git a/vb_suite/categoricals.py b/vb_suite/categoricals.py
new file mode 100644
index 0000000000000..cb33f1bb6c0b1
--- /dev/null
+++ b/vb_suite/categoricals.py
@@ -0,0 +1,16 @@
+from vbench.benchmark import Benchmark
+from datetime import datetime
+
+common_setup = """from pandas_vb_common import *
+"""
+
+#----------------------------------------------------------------------
+# Series constructors
+
+setup = common_setup + """
+s = pd.Series(list('aabbcd') * 1000000).astype('category')
+"""
+
+concat_categorical = \
+ Benchmark("concat([s, s])", setup=setup, name='concat_categorical',
+ start_date=datetime(year=2015, month=7, day=15))
| closes #10587
before (current master):
``` python
In [1]: s = pd.Series(list('aabbcd')*1000000).astype('category')
In [2]: timeit pd.concat([s,s])
1 loops, best of 3: 573 ms per loop
```
after (this PR):
``` python
In [1]: s = pd.Series(list('aabbcd')*1000000).astype('category')
In [2]: timeit pd.concat([s,s])
10 loops, best of 3: 30.1 ms per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10597 | 2015-07-15T23:38:26Z | 2015-07-17T13:05:06Z | 2015-07-17T13:05:06Z | 2015-07-17T13:05:22Z |
DOC-10371 Add note regarding supported interpolation methods for Series/DF | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1656b306a0ddb..c792309e83ecb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2876,6 +2876,9 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
"""
Interpolate values according to different methods.
+ Please note that only ``method='linear'`` is supported for DataFrames/Series
+ with a MultiIndex.
+
Parameters
----------
method : {'linear', 'time', 'index', 'values', 'nearest', 'zero',
| Closes #10371
Suggested improvement for https://github.com/pydata/pandas/issues/10371
| https://api.github.com/repos/pandas-dev/pandas/pulls/10593 | 2015-07-15T22:24:57Z | 2015-07-15T23:36:26Z | 2015-07-15T23:36:26Z | 2015-07-16T08:28:28Z |
DOC: Small improvement to convert_objects doc | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index f739d89295ac1..6c6fda48fb649 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1528,12 +1528,14 @@ object conversion
:ref:`API changes <whatsnew_0170.api_breaking.convert_objects>`
for more details.
-:meth:`~DataFrame.convert_objects` is a method to try to force conversion of
-types from the ``object`` dtype to other types. To try converting specific
-types that are *number like*, e.g. could be a string that represents a number,
-pass ``numeric=True``. To force the conversion, add the keyword argument
-``coerce=True``. This will force strings and number-like objects to be numbers if
-possible, otherwise they will be set to ``np.nan``.
+:meth:`~DataFrame.convert_objects` is a method that converts columns from
+the ``object`` dtype to datetimes, timedeltas or floats. For example, to
+attempt conversion of object data that are *number like*, e.g. could be a
+string that represents a number, pass ``numeric=True``. By default, this will
+attempt a soft conversion and so will only succeed if the entire column is
+convertible. To force the conversion, add the keyword argument ``coerce=True``.
+This will force strings and number-like objects to be numbers if
+possible, and other values will be set to ``np.nan``.
.. ipython:: python
@@ -1549,7 +1551,7 @@ possible, otherwise they will be set to ``np.nan``.
To force conversion to ``datetime64[ns]``, pass ``datetime=True`` and ``coerce=True``.
This will convert any datetime-like object to dates, forcing other values to ``NaT``.
This might be useful if you are reading in data which is mostly dates,
-but occasionally has non-dates intermixed and you want to represent as missing.
+but occasionally contains non-dates that you wish to represent as missing.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 579117b8f0ff7..192a62f8dc223 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -80,16 +80,16 @@ Other enhancements
.. _whatsnew_0170.api:
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
.. _whatsnew_0170.api_breaking:
-.. _whatsnew_0170.api_breaking.other:
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _whatsnew_0170.api_breaking.convert_objects:
Changes to convert_objects
^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. _whatsnew_0170.api_breaking.convert_objects:
+
- ``DataFrame.convert_objects`` keyword arguments have been shortened. (:issue:`10265`)
===================== =============
@@ -101,8 +101,8 @@ Old New
===================== =============
- Coercing types with ``DataFrame.convert_objects`` is now implemented using the
-keyword argument ``coerce=True``. Previously types were coerced by setting a
-keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coerce'``.
+ keyword argument ``coerce=True``. Previously types were coerced by setting a
+ keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coerce'``.
.. ipython:: python
@@ -125,7 +125,7 @@ keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coe
df.convert_objects(numeric=True, coerce=True)
- In earlier versions of pandas, ``DataFrame.convert_objects`` would not coerce
-numeric types when there were no values convertible to a numeric type. For example,
+ numeric types when there were no values convertible to a numeric type. For example,
.. code-block:: python
@@ -136,21 +136,22 @@ numeric types when there were no values convertible to a numeric type. For exam
0 a
1 b
-returns the original DataFrame with no conversion. This change alters
-this behavior so that
+ returns the original DataFrame with no conversion. This change alters
+ this behavior so that
.. ipython:: python
pd.DataFrame({'s': ['a','b']})
df.convert_objects(numeric=True, coerce=True)
-converts all non-number-like strings to ``NaN``.
+ converts all non-number-like strings to ``NaN``.
- In earlier versions of pandas, the default behavior was to try and convert
-datetimes and timestamps. The new default is for ``DataFrame.convert_objects``
-to do nothing, and so it is necessary to pass at least one conversion target
-in the method call.
+ datetimes and timestamps. The new default is for ``DataFrame.convert_objects``
+ to do nothing, and so it is necessary to pass at least one conversion target
+ in the method call.
+.. _whatsnew_0170.api_breaking.other:
Other API Changes
^^^^^^^^^^^^^^^^^
| Fix small issues in convert_objects doc
[skip ci]
| https://api.github.com/repos/pandas-dev/pandas/pulls/10589 | 2015-07-15T21:09:35Z | 2015-07-15T23:37:23Z | 2015-07-15T23:37:23Z | 2016-02-16T16:30:00Z |
Fixed bug where read_csv ignores dtype arg if input is empty. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 77adbfc41b97a..9672066197969 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -357,7 +357,8 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
-- Bug in `pandas.read_csv` with ``index_col=False`` or with ``index_col=['a', 'b']`` (:issue:`10413`, :issue:`10467`)
+- Bug in `pandas.read_csv` with kwargs ``index_col=False``, ``index_col=['a', 'b']`` or ``dtype``
+ (:issue:`10413`, :issue:`10467`, :issue:`10577`)
- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)
- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 1ebe1ad137698..62d51fc510f97 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1170,7 +1170,8 @@ def read(self, nrows=None):
if nrows is None:
return _get_empty_meta(self.orig_names,
self.index_col,
- self.index_names)
+ self.index_names,
+ dtype=self.kwds.get('dtype'))
else:
raise
@@ -2219,19 +2220,30 @@ def _clean_index_names(columns, index_col):
return index_names, columns, index_col
-def _get_empty_meta(columns, index_col, index_names):
+def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
+ if dtype is None:
+ dtype = {}
+ else:
+ # Convert column indexes to column names.
+ dtype = dict((columns[k] if com.is_integer(k) else k, v)
+ for k, v in compat.iteritems(dtype))
+
if index_col is None or index_col is False:
index = Index([])
else:
- index_col = list(index_col)
- index = MultiIndex.from_arrays([[]] * len(index_col), names=index_names)
+ index = [ np.empty(0, dtype=dtype.get(index_name, np.object))
+ for index_name in index_names ]
+ index = MultiIndex.from_arrays(index, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n-i)
- return index, columns, {}
+ col_dict = dict((col_name, np.empty(0, dtype=dtype.get(col_name, np.object)))
+ for col_name in columns)
+
+ return index, columns, col_dict
def _floatify_na_values(na_values):
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index e6aee76df4e74..0f0486e8ea596 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -3540,6 +3540,64 @@ def test_pass_dtype(self):
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
+ def test_empty_pass_dtype(self):
+ data = 'one,two'
+ result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
+
+ expected = DataFrame({'one': np.empty(0, dtype='u1'),
+ 'two': np.empty(0, dtype=np.object)})
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_index_pass_dtype(self):
+ data = 'one,two'
+ result = self.read_csv(StringIO(data), index_col=['one'],
+ dtype={'one': 'u1', 1: 'f'})
+
+ expected = DataFrame({'two': np.empty(0, dtype='f')},
+ index=Index([], dtype='u1', name='one'))
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_multiindex_pass_dtype(self):
+ data = 'one,two,three'
+ result = self.read_csv(StringIO(data), index_col=['one', 'two'],
+ dtype={'one': 'u1', 1: 'f8'})
+
+ expected = DataFrame({'three': np.empty(0, dtype=np.object)}, index=MultiIndex.from_arrays(
+ [np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
+ names=['one', 'two'])
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_mangled_column_pass_dtype_by_names(self):
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), dtype={'one': 'u1', 'one.1': 'f'})
+
+ expected = DataFrame({'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
+
+ expected = DataFrame({'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_dup_column_pass_dtype_by_names(self):
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
+ expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_dup_column_pass_dtype_by_indexes(self):
+ ### FIXME in GH9424
+ raise nose.SkipTest("GH 9424; known failure read_csv with duplicate columns")
+
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
+ expected = pd.concat([Series([], name='one', dtype='u1'),
+ Series([], name='one', dtype='f')], axis=1)
+ tm.assert_frame_equal(result, expected)
+
def test_usecols_dtypes(self):
data = """\
1,2,3
| The `CParser` implementation for `pd.read_csv` ignores argument `dtype` if the input is empty. This pull request fixes this so that a DataFrame with the expected column types is returned.
``` Python
import pandas as pd
import cStringIO as stringio
data, dtype = 'a,b', 'i'
df = pd.read_csv(stringio.StringIO(data), dtype={'a': dtype})
assert df.dtypes[0].kind == dtype, "df.types[0].kind = %r, dtype = %r" % (df.dtypes[0].kind, dtype)
```
``
Traceback (most recent call last):
File "issue_1.py", line 9, in <module>
assert df.dtypes[0].kind == dtype, "df.types[0].kind = %r, dtype = %r" % (df.dtypes[0].kind, dtype)
AssertionError: df.types[0].kind = 'O', dtype = 'i'
```
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10577 | 2015-07-15T06:38:56Z | 2015-07-18T11:20:37Z | 2015-07-18T11:20:37Z | 2015-07-18T11:20:49Z |
BUG: Fix typo-related bug to resolve #9266 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 2dbed08aa02f3..592855cc89f3b 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -135,8 +135,13 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
+
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
- Bug in `pandas.read_csv` with ``index_col=False`` or with ``index_col=['a', 'b']`` (:issue:`10413`, :issue:`10467`)
- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
+
+- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)
+
+- Bug in `_convert_to_ndarrays` which cause an `AttributeError` in some cases (:issue:`9266`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 4c94ab66d3de2..4e69eeb600ecb 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1055,7 +1055,7 @@ def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
- raise TypeError("Indexes do not support mutable operations")
+ raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 7d4c9df64c0bb..1a3be8b7a488d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -997,7 +997,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
try:
values = lib.map_infer(values, conv_f)
except ValueError:
- mask = lib.ismember(values, na_values).view(np.uin8)
+ mask = lib.ismember(values, na_values).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
coerce_type = False
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 3dae9f383db8f..c82151c9243fc 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2679,6 +2679,27 @@ def test_fwf_compression(self):
compression=comp_name)
tm.assert_frame_equal(result, expected)
+ def test_fwf_for_uint8(self):
+ data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
+1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
+ df = read_fwf(StringIO(data),
+ colspecs=[(0,17),(25,26),(33,37),(49,51),(58,62),(63,1000)],
+ names=['time','pri','pgn','dst','src','data'],
+ converters={
+ 'pgn':lambda x: int(x,16),
+ 'src':lambda x: int(x,16),
+ 'dst':lambda x: int(x,16),
+ 'data':lambda x: len(x.split(' '))})
+
+ expected = DataFrame([[1421302965.213420,3,61184,23,40,8],
+ [1421302964.226776,6,61442,None, 71,8]],
+ columns = ["time", "pri", "pgn", "dst", "src","data"])
+
+ # Hacky fix for dst column dtype
+ expected["dst"] = expected["dst"].astype(object)
+
+ tm.assert_frame_equal(df, expected)
+
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest("Bytes-related test - only needs to work on Python 3")
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 4cffb663748a4..53fcdb61bd1ae 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -1147,58 +1147,43 @@ def group_prod_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
group_var_template = """@cython.wraparound(False)
@cython.boundscheck(False)
+@cython.cdivision(True)
def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
ndarray[int64_t] counts,
ndarray[%(dest_type2)s, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- %(dest_type2)s val, ct
- ndarray[%(dest_type2)s, ndim=2] nobs, sumx, sumxx
+ %(dest_type2)s val, ct, oldmean
+ ndarray[%(dest_type2)s, ndim=2] nobs, mean
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
+ mean = np.zeros_like(out)
N, K = (<object> values).shape
- with nogil:
- if K > 1:
- for i in range(N):
-
- lab = labels[i]
- if lab < 0:
- continue
+ out[:, :] = 0.0
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- sumxx[lab, j] += val * val
- else:
- for i in range(N):
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- lab = labels[i]
- if lab < 0:
- continue
+ counts[lab] += 1
- counts[lab] += 1
- val = values[i, 0]
+ for j in range(K):
+ val = values[i, j]
# not nan
if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
- sumxx[lab, 0] += val * val
-
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
@@ -1206,8 +1191,8 @@ def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
if ct < 2:
out[i, j] = NAN
else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
+ out[i, j] /= (ct - 1)
+
"""
group_var_bin_template = """@cython.wraparound(False)
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index c76838c4a49c9..db0e96d158f0c 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -7232,58 +7232,43 @@ def group_prod_bin_float32(ndarray[float32_t, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
+@cython.cdivision(True)
def group_var_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, ct
- ndarray[float64_t, ndim=2] nobs, sumx, sumxx
+ float64_t val, ct, oldmean
+ ndarray[float64_t, ndim=2] nobs, mean
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
+ mean = np.zeros_like(out)
N, K = (<object> values).shape
- with nogil:
- if K > 1:
- for i in range(N):
-
- lab = labels[i]
- if lab < 0:
- continue
+ out[:, :] = 0.0
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- sumxx[lab, j] += val * val
- else:
- for i in range(N):
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- lab = labels[i]
- if lab < 0:
- continue
+ counts[lab] += 1
- counts[lab] += 1
- val = values[i, 0]
+ for j in range(K):
+ val = values[i, j]
# not nan
if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
- sumxx[lab, 0] += val * val
-
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
@@ -7291,63 +7276,48 @@ def group_var_float64(ndarray[float64_t, ndim=2] out,
if ct < 2:
out[i, j] = NAN
else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
+ out[i, j] /= (ct - 1)
+
@cython.wraparound(False)
@cython.boundscheck(False)
+@cython.cdivision(True)
def group_var_float32(ndarray[float32_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float32_t, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, ct
- ndarray[float32_t, ndim=2] nobs, sumx, sumxx
+ float32_t val, ct, oldmean
+ ndarray[float32_t, ndim=2] nobs, mean
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
+ mean = np.zeros_like(out)
N, K = (<object> values).shape
- with nogil:
- if K > 1:
- for i in range(N):
-
- lab = labels[i]
- if lab < 0:
- continue
+ out[:, :] = 0.0
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- sumxx[lab, j] += val * val
- else:
- for i in range(N):
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- lab = labels[i]
- if lab < 0:
- continue
+ counts[lab] += 1
- counts[lab] += 1
- val = values[i, 0]
+ for j in range(K):
+ val = values[i, j]
# not nan
if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
- sumxx[lab, 0] += val * val
-
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
@@ -7355,8 +7325,8 @@ def group_var_float32(ndarray[float32_t, ndim=2] out,
if ct < 2:
out[i, j] = NAN
else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
+ out[i, j] /= (ct - 1)
+
@cython.wraparound(False)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 8192f6e99116b..138ef92831b2a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -2,6 +2,7 @@
from pandas.compat import range
import numpy as np
+from numpy.random import RandomState
from pandas.core.api import Series, Categorical
import pandas as pd
@@ -10,6 +11,7 @@
import pandas.util.testing as tm
import pandas.hashtable as hashtable
+
class TestMatch(tm.TestCase):
_multiprocess_can_split_ = True
@@ -285,6 +287,125 @@ def test_dropna(self):
pd.Series([10.3, 5., 5., None]).value_counts(dropna=False),
pd.Series([2, 1, 1], index=[5., 10.3, np.nan]))
+
+class GroupVarTestMixin(object):
+
+ def test_group_var_generic_1d(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((5, 1))).astype(self.dtype)
+ counts = np.zeros(5, dtype=int)
+ values = 10 * prng.rand(15, 1).astype(self.dtype)
+ labels = np.tile(np.arange(5), (3, ))
+
+ expected_out = (np.squeeze(values)
+ .reshape((5, 3), order='F')
+ .std(axis=1, ddof=1) ** 2)[:, np.newaxis]
+ expected_counts = counts + 3
+
+ self.algo(out, counts, values, labels)
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_generic_1d_flat_labels(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((1, 1))).astype(self.dtype)
+ counts = np.zeros(1, dtype=int)
+ values = 10 * prng.rand(5, 1).astype(self.dtype)
+ labels = np.zeros(5, dtype=int)
+
+ expected_out = np.array([[values.std(ddof=1) ** 2]])
+ expected_counts = counts + 5
+
+ self.algo(out, counts, values, labels)
+
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_generic_2d_all_finite(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
+ counts = np.zeros(5, dtype=int)
+ values = 10 * prng.rand(10, 2).astype(self.dtype)
+ labels = np.tile(np.arange(5), (2, ))
+
+ expected_out = np.std(
+ values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
+ expected_counts = counts + 2
+
+ self.algo(out, counts, values, labels)
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_generic_2d_some_nan(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
+ counts = np.zeros(5, dtype=int)
+ values = 10 * prng.rand(10, 2).astype(self.dtype)
+ values[:, 1] = np.nan
+ labels = np.tile(np.arange(5), (2, ))
+
+ expected_out = np.vstack([
+ values[:, 0].reshape(5, 2, order='F').std(ddof=1, axis=1) ** 2,
+ np.nan * np.ones(5)
+ ]).T
+ expected_counts = counts + 2
+
+ self.algo(out, counts, values, labels)
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_constant(self):
+ # Regression test from GH 10448.
+
+ out = np.array([[np.nan]], dtype=self.dtype)
+ counts = np.array([0])
+ values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
+ labels = np.zeros(3, dtype=np.int)
+
+ self.algo(out, counts, values, labels)
+
+ self.assertEqual(counts[0], 3)
+ self.assertTrue(out[0, 0] >= 0) # Python 2.6 has no assertGreaterEqual
+ tm.assert_almost_equal(out[0, 0], 0.0)
+
+
+class TestGroupVarFloat64(tm.TestCase, GroupVarTestMixin):
+ __test__ = True
+ _multiprocess_can_split_ = True
+
+ algo = algos.algos.group_var_float64
+ dtype = np.float64
+ rtol = 1e-5
+
+ def test_group_var_large_inputs(self):
+
+ prng = RandomState(1234)
+
+ out = np.array([[np.nan]], dtype=self.dtype)
+ counts = np.array([0])
+ values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
+ values.shape = (10 ** 6, 1)
+ labels = np.zeros(10 ** 6, dtype=np.int)
+
+ self.algo(out, counts, values, labels)
+
+ self.assertEqual(counts[0], 10 ** 6)
+ tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
+
+
+class TestGroupVarFloat32(tm.TestCase, GroupVarTestMixin):
+ __test__ = True
+ _multiprocess_can_split_ = True
+
+ algo = algos.algos.group_var_float32
+ dtype = np.float32
+ rtol = 1e-2
+
+
def test_quantile():
s = Series(np.random.randn(100))
| closes #9266
Tried replicating the test case provided in the original issue. Let me know if it needs any changes or fixes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10576 | 2015-07-15T04:31:58Z | 2015-07-21T05:27:32Z | null | 2015-07-21T06:09:49Z |
Add Python 3 support and optional parameter "silent" for read_gbq | diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 06ad8827a5642..87157205591a2 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -15,9 +15,6 @@
def _check_google_client_version():
- if compat.PY3:
- raise NotImplementedError("Google's libraries do not support Python 3 yet")
-
try:
import pkg_resources
@@ -26,8 +23,9 @@ def _check_google_client_version():
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version
- if LooseVersion(_GOOGLE_API_CLIENT_VERSION) < '1.2.0':
- raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google "
+ required_version = '1.4.0' if compat.PY3 else '1.2.0'
+ if LooseVersion(_GOOGLE_API_CLIENT_VERSION) < required_version:
+ raise ImportError("pandas requires google-api-python-client >= " + required_version + " for Google "
"BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION)
logger = logging.getLogger('pandas.io.gbq')
@@ -133,7 +131,7 @@ def get_service(self, credentials):
return bigquery_service
- def run_query(self, query):
+ def run_query(self, query, silent):
try:
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
@@ -182,7 +180,8 @@ def run_query(self, query):
job_reference = query_reply['jobReference']
while(not query_reply.get('jobComplete', False)):
- print('Job not yet complete...')
+ if not silent:
+ print('Job is not yet complete...')
query_reply = job_collection.getQueryResults(
projectId=job_reference['projectId'],
jobId=job_reference['jobId']).execute()
@@ -267,10 +266,10 @@ def _parse_data(schema, rows):
fields = schema['fields']
col_types = [field['type'] for field in fields]
- col_names = [field['name'].encode('ascii', 'ignore') for field in fields]
+ col_names = [field['name'] for field in fields]
col_dtypes = [dtype_map.get(field['type'], object) for field in fields]
page_array = np.zeros((len(rows),),
- dtype=zip(col_names, col_dtypes))
+ dtype=list(zip(col_names, col_dtypes)))
for row_num, raw_row in enumerate(rows):
entries = raw_row.get('f', [])
@@ -294,7 +293,7 @@ def _parse_entry(field_value, field_type):
return field_value
-def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False):
+def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, silent = False):
"""Load data from Google BigQuery.
THIS IS AN EXPERIMENTAL LIBRARY
@@ -319,6 +318,8 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
+ silent : boolean (default False)
+ Do not print status messages during query execution if True
Returns
-------
@@ -332,7 +333,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals
raise TypeError("Missing required parameter: project_id")
connector = GbqConnector(project_id, reauth = reauth)
- schema, pages = connector.run_query(query)
+ schema, pages = connector.run_query(query, silent = silent)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py
index 5417842d3f863..c9883414da36f 100644
--- a/pandas/io/tests/test_gbq.py
+++ b/pandas/io/tests/test_gbq.py
@@ -9,6 +9,7 @@
import sys
import platform
from time import sleep
+from io import StringIO
import numpy as np
@@ -39,47 +40,43 @@ def missing_bq():
return True
def _test_imports():
- if not compat.PY3:
+ required_version = '1.4.0' if compat.PY3 else '1.2.0'
- global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
- _HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
+ global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
+ _HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
- try:
- import pkg_resources
- _SETUPTOOLS_INSTALLED = True
- except ImportError:
- _SETUPTOOLS_INSTALLED = False
+ try:
+ import pkg_resources
+ _SETUPTOOLS_INSTALLED = True
+ except ImportError:
+ _SETUPTOOLS_INSTALLED = False
- if _SETUPTOOLS_INSTALLED:
- try:
- from apiclient.discovery import build
- from apiclient.errors import HttpError
+ if _SETUPTOOLS_INSTALLED:
+ try:
+ from apiclient.discovery import build
+ from apiclient.errors import HttpError
- from oauth2client.client import OAuth2WebServerFlow
- from oauth2client.client import AccessTokenRefreshError
+ from oauth2client.client import OAuth2WebServerFlow
+ from oauth2client.client import AccessTokenRefreshError
- from oauth2client.file import Storage
- from oauth2client.tools import run_flow
- _GOOGLE_API_CLIENT_INSTALLED=True
- _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version
+ from oauth2client.file import Storage
+ from oauth2client.tools import run_flow
+ _GOOGLE_API_CLIENT_INSTALLED=True
+ _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version
- if LooseVersion(_GOOGLE_API_CLIENT_VERSION) >= '1.2.0':
- _GOOGLE_API_CLIENT_VALID_VERSION = True
+ if LooseVersion(_GOOGLE_API_CLIENT_VERSION) >= required_version:
+ _GOOGLE_API_CLIENT_VALID_VERSION = True
- except ImportError:
- _GOOGLE_API_CLIENT_INSTALLED = False
+ except ImportError:
+ _GOOGLE_API_CLIENT_INSTALLED = False
- try:
- import httplib2
- _HTTPLIB2_INSTALLED = True
- except ImportError:
- _HTTPLIB2_INSTALLED = False
+ try:
+ import httplib2
+ _HTTPLIB2_INSTALLED = True
+ except ImportError:
+ _HTTPLIB2_INSTALLED = False
-
- if compat.PY3:
- raise NotImplementedError("Google's libraries do not support Python 3 yet")
-
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
@@ -87,7 +84,7 @@ def _test_imports():
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
- raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google "
+ raise ImportError("pandas requires google-api-python-client >= " + required_version + " for Google "
"BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION)
if not _HTTPLIB2_INSTALLED:
@@ -295,6 +292,14 @@ def test_download_dataset_larger_than_200k_rows(self):
# http://stackoverflow.com/questions/19145587/bq-py-not-paging-results
df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] GROUP EACH BY id ORDER BY id ASC LIMIT 200005", project_id=PROJECT_ID)
self.assertEqual(len(df.drop_duplicates()), 200005)
+
+ def test_silent_option_true(self):
+ stdout = sys.stdout
+ sys.stdout = StringIO()
+ gbq.read_gbq("SELECT 3", project_id = PROJECT_ID, silent = True)
+ output = sys.stdout.getvalue()
+ sys.stdout = stdout
+ tm.assert_equal(output, "")
class TestToGBQIntegration(tm.TestCase):
# This class requires bq.py to be installed for setup/teardown.
| Google's libraries support Python 3 since version 1.4.0, so we could add support for Python 3 in read_gbq method.
Also it is not always useful to see "Job not yet complete" printings, so I've added optional parameter that allows disable it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10572 | 2015-07-14T22:21:28Z | 2015-09-14T19:41:30Z | null | 2015-09-15T01:12:12Z |
COMPAT: Allow multi-indexes to be written to excel | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 7e69a8044a305..76efa6592877e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -501,6 +501,7 @@ Other API Changes
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`).
+- Allow ``DataFrame`` with ``MultiIndex`` columns to be written to Excel (:issue: `10564`). This was changed in 0.16.2 as the read-back method could not always guarantee perfect fidelity (:issue:`9794`).
- ``Categorical.unique`` now returns new ``Categorical`` which ``categories`` and ``codes`` are unique, rather than returning ``np.array`` (:issue:`10508`)
- unordered category: values and categories are sorted by appearance order.
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 4ec4375349764..c04531c682413 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -4,6 +4,7 @@
# pylint: disable=W0141
import sys
+import warnings
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
@@ -1640,11 +1641,14 @@ class ExcelFormatter(object):
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
+ verbose: boolean, default True
+ If True, warn user that the resulting output file may not be
+ re-read or parsed directly by pandas.
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
- inf_rep='inf'):
+ inf_rep='inf', verbose=True):
self.df = df
self.rowcounter = 0
self.na_rep = na_rep
@@ -1657,6 +1661,7 @@ def __init__(self, df, na_rep='', float_format=None, cols=None,
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
+ self.verbose = verbose
def _format_value(self, val):
if lib.checknull(val):
@@ -1671,6 +1676,17 @@ def _format_value(self, val):
return val
def _format_header_mi(self):
+
+ if self.columns.nlevels > 1:
+ if not self.index:
+ raise NotImplementedError("Writing to Excel with MultiIndex"
+ " columns and no index ('index'=False) "
+ "is not yet implemented.")
+ elif self.index and self.verbose:
+ warnings.warn("Writing to Excel with MultiIndex columns is a"
+ " one way serializable operation. You will not"
+ " be able to re-read or parse the output file.")
+
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not(has_aliases or self.header):
return
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 062cbe579785c..d9443fe09c623 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1247,7 +1247,8 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
- merge_cells=True, encoding=None, inf_rep='inf'):
+ merge_cells=True, encoding=None, inf_rep='inf',
+ verbose=True):
"""
Write DataFrame to a excel sheet
@@ -1288,6 +1289,9 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
+ verbose: boolean, default True
+ If True, warn user that the resulting output file may not be
+ re-read or parsed directly by pandas.
Notes
-----
@@ -1304,12 +1308,8 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
strings before writing.
"""
from pandas.io.excel import ExcelWriter
- if self.columns.nlevels > 1:
- raise NotImplementedError("Writing as Excel with a MultiIndex is "
- "not yet implemented.")
-
need_save = False
- if encoding == None:
+ if encoding is None:
encoding = 'ascii'
if isinstance(excel_writer, compat.string_types):
@@ -1324,7 +1324,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
index=index,
index_label=index_label,
merge_cells=merge_cells,
- inf_rep=inf_rep)
+ inf_rep=inf_rep, verbose=verbose)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol)
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 26f4d65978fa0..83db59f9d9029 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -316,7 +316,6 @@ def test_read_from_file_url(self):
tm.assert_frame_equal(url_table, local_table)
-
def test_xlsx_table(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
@@ -1145,10 +1144,10 @@ def test_excel_010_hemstring(self):
# ensure limited functionality in 0.10
# override of #2370 until sorted out in 0.11
- def roundtrip(df, header=True, parser_hdr=0):
+ def roundtrip(df, header=True, parser_hdr=0, index=True):
with ensure_clean(self.ext) as path:
- df.to_excel(path, header=header, merge_cells=self.merge_cells)
+ df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index)
xf = pd.ExcelFile(path)
res = xf.parse(xf.sheet_names[0], header=parser_hdr)
return res
@@ -1164,7 +1163,7 @@ def roundtrip(df, header=True, parser_hdr=0):
#is implemented for now fixing #9794
if j>1:
with tm.assertRaises(NotImplementedError):
- res = roundtrip(df, use_headers)
+ res = roundtrip(df, use_headers, index=False)
else:
res = roundtrip(df, use_headers)
@@ -1187,6 +1186,33 @@ def roundtrip(df, header=True, parser_hdr=0):
self.assertEqual(res.shape, (1, 2))
self.assertTrue(res.ix[0, 0] is not np.nan)
+ def test_excel_010_hemstring_raises_NotImplementedError(self):
+ # This test was failing only for j>1 and header=False,
+ # So I reproduced a simple test.
+ _skip_if_no_xlrd()
+
+ if self.merge_cells:
+ raise nose.SkipTest('Skip tests for merged MI format.')
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ # ensure limited functionality in 0.10
+ # override of #2370 until sorted out in 0.11
+
+ def roundtrip2(df, header=True, parser_hdr=0, index=True):
+
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index)
+ xf = pd.ExcelFile(path)
+ res = xf.parse(xf.sheet_names[0], header=parser_hdr)
+ return res
+
+ nrows = 5; ncols = 3
+ j = 2; i = 1
+ df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
+ with tm.assertRaises(NotImplementedError):
+ res = roundtrip2(df, header=False, index=False)
+
+
def test_duplicated_columns(self):
# Test for issue #5235.
_skip_if_no_xlrd()
@@ -1439,29 +1465,37 @@ class XlwtTests(ExcelWriterBase, tm.TestCase):
engine_name = 'xlwt'
check_skip = staticmethod(_skip_if_no_xlwt)
- def test_excel_raise_not_implemented_error_on_multiindex_columns(self):
+ def test_excel_raise_error_on_multiindex_columns_and_no_index(self):
_skip_if_no_xlwt()
- #MultiIndex as columns is not yet implemented 9794
- cols = pd.MultiIndex.from_tuples([('site',''),
- ('2014','height'),
- ('2014','weight')])
- df = pd.DataFrame(np.random.randn(10,3), columns=cols)
+ # MultiIndex as columns is not yet implemented 9794
+ cols = pd.MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
with tm.assertRaises(NotImplementedError):
with ensure_clean(self.ext) as path:
df.to_excel(path, index=False)
+ def test_excel_warns_verbosely_on_multiindex_columns_and_index_true(self):
+ _skip_if_no_xlwt()
+ cols = pd.MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
+ with tm.assert_produces_warning(UserWarning):
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, index=True)
+
def test_excel_multiindex_index(self):
_skip_if_no_xlwt()
- #MultiIndex as index works so assert no error #9794
- cols = pd.MultiIndex.from_tuples([('site',''),
- ('2014','height'),
- ('2014','weight')])
- df = pd.DataFrame(np.random.randn(3,10), index=cols)
+ # MultiIndex as index works so assert no error #9794
+ cols = pd.MultiIndex.from_tuples([('site', ''),
+ ('2014', 'height'),
+ ('2014', 'weight')])
+ df = pd.DataFrame(np.random.randn(3, 10), index=cols)
with ensure_clean(self.ext) as path:
df.to_excel(path, index=False)
-
-
def test_to_excel_styleconverter(self):
_skip_if_no_xlwt()
| (Even though they cannot be read back in)
Closes #10564
| https://api.github.com/repos/pandas-dev/pandas/pulls/10570 | 2015-07-14T18:12:46Z | 2015-08-20T20:33:14Z | 2015-08-20T20:33:14Z | 2016-03-31T15:59:29Z |
ERR: Boolean comparisons of a Series vs None will now be equivalent to null comparisons | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 45f1412c65e3d..c7e08910a5924 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -34,6 +34,7 @@ New features
Other enhancements
^^^^^^^^^^^^^^^^^^
+
- Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`)
- ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`)
@@ -91,7 +92,7 @@ Backwards incompatible API changes
Changes to convert_objects
^^^^^^^^^^^^^^^^^^^^^^^^^^
-- ``DataFrame.convert_objects`` keyword arguments have been shortened. (:issue:`10265`)
+``DataFrame.convert_objects`` keyword arguments have been shortened. (:issue:`10265`)
===================== =============
Old New
@@ -101,70 +102,65 @@ Changes to convert_objects
``convert_timedelta`` ``timedelta``
===================== =============
-- Coercing types with ``DataFrame.convert_objects`` is now implemented using the
- keyword argument ``coerce=True``. Previously types were coerced by setting a
- keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coerce'``.
-
- .. ipython:: python
-
- df = pd.DataFrame({'i': ['1','2'],
- 'f': ['apple', '4.2'],
- 's': ['apple','banana']})
- df
+Coercing types with ``DataFrame.convert_objects`` is now implemented using the
+keyword argument ``coerce=True``. Previously types were coerced by setting a
+keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coerce'``.
- The old usage of ``DataFrame.convert_objects`` used `'coerce'` along with the
- type.
+.. ipython:: python
- .. code-block:: python
+ df = pd.DataFrame({'i': ['1','2'],
+ 'f': ['apple', '4.2'],
+ 's': ['apple','banana']})
+ df
- In [2]: df.convert_objects(convert_numeric='coerce')
+The old usage of ``DataFrame.convert_objects`` used `'coerce'` along with the
+type.
- Now the ``coerce`` keyword must be explicitly used.
+.. code-block:: python
- .. ipython:: python
+ In [2]: df.convert_objects(convert_numeric='coerce')
- df.convert_objects(numeric=True, coerce=True)
+Now the ``coerce`` keyword must be explicitly used.
-- In earlier versions of pandas, ``DataFrame.convert_objects`` would not coerce
- numeric types when there were no values convertible to a numeric type. For example,
+.. ipython:: python
- .. code-block:: python
+ df.convert_objects(numeric=True, coerce=True)
- In [1]: df = pd.DataFrame({'s': ['a','b']})
- In [2]: df.convert_objects(convert_numeric='coerce')
- Out[2]:
- s
- 0 a
- 1 b
+In earlier versions of pandas, ``DataFrame.convert_objects`` would not coerce
+numeric types when there were no values convertible to a numeric type. This returns
+the original DataFrame with no conversion. This change alters
+this behavior so that converts all non-number-like strings to ``NaN``.
- returns the original DataFrame with no conversion. This change alters
- this behavior so that
+.. code-block:: python
- .. ipython:: python
+ In [1]: df = pd.DataFrame({'s': ['a','b']})
+ In [2]: df.convert_objects(convert_numeric='coerce')
+ Out[2]:
+ s
+ 0 a
+ 1 b
- pd.DataFrame({'s': ['a','b']})
- df.convert_objects(numeric=True, coerce=True)
+.. ipython:: python
- converts all non-number-like strings to ``NaN``.
+ pd.DataFrame({'s': ['a','b']})
+ df.convert_objects(numeric=True, coerce=True)
-- In earlier versions of pandas, the default behavior was to try and convert
- datetimes and timestamps. The new default is for ``DataFrame.convert_objects``
- to do nothing, and so it is necessary to pass at least one conversion target
- in the method call.
+In earlier versions of pandas, the default behavior was to try and convert
+datetimes and timestamps. The new default is for ``DataFrame.convert_objects``
+to do nothing, and so it is necessary to pass at least one conversion target
+in the method call.
-.. _whatsnew_0170.api_breaking.other:
+Changes to Index Comparisons
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Other API Changes
-^^^^^^^^^^^^^^^^^
+Operator equal on Index should behavior similarly to Series (:issue:`9947`)
-- Operator equal on Index should behavior similarly to Series (:issue:`9947`)
+Starting in v0.17.0, comparing ``Index`` objects of different lengths will raise
+a ``ValueError``. This is to be consistent with the behavior of ``Series``.
- Starting in v0.17.0, comparing ``Index`` objects of different lengths will raise
- a ``ValueError``. This is to be consistent with the behavior of ``Series``.
+Previous behavior:
- Previous behavior:
-
- .. code-block:: python
+.. code-block:: python
In [2]: pd.Index([1, 2, 3]) == pd.Index([1, 4, 5])
Out[2]: array([ True, False, False], dtype=bool)
@@ -188,9 +184,9 @@ Other API Changes
In [7]: pd.Series([1, 2, 3]) == pd.Series([1, 2])
ValueError: Series lengths must match to compare
- New behavior:
+New behavior:
- .. code-block:: python
+.. code-block:: python
In [8]: pd.Index([1, 2, 3]) == pd.Index([1, 4, 5])
Out[8]: array([ True, False, False], dtype=bool)
@@ -214,25 +210,27 @@ Other API Changes
In [13]: pd.Series([1, 2, 3]) == pd.Series([1, 2])
ValueError: Series lengths must match to compare
- Note that this is different from the ``numpy`` behavior where a comparison can
- be broadcast:
+Note that this is different from the ``numpy`` behavior where a comparison can
+be broadcast:
- .. ipython:: python
+.. ipython:: python
np.array([1, 2, 3]) == np.array([1])
- or it can return False if broadcasting can not be done:
+or it can return False if broadcasting can not be done:
- .. ipython:: python
+.. ipython:: python
np.array([1, 2, 3]) == np.array([1, 2])
+Other API Changes
+^^^^^^^^^^^^^^^^^
+
- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`).
-
.. _whatsnew_0170.deprecations:
Deprecations
@@ -243,6 +241,8 @@ Deprecations
Removal of prior version deprecations/changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`)
+
.. _dask: https://dask.readthedocs.org/en/latest/
.. _whatsnew_0170.gil:
@@ -285,48 +285,51 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Boolean comparisons of a ``Series`` vs ``None`` will now be equivalent to comparing with ``np.nan``, rather than raise ``TypeError``, xref (:issue:`1079`).
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`)
-
- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
-
-
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
-
- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
+- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
+- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
+- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`)
-- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
-- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
-- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`)
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
-
- Bug in ``io.sql.get_schema`` when specifying multiple columns as primary
key (:issue:`10385`).
-
-
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
- Bug in ``Series.map`` using categorical ``Series`` raises ``AttributeError`` (:issue:`10324`)
- Bug in ``MultiIndex.get_level_values`` including ``Categorical`` raises ``AttributeError`` (:issue:`10460`)
+
+
+
+
+
+
- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
-
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
-
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
-
- Bug in `pandas.read_csv` with ``index_col=False`` or with ``index_col=['a', 'b']`` (:issue:`10413`, :issue:`10467`)
-
- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
-
- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)
-
- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`)
+
+
+
+
+
+
+
+
+
- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 33a2fc0aea732..49db94c3bfa86 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -462,6 +462,10 @@ def array_equivalent(left, right, strict_nan=False):
if issubclass(left.dtype.type, (np.floating, np.complexfloating)):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
+ # numpy will will not allow this type of datetimelike vs integer comparison
+ elif is_datetimelike_v_numeric(left, right):
+ return False
+
# NaNs cannot occur otherwise.
return np.array_equal(left, right)
@@ -2539,6 +2543,26 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype):
return issubclass(tipo, (np.datetime64, np.timedelta64))
+def is_datetimelike_v_numeric(a, b):
+ # return if we have an i8 convertible and numeric comparision
+ if not hasattr(a,'dtype'):
+ a = np.asarray(a)
+ if not hasattr(b, 'dtype'):
+ b = np.asarray(b)
+ f = lambda x: is_integer_dtype(x) or is_float_dtype(x)
+ return (needs_i8_conversion(a) and f(b)) or (
+ needs_i8_conversion(b) and f(a))
+
+def is_datetimelike_v_object(a, b):
+ # return if we have an i8 convertible and object comparision
+ if not hasattr(a,'dtype'):
+ a = np.asarray(a)
+ if not hasattr(b, 'dtype'):
+ b = np.asarray(b)
+ f = lambda x: is_object_dtype(x)
+ return (needs_i8_conversion(a) and f(b)) or (
+ needs_i8_conversion(b) and f(a))
+
needs_i8_conversion = is_datetime_or_timedelta_dtype
def i8_boxer(arr_or_dtype):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c792309e83ecb..273c444b30b80 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3574,7 +3574,14 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
except ValueError:
new_other = np.array(other)
- matches = (new_other == np.array(other))
+ # we can end up comparing integers and m8[ns]
+ # which is a numpy no no
+ is_i8 = com.needs_i8_conversion(self.dtype)
+ if is_i8:
+ matches = False
+ else:
+ matches = (new_other == np.array(other))
+
if matches is False or not matches.all():
# coerce other to a common dtype if we can
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 7047f07280012..98e0214dbf073 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -164,18 +164,18 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
elif data is None or np.isscalar(data):
cls._scalar_data_error(data)
else:
- if tupleize_cols and isinstance(data, list) and data:
+ if tupleize_cols and isinstance(data, list) and data and isinstance(data[0], tuple):
try:
- sorted(data)
- has_mixed_types = False
- except (TypeError, UnicodeDecodeError):
- has_mixed_types = True # python3 only
- if isinstance(data[0], tuple) and not has_mixed_types:
- try:
- return MultiIndex.from_tuples(
- data, names=name or kwargs.get('names'))
- except (TypeError, KeyError):
- pass # python2 - MultiIndex fails on mixed types
+
+ # must be orderable in py3
+ if compat.PY3:
+ sorted(data)
+ return MultiIndex.from_tuples(
+ data, names=name or kwargs.get('names'))
+ except (TypeError, KeyError):
+ # python2 - MultiIndex fails on mixed types
+ pass
+
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 37d6cb9c0d5b6..67e25164537a7 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -14,7 +14,7 @@
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
- is_categorical)
+ is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
@@ -3885,9 +3885,16 @@ def _vstack(to_stack, dtype):
def _possibly_compare(a, b, op):
- res = op(a, b)
+
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
+
+ # numpy deprecation warning to have i8 vs integer comparisions
+ if is_datetimelike_v_numeric(a, b):
+ res = False
+ else:
+ res = op(a, b)
+
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 0b62eb1e53ddb..089ca21cb0ef3 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -13,8 +13,13 @@
from pandas.util.decorators import Appender
import pandas.core.common as com
import pandas.computation.expressions as expressions
+from pandas.lib import isscalar
+from pandas.tslib import iNaT
from pandas.core.common import(bind_method, is_list_like, notnull, isnull,
- _values_from_object, _maybe_match_name)
+ _values_from_object, _maybe_match_name,
+ needs_i8_conversion, is_datetimelike_v_numeric,
+ is_integer_dtype, is_categorical_dtype, is_object_dtype,
+ is_timedelta64_dtype, is_datetime64_dtype, is_bool_dtype)
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
@@ -257,7 +262,7 @@ class _TimeOp(object):
Generally, you should use classmethod ``maybe_convert_for_time_op`` as an
entry point.
"""
- fill_value = tslib.iNaT
+ fill_value = iNaT
wrap_results = staticmethod(lambda x: x)
dtype = None
@@ -273,11 +278,11 @@ def __init__(self, left, right, name):
lvalues = self._convert_to_array(left, name=name)
rvalues = self._convert_to_array(right, name=name, other=lvalues)
- self.is_timedelta_lhs = com.is_timedelta64_dtype(left)
- self.is_datetime_lhs = com.is_datetime64_dtype(left)
+ self.is_timedelta_lhs = is_timedelta64_dtype(left)
+ self.is_datetime_lhs = is_datetime64_dtype(left)
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
- self.is_datetime_rhs = com.is_datetime64_dtype(rvalues)
- self.is_timedelta_rhs = com.is_timedelta64_dtype(rvalues)
+ self.is_datetime_rhs = is_datetime64_dtype(rvalues)
+ self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self._validate()
@@ -346,13 +351,13 @@ def _convert_to_array(self, values, name=None, other=None):
if (other is not None and other.dtype == 'timedelta64[ns]' and
all(isnull(v) for v in values)):
values = np.empty(values.shape, dtype=other.dtype)
- values[:] = tslib.iNaT
+ values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
elif not (isinstance(values, (np.ndarray, pd.Series)) and
- com.is_datetime64_dtype(values)):
+ is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
# have a timedelta, convert to to ns here
@@ -381,7 +386,7 @@ def _convert_to_array(self, values, name=None, other=None):
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
if isnull(values).all():
values = np.empty(values.shape, dtype=other.dtype)
- values[:] = tslib.iNaT
+ values[:] = iNaT
else:
raise TypeError(
'incompatible type [{0}] for a datetime/timedelta '
@@ -445,8 +450,8 @@ def maybe_convert_for_time_op(cls, left, right, name):
that the data is not the right type for time ops.
"""
# decide if we can do it
- is_timedelta_lhs = com.is_timedelta64_dtype(left)
- is_datetime_lhs = com.is_datetime64_dtype(left)
+ is_timedelta_lhs = is_timedelta64_dtype(left)
+ is_datetime_lhs = is_datetime64_dtype(left)
if not (is_datetime_lhs or is_timedelta_lhs):
return None
@@ -544,17 +549,17 @@ def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
- if com.is_categorical_dtype(x):
+ if is_categorical_dtype(x):
return op(x,y)
- elif com.is_categorical_dtype(y) and not lib.isscalar(y):
+ elif is_categorical_dtype(y) and not isscalar(y):
return op(y,x)
- if x.dtype == np.object_:
+ if is_object_dtype(x.dtype):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
- if y.dtype != np.object_:
+ if not is_object_dtype(y.dtype):
result = lib.vec_compare(x, y.astype(np.object_), op)
else:
result = lib.vec_compare(x, y, op)
@@ -562,13 +567,44 @@ def na_op(x, y):
result = lib.scalar_compare(x, y, op)
else:
+ # we want to compare like types
+ # we only want to convert to integer like if
+ # we are not NotImplemented, otherwise
+ # we would allow datetime64 (but viewed as i8) against
+ # integer comparisons
+ if is_datetimelike_v_numeric(x, y):
+ raise TypeError("invalid type comparison")
+
+ # numpy does not like comparisons vs None
+ if isscalar(y) and isnull(y):
+ y = np.nan
+
+ # we have a datetime/timedelta and may need to convert
+ mask = None
+ if needs_i8_conversion(x) or (not isscalar(y) and needs_i8_conversion(y)):
+
+ if isscalar(y):
+ y = _index.convert_scalar(x,_values_from_object(y))
+ else:
+ y = y.view('i8')
+
+ if name == '__ne__':
+ mask = notnull(x)
+ else:
+ mask = isnull(x)
+
+ x = x.view('i8')
+
try:
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
- except (AttributeError):
+ except AttributeError:
result = op(x, y)
+ if mask is not None and mask.any():
+ result[mask] = False
+
return result
def wrapper(self, other, axis=None):
@@ -590,29 +626,24 @@ def wrapper(self, other, axis=None):
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
elif isinstance(other, pd.Categorical):
- if not com.is_categorical_dtype(self):
+ if not is_categorical_dtype(self):
msg = "Cannot compare a Categorical for op {op} with Series of dtype {typ}.\n"\
"If you want to compare values, use 'series <op> np.asarray(other)'."
raise TypeError(msg.format(op=op,typ=self.dtype))
- mask = isnull(self)
-
- if com.is_categorical_dtype(self):
+ if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray, which would then
# not take categories ordering into account
# we can go directly to op, as the na_op would just test again and dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
- other = _index.convert_scalar(values,_values_from_object(other))
-
- if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
- values = values.view('i8')
+ if is_list_like(other):
+ other = np.asarray(other)
- # scalars
res = na_op(values, other)
- if np.isscalar(res):
+ if isscalar(res):
raise TypeError('Could not compare %s type with Series'
% type(other))
@@ -621,11 +652,6 @@ def wrapper(self, other, axis=None):
res = pd.Series(res, index=self.index, name=self.name,
dtype='bool')
-
- # mask out the invalids
- if mask.any():
- res[mask] = masker
-
return res
return wrapper
@@ -643,8 +669,7 @@ def na_op(x, y):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
- if (x.dtype == np.bool_ and
- y.dtype == np.bool_): # pragma: no cover
+ if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
@@ -665,7 +690,7 @@ def na_op(x, y):
return result
def wrapper(self, other):
- is_self_int_dtype = com.is_integer_dtype(self.dtype)
+ is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
@@ -673,7 +698,7 @@ def wrapper(self, other):
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
other = other.reindex_like(self)
- is_other_int_dtype = com.is_integer_dtype(other.dtype)
+ is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
@@ -686,7 +711,7 @@ def wrapper(self, other):
else:
# scalars, list, tuple, np.array
- filler = fill_int if is_self_int_dtype and com.is_integer_dtype(np.asarray(other)) else fill_bool
+ filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool
return filler(self._constructor(na_op(self.values, other),
index=self.index)).__finalize__(self)
@@ -1046,7 +1071,7 @@ def na_op(x, y):
# work only for scalars
def f(self, other):
- if not np.isscalar(other):
+ if not isscalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index eecc225d06beb..53f2ff455d32e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -23,6 +23,7 @@
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
+import pandas as pd
import pandas.core.common as com
from pandas.io.common import get_filepath_or_buffer
from pandas.lib import max_len_string_array, infer_dtype
@@ -291,7 +292,7 @@ def convert_delta_safe(base, deltas, unit):
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
- conv_dates[bad_locs] = np.nan
+ conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 8eb60b13fcc81..a06c4384d72c5 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -180,9 +180,9 @@ def test_read_dta2(self):
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
- tm.assert_frame_equal(parsed_114, expected)
- tm.assert_frame_equal(parsed_115, expected)
- tm.assert_frame_equal(parsed_117, expected)
+ tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True)
+ tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True)
+ tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
@@ -684,6 +684,7 @@ def test_big_dates(self):
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
+
# Fixes for weekly, quarterly,half,year
expected[2][2] = datetime(9999,12,24)
expected[2][3] = datetime(9999,12,1)
@@ -696,11 +697,10 @@ def test_big_dates(self):
expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
-
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
- tm.assert_frame_equal(expected, parsed_115)
- tm.assert_frame_equal(expected, parsed_117)
+ tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True)
+ tm.assert_frame_equal(expected, parsed_117, check_datetimelike_compat=True)
date_conversion = dict((c, c[-2:]) for c in columns)
#{c : c[-2:] for c in columns}
@@ -709,7 +709,8 @@ def test_big_dates(self):
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
- expected)
+ expected,
+ check_datetimelike_compat=True)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
@@ -912,7 +913,8 @@ def test_read_chunks_117(self):
warnings.simplefilter("always")
parsed = read_stata(fname, convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
- itr = read_stata(fname, iterator=True)
+ itr = read_stata(fname, iterator=True, convert_categoricals=convert_categoricals,
+ convert_dates=convert_dates)
pos = 0
for j in range(5):
@@ -923,12 +925,10 @@ def test_read_chunks_117(self):
except StopIteration:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
- try:
- tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
- except AssertionError:
- # datetime.datetime and pandas.tslib.Timestamp may hold
- # equivalent values but fail assert_frame_equal
- assert(all([x == y for x, y in zip(from_frame, chunk)]))
+ tm.assert_frame_equal(from_frame,
+ chunk,
+ check_dtype=False,
+ check_datetimelike_compat=True)
pos += chunksize
@@ -966,13 +966,15 @@ def test_read_chunks_115(self):
for convert_categoricals in False, True:
for convert_dates in False, True:
+ # Read the whole file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(fname, convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
- itr = read_stata(fname, iterator=True,
- convert_categoricals=convert_categoricals)
+ # Compare to what we get when reading by chunk
+ itr = read_stata(fname, iterator=True, convert_dates=convert_dates,
+ convert_categoricals=convert_categoricals)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w:
@@ -982,12 +984,10 @@ def test_read_chunks_115(self):
except StopIteration:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
- try:
- tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
- except AssertionError:
- # datetime.datetime and pandas.tslib.Timestamp may hold
- # equivalent values but fail assert_frame_equal
- assert(all([x == y for x, y in zip(from_frame, chunk)]))
+ tm.assert_frame_equal(from_frame,
+ chunk,
+ check_dtype=False,
+ check_datetimelike_compat=True)
pos += chunksize
@@ -1011,4 +1011,3 @@ def test_read_chunks_columns(self):
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
-
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 44be74b78d6bb..4805a33e5b496 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -378,7 +378,7 @@ def isnullobj2d_old(ndarray[object, ndim=2] arr):
@cython.boundscheck(False)
cpdef ndarray[object] list_to_object_array(list obj):
'''
- Convert list to object ndarray. Seriously can't believe I had to write this
+ Convert list to object ndarray. Seriously can\'t believe I had to write this
function
'''
cdef:
@@ -682,6 +682,7 @@ def scalar_compare(ndarray[object] values, object val, object op):
cdef:
Py_ssize_t i, n = len(values)
ndarray[uint8_t, cast=True] result
+ bint isnull_val
int flag
object x
@@ -701,12 +702,15 @@ def scalar_compare(ndarray[object] values, object val, object op):
raise ValueError('Unrecognized operator')
result = np.empty(n, dtype=bool).view(np.uint8)
+ isnull_val = _checknull(val)
if flag == cpython.Py_NE:
for i in range(n):
x = values[i]
if _checknull(x):
result[i] = True
+ elif isnull_val:
+ result[i] = True
else:
try:
result[i] = cpython.PyObject_RichCompareBool(x, val, flag)
@@ -717,6 +721,8 @@ def scalar_compare(ndarray[object] values, object val, object op):
x = values[i]
if _checknull(x):
result[i] = False
+ elif isnull_val:
+ result[i] = False
else:
try:
result[i] = cpython.PyObject_RichCompareBool(x, val, flag)
@@ -728,6 +734,8 @@ def scalar_compare(ndarray[object] values, object val, object op):
x = values[i]
if _checknull(x):
result[i] = False
+ elif isnull_val:
+ result[i] = False
else:
result[i] = cpython.PyObject_RichCompareBool(x, val, flag)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index e17910a2e14be..db23b13edd42b 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -12,6 +12,7 @@
from pandas.tseries.common import is_datetimelike
from pandas import Series, Index, Int64Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, Timedelta
import pandas.tslib as tslib
+from pandas import _np_version_under1p9
import nose
import pandas.util.testing as tm
@@ -273,6 +274,45 @@ def setUp(self):
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
+ def test_none_comparison(self):
+
+ # bug brought up by #1079
+ # changed from TypeError in 0.17.0
+ for o in self.is_valid_objs:
+ if isinstance(o, Series):
+
+ o[0] = np.nan
+
+ result = o == None
+ self.assertFalse(result.iat[0])
+ self.assertFalse(result.iat[1])
+
+ result = o != None
+ self.assertTrue(result.iat[0])
+ self.assertTrue(result.iat[1])
+
+ result = None == o
+ self.assertFalse(result.iat[0])
+ self.assertFalse(result.iat[1])
+
+ if _np_version_under1p9:
+ # fails as this tries not __eq__ which
+ # is not valid for numpy
+ pass
+ else:
+ result = None != o
+ self.assertTrue(result.iat[0])
+ self.assertTrue(result.iat[1])
+
+ result = None > o
+ self.assertFalse(result.iat[0])
+ self.assertFalse(result.iat[1])
+
+ result = o < None
+ self.assertFalse(result.iat[0])
+ self.assertFalse(result.iat[1])
+
+
def test_ndarray_compat_properties(self):
for o in self.objs:
@@ -513,7 +553,7 @@ def test_value_counts_inferred(self):
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
- self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
+ self.assert_numpy_array_equivalent(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 3d901837f5123..569f7d84862ff 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -10,14 +10,13 @@
import os
import numpy as np
-from numpy.testing import assert_array_equal
from pandas import (period_range, date_range, Categorical, Series,
Index, Float64Index, Int64Index, MultiIndex,
CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex)
from pandas.core.index import InvalidIndexError, NumericIndex
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
- assert_copy)
+ assert_copy, assert_numpy_array_equivalent, assert_numpy_array_equal)
from pandas import compat
from pandas.compat import long, is_platform_windows
@@ -101,7 +100,7 @@ def test_reindex_base(self):
expected = np.arange(idx.size)
actual = idx.get_indexer(idx)
- assert_array_equal(expected, actual)
+ assert_numpy_array_equivalent(expected, actual)
with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
@@ -449,7 +448,7 @@ def test_constructor(self):
index = Index(arr, copy=True, name='name')
tm.assertIsInstance(index, Index)
self.assertEqual(index.name, 'name')
- assert_array_equal(arr, index)
+ assert_numpy_array_equivalent(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
@@ -506,7 +505,7 @@ def __array__(self, dtype=None):
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
- assert_array_equal(rs, xp)
+ assert_numpy_array_equivalent(rs, xp)
tm.assertIsInstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
@@ -1111,11 +1110,11 @@ def test_get_indexer_nearest(self):
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
- self.assert_array_equal(actual, [0, 5, 9])
+ self.assert_numpy_array_equivalent(actual, [0, 5, 9])
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9], [0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
- self.assert_array_equal(actual, expected)
+ self.assert_numpy_array_equivalent(actual, expected)
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
@@ -1126,22 +1125,22 @@ def test_get_indexer_nearest_decreasing(self):
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
- self.assert_array_equal(actual, [9, 4, 0])
+ self.assert_numpy_array_equivalent(actual, [9, 4, 0])
for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1], [9, 7, 0]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
- self.assert_array_equal(actual, expected)
+ self.assert_numpy_array_equivalent(actual, expected)
def test_get_indexer_strings(self):
idx = pd.Index(['b', 'c'])
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
expected = [-1, 0, 1, 1]
- self.assert_array_equal(actual, expected)
+ self.assert_numpy_array_equivalent(actual, expected)
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
expected = [0, 0, 1, -1]
- self.assert_array_equal(actual, expected)
+ self.assert_numpy_array_equivalent(actual, expected)
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
@@ -1447,7 +1446,7 @@ def test_str_attribute(self):
# test boolean case, should return np.array instead of boolean Index
idx = Index(['a1', 'a2', 'b1', 'b2'])
expected = np.array([True, True, False, False])
- self.assert_array_equal(idx.str.startswith('a'), expected)
+ self.assert_numpy_array_equivalent(idx.str.startswith('a'), expected)
self.assertIsInstance(idx.str.startswith('a'), np.ndarray)
s = Series(range(4), index=idx)
expected = Series(range(2), index=['a1', 'a2'])
@@ -1557,8 +1556,8 @@ def test_equals_op(self):
index_d = Index(['foo'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
index_a == index_b
- assert_array_equal(index_a == index_a, np.array([True, True, True]))
- assert_array_equal(index_a == index_c, np.array([True, True, False]))
+ assert_numpy_array_equivalent(index_a == index_a, np.array([True, True, True]))
+ assert_numpy_array_equivalent(index_a == index_c, np.array([True, True, False]))
# test comparisons with numpy arrays
array_a = np.array(['foo', 'bar', 'baz'])
@@ -1567,8 +1566,8 @@ def test_equals_op(self):
array_d = np.array(['foo'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
index_a == array_b
- assert_array_equal(index_a == array_a, np.array([True, True, True]))
- assert_array_equal(index_a == array_c, np.array([True, True, False]))
+ assert_numpy_array_equivalent(index_a == array_a, np.array([True, True, True]))
+ assert_numpy_array_equivalent(index_a == array_c, np.array([True, True, False]))
# test comparisons with Series
series_a = Series(['foo', 'bar', 'baz'])
@@ -1577,8 +1576,8 @@ def test_equals_op(self):
series_d = Series(['foo'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
index_a == series_b
- assert_array_equal(index_a == series_a, np.array([True, True, True]))
- assert_array_equal(index_a == series_c, np.array([True, True, False]))
+ assert_numpy_array_equivalent(index_a == series_a, np.array([True, True, True]))
+ assert_numpy_array_equivalent(index_a == series_c, np.array([True, True, False]))
# cases where length is 1 for one of them
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
@@ -1593,27 +1592,26 @@ def test_equals_op(self):
series_a == array_d
# comparing with scalar should broadcast
- assert_array_equal(index_a == 'foo', np.array([True, False, False]))
- assert_array_equal(series_a == 'foo', np.array([True, False, False]))
- assert_array_equal(array_a == 'foo', np.array([True, False, False]))
+ assert_numpy_array_equivalent(index_a == 'foo', np.array([True, False, False]))
+ assert_numpy_array_equivalent(series_a == 'foo', np.array([True, False, False]))
+ assert_numpy_array_equivalent(array_a == 'foo', np.array([True, False, False]))
# GH9785
# test comparisons of multiindex
from pandas.compat import StringIO
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
- assert_array_equal(df.index == df.index, np.array([True, True]))
+ assert_numpy_array_equivalent(df.index == df.index, np.array([True, True]))
mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])
- assert_array_equal(df.index == mi1, np.array([True, True]))
+ assert_numpy_array_equivalent(df.index == mi1, np.array([True, True]))
mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])
- assert_array_equal(df.index == mi2, np.array([True, False]))
+ assert_numpy_array_equivalent(df.index == mi2, np.array([True, False]))
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == mi3
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == index_a
- assert_array_equal(index_a == mi3, np.array([False, False, False]))
-
+ assert_numpy_array_equivalent(index_a == mi3, np.array([False, False, False]))
class TestCategoricalIndex(Base, tm.TestCase):
_holder = CategoricalIndex
@@ -1868,7 +1866,7 @@ def test_reindex_base(self):
expected = np.array([4,0,1,5,2,3])
actual = idx.get_indexer(idx)
- assert_array_equal(expected, actual)
+ assert_numpy_array_equivalent(expected, actual)
with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
@@ -1883,7 +1881,7 @@ def test_reindexing(self):
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
- assert_array_equal(expected, actual)
+ assert_numpy_array_equivalent(expected, actual)
def test_duplicates(self):
@@ -2184,12 +2182,12 @@ def test_equals(self):
def test_get_indexer(self):
idx = Float64Index([0.0, 1.0, 2.0])
- self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
target = [-0.1, 0.5, 1.1]
- self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
def test_get_loc(self):
idx = Float64Index([0.0, 1.0, 2.0])
@@ -2227,15 +2225,15 @@ def test_doesnt_contain_all_the_things(self):
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
- np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
- np.testing.assert_array_equal(i.isin([2.0, np.pi]),
+ assert_numpy_array_equivalent(i.isin([1.0]), np.array([True, False]))
+ assert_numpy_array_equivalent(i.isin([2.0, np.pi]),
np.array([False, False]))
- np.testing.assert_array_equal(i.isin([np.nan]),
+ assert_numpy_array_equivalent(i.isin([np.nan]),
np.array([False, True]))
- np.testing.assert_array_equal(i.isin([1.0, np.nan]),
+ assert_numpy_array_equivalent(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
- np.testing.assert_array_equal(i.isin([np.nan]),
+ assert_numpy_array_equivalent(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
@@ -2784,19 +2782,19 @@ def test_get_loc(self):
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
- assert_array_equal(idx.get_loc(time(12)), [12])
- assert_array_equal(idx.get_loc(time(12, 30)), [])
+ assert_numpy_array_equivalent(idx.get_loc(time(12)), [12])
+ assert_numpy_array_equivalent(idx.get_loc(time(12, 30)), [])
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
- self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
- self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
def test_roundtrip_pickle_with_tz(self):
@@ -2826,7 +2824,7 @@ def test_time_loc(self): # GH8667
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
- tm.assert_array_equal(ts.index.get_loc(key), i)
+ tm.assert_numpy_array_equivalent(ts.index.get_loc(key), i)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
@@ -2906,13 +2904,13 @@ def test_get_loc(self):
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
- self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
- self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
with self.assertRaisesRegexp(ValueError, 'different freq'):
idx.asfreq('D').get_indexer(idx)
@@ -2950,12 +2948,12 @@ def test_get_loc(self):
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
- self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
- self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
def test_numeric_compat(self):
@@ -3535,7 +3533,7 @@ def test_from_product(self):
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
- assert_array_equal(result, expected)
+ assert_numpy_array_equivalent(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
@@ -3545,7 +3543,7 @@ def test_from_product_datetimeindex(self):
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
- assert_array_equal(mi.values, etalon)
+ assert_numpy_array_equivalent(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
@@ -3555,9 +3553,9 @@ def test_values_boxed(self):
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
- assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
+ assert_numpy_array_equivalent(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
- assert_array_equal(mi.values[:4], mi[:4].values)
+ assert_numpy_array_equivalent(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
@@ -3597,28 +3595,28 @@ def test_get_level_values_na(self):
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
- assert_array_equal(values.values.astype(float), expected)
+ assert_numpy_array_equivalent(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
- assert_array_equal(values.values.astype(float), expected)
+ assert_numpy_array_equivalent(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
- assert_array_equal(values.values.astype(float), expected)
+ assert_numpy_array_equivalent(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
- assert_array_equal(values.values, expected)
+ assert_numpy_array_equivalent(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
- assert_array_equal(values.values, expected.values)
+ assert_numpy_array_equivalent(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
@@ -4644,14 +4642,14 @@ def check(nlevels, with_nulls):
for take_last in [False, True]:
left = mi.duplicated(take_last=take_last)
right = pd.lib.duplicated(mi.values, take_last=take_last)
- tm.assert_array_equal(left, right)
+ tm.assert_numpy_array_equivalent(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
self.assertFalse(mi.has_duplicates)
self.assertEqual(mi.get_duplicates(), [])
- self.assert_array_equal(mi.duplicated(), np.zeros(2, dtype='bool'))
+ self.assert_numpy_array_equivalent(mi.duplicated(), np.zeros(2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
@@ -4662,7 +4660,7 @@ def check(nlevels, with_nulls):
self.assertEqual(len(mi), (n + 1) * (m + 1))
self.assertFalse(mi.has_duplicates)
self.assertEqual(mi.get_duplicates(), [])
- self.assert_array_equal(mi.duplicated(),
+ self.assert_numpy_array_equivalent(mi.duplicated(),
np.zeros(len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
@@ -4866,7 +4864,6 @@ def test_equals_operator(self):
# GH9785
self.assertTrue((self.index == self.index).all())
-
def test_get_combined_index():
from pandas.core.index import _get_combined_index
result = _get_combined_index([])
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index ef05b40827dfd..6d2c87a187995 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -753,15 +753,15 @@ def test_equals(self):
def test_equals_block_order_different_dtypes(self):
# GH 9330
-
- mgr_strings = [
+
+ mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
-
+
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
@@ -812,6 +812,13 @@ def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
# import pudb; pudb.set_trace()
mat = mgr.as_matrix()
+
+ # we maybe using an ndarray to test slicing and
+ # might not be the full length of the axis
+ if isinstance(slobj, np.ndarray):
+ ax = mgr.axes[axis]
+ if len(ax) and len(slobj) and len(slobj) != len(ax):
+ slobj = np.concatenate([slobj, np.zeros(len(ax)-len(slobj),dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None),) * axis + (slobj,)
assert_almost_equal(mat[mat_slobj], sliced.as_matrix())
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 7326d7a9d811d..361cf4aba705f 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -504,11 +504,6 @@ def test_comparisons(self):
s == s2
s2 == s
- def test_none_comparison(self):
- # bug brought up by #1079
- s = Series(np.random.randn(10), index=lrange(0, 20, 2))
- self.assertRaises(TypeError, s.__eq__, None)
-
def test_sum_zero(self):
arr = np.array([])
self.assertEqual(nanops.nansum(arr), 0)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index c8b96076b26bd..5b23d7123935e 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -849,11 +849,11 @@ def test_string_na_nat_conversion(self):
result2 = to_datetime(strings)
tm.assertIsInstance(result2, DatetimeIndex)
- self.assert_numpy_array_equal(result, result2)
+ self.assert_numpy_array_equivalent(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
- self.assert_numpy_array_equal(result, malformed)
+ self.assert_numpy_array_equivalent(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 7378e3504b5ca..9f75e42a8676a 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -24,7 +24,8 @@
from numpy.testing import assert_array_equal
import pandas as pd
-from pandas.core.common import is_sequence, array_equivalent, is_list_like, is_number
+from pandas.core.common import (is_sequence, array_equivalent, is_list_like, is_number,
+ is_datetimelike_v_numeric, is_datetimelike_v_object)
import pandas.compat as compat
from pandas.compat import(
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
@@ -59,7 +60,6 @@ def reset_testing_mode():
set_testing_mode()
-
class TestCase(unittest.TestCase):
@classmethod
@@ -634,7 +634,7 @@ def assert_categorical_equal(res, exp):
raise AssertionError("name not the same")
-def assert_numpy_array_equal(np_array, assert_equal):
+def assert_numpy_array_equal(np_array, assert_equal, err_msg=None):
"""Checks that 'np_array' is equal to 'assert_equal'
Note that the expected array should not contain `np.nan`!
@@ -646,11 +646,12 @@ def assert_numpy_array_equal(np_array, assert_equal):
"""
if np.array_equal(np_array, assert_equal):
return
- raise AssertionError(
- '{0} is not equal to {1}.'.format(np_array, assert_equal))
+ if err_msg is None:
+ err_msg = '{0} is not equal to {1}.'.format(np_array, assert_equal)
+ raise AssertionError(err_msg)
-def assert_numpy_array_equivalent(np_array, assert_equal, strict_nan=False):
+def assert_numpy_array_equivalent(np_array, assert_equal, strict_nan=False, err_msg=None):
"""Checks that 'np_array' is equivalent to 'assert_equal'
Two numpy arrays are equivalent if the arrays have equal non-NaN elements,
@@ -664,8 +665,9 @@ def assert_numpy_array_equivalent(np_array, assert_equal, strict_nan=False):
"""
if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
return
- raise AssertionError(
- '{0} is not equivalent to {1}.'.format(np_array, assert_equal))
+ if err_msg is None:
+ err_msg = '{0} is not equivalent to {1}.'.format(np_array, assert_equal)
+ raise AssertionError(err_msg)
# This could be refactored to use the NDFrame.equals method
@@ -674,7 +676,8 @@ def assert_series_equal(left, right, check_dtype=True,
check_series_type=False,
check_less_precise=False,
check_exact=False,
- check_names=True):
+ check_names=True,
+ check_datetimelike_compat=False):
if check_series_type:
assertIsInstance(left, type(right))
if check_dtype:
@@ -683,6 +686,18 @@ def assert_series_equal(left, right, check_dtype=True,
if not np.array_equal(left.values, right.values):
raise AssertionError('{0} is not equal to {1}.'.format(left.values,
right.values))
+ elif check_datetimelike_compat:
+ # we want to check only if we have compat dtypes
+ # e.g. integer and M|m are NOT compat, but we can simply check the values in that case
+ if is_datetimelike_v_numeric(left, right) or is_datetimelike_v_object(left, right):
+
+ # datetimelike may have different objects (e.g. datetime.datetime vs Timestamp) but will compare equal
+ if not Index(left.values).equals(Index(right.values)):
+ raise AssertionError(
+ '[datetimelike_compat=True] {0} is not equal to {1}.'.format(left.values,
+ right.values))
+ else:
+ assert_numpy_array_equivalent(left.values, right.values)
else:
assert_almost_equal(left.values, right.values, check_less_precise)
if check_less_precise:
@@ -715,7 +730,8 @@ def assert_frame_equal(left, right, check_dtype=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
- check_exact=False):
+ check_exact=False,
+ check_datetimelike_compat=False):
if check_frame_type:
assertIsInstance(left, type(right))
assertIsInstance(left, DataFrame)
@@ -749,7 +765,8 @@ def assert_frame_equal(left, right, check_dtype=True,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
- check_names=check_names)
+ check_names=check_names,
+ check_datetimelike_compat=check_datetimelike_compat)
if check_index_type:
for level in range(left.index.nlevels):
| xref, #1079
numpy 1.10 shows a deprecation warning for this. this should eliminate a bunch of these.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10569 | 2015-07-14T17:43:09Z | 2015-07-17T13:58:58Z | 2015-07-17T13:58:58Z | 2015-07-17T14:46:09Z |
ENH: Added DataFrame.round and associated tests | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 2f4fd860f270a..38c2c1091469b 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -853,6 +853,7 @@ Computations / Descriptive Stats
DataFrame.prod
DataFrame.quantile
DataFrame.rank
+ DataFrame.round
DataFrame.sem
DataFrame.skew
DataFrame.sum
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 26871a11473de..753c4cc52cab8 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -438,3 +438,5 @@ For instance:
:suppress:
pd.reset_option('^display\.')
+
+To round floats on a case-by-case basis, you can also use :meth:`~pandas.Series.round` and :meth:`~pandas.DataFrame.round`.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index eae33bc80be32..b21fcf0dcb007 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -62,6 +62,16 @@ New features
ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13])
ser.interpolate(limit=1, limit_direction='both')
+- Round DataFrame to variable number of decimal places (:issue:`10568`).
+
+ .. ipython :: python
+
+ df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
+ index=['first', 'second', 'third'])
+ df
+ df.round(2)
+ df.round({'A': 0, 'C': 2})
+
.. _whatsnew_0170.gil:
Releasing the GIL
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 997dfeb728ade..64d9d28ddc611 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4253,6 +4253,76 @@ def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=left_index, right_index=right_index, sort=sort,
suffixes=suffixes, copy=copy)
+ def round(self, decimals=0, out=None):
+ """
+ Round a DataFrame to a variable number of decimal places.
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ decimals : int, dict, Series
+ Number of decimal places to round each column to. If an int is
+ given, round each column to the same number of places.
+ Otherwise dict and Series round to variable numbers of places.
+ Column names should be in the keys if `decimals` is a
+ dict-like, or in the index if `decimals` is a Series. Any
+ columns not included in `decimals` will be left as is. Elements
+ of `decimals` which are not columns of the input will be
+ ignored.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame(np.random.random([3, 3]),
+ ... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
+ >>> df
+ A B C
+ first 0.028208 0.992815 0.173891
+ second 0.038683 0.645646 0.577595
+ third 0.877076 0.149370 0.491027
+ >>> df.round(2)
+ A B C
+ first 0.03 0.99 0.17
+ second 0.04 0.65 0.58
+ third 0.88 0.15 0.49
+ >>> df.round({'A': 1, 'C': 2})
+ A B C
+ first 0.0 0.992815 0.17
+ second 0.0 0.645646 0.58
+ third 0.9 0.149370 0.49
+ >>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
+ >>> df.round(decimals)
+ A B C
+ first 0.0 1 0.17
+ second 0.0 1 0.58
+ third 0.9 0 0.49
+
+ Returns
+ -------
+ DataFrame object
+ """
+ from pandas.tools.merge import concat
+
+ def _dict_round(df, decimals):
+ for col in df:
+ try:
+ yield np.round(df[col], decimals[col])
+ except KeyError:
+ yield df[col]
+
+ if isinstance(decimals, (dict, Series)):
+ new_cols = [col for col in _dict_round(self, decimals)]
+ elif com.is_integer(decimals):
+ # Dispatch to numpy.round
+ new_cols = [np.round(self[col], decimals) for col in self]
+ else:
+ raise TypeError("decimals must be an integer, a dict-like or a Series")
+
+ if len(new_cols) > 0:
+ return concat(new_cols, axis=1)
+ else:
+ return self
+
#----------------------------------------------------------------------
# Statistical methods, etc.
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index de6d172408916..7877ee3c5a6cc 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2680,6 +2680,115 @@ def test_to_csv_date_format(self):
self.assertEqual(df_day.to_csv(), expected_default_day)
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d'), expected_default_day)
+ def test_round_dataframe(self):
+
+ # GH 2665
+
+ # Test that rounding an empty DataFrame does nothing
+ df = DataFrame()
+ tm.assert_frame_equal(df, df.round())
+
+ # Here's the test frame we'll be working with
+ df = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
+
+ # Default round to integer (i.e. decimals=0)
+ expected_rounded = DataFrame(
+ {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
+ tm.assert_frame_equal(df.round(), expected_rounded)
+
+ # Round with an integer
+ decimals = 2
+ expected_rounded = DataFrame(
+ {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
+ tm.assert_frame_equal(df.round(decimals), expected_rounded)
+
+ # This should also work with np.round (since np.round dispatches to
+ # df.round)
+ tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
+
+ # Round with a list
+ round_list = [1, 2]
+ with self.assertRaises(TypeError):
+ df.round(round_list)
+
+ # Round with a dictionary
+ expected_rounded = DataFrame(
+ {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
+ round_dict = {'col1': 1, 'col2': 2}
+ tm.assert_frame_equal(df.round(round_dict), expected_rounded)
+
+ # Incomplete dict
+ expected_partially_rounded = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
+ partial_round_dict = {'col2': 1}
+ tm.assert_frame_equal(
+ df.round(partial_round_dict), expected_partially_rounded)
+
+ # Dict with unknown elements
+ wrong_round_dict = {'col3': 2, 'col2': 1}
+ tm.assert_frame_equal(
+ df.round(wrong_round_dict), expected_partially_rounded)
+
+ # float input to `decimals`
+ non_int_round_dict = {'col1': 1, 'col2': 0.5}
+ if sys.version < LooseVersion('2.7'):
+ # np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
+ with self.assert_produces_warning(DeprecationWarning):
+ df.round(non_int_round_dict)
+ else:
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
+
+ # String input
+ non_int_round_dict = {'col1': 1, 'col2': 'foo'}
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ # List input
+ non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_dict)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ # Non integer Series inputs
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ non_int_round_Series = Series(non_int_round_dict)
+ with self.assertRaises(TypeError):
+ df.round(non_int_round_Series)
+
+ # Negative numbers
+ negative_round_dict = {'col1': -1, 'col2': -2}
+ big_df = df * 100
+ expected_neg_rounded = DataFrame(
+ {'col1':[110., 210, 310], 'col2':[100., 200, 300]})
+ tm.assert_frame_equal(
+ big_df.round(negative_round_dict), expected_neg_rounded)
+
+ # nan in Series round
+ nan_round_Series = Series({'col1': nan, 'col2':1})
+ expected_nan_round = DataFrame(
+ {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
+ if sys.version < LooseVersion('2.7'):
+ # Rounding with decimal is a ValueError in Python < 2.7
+ with self.assertRaises(ValueError):
+ df.round(nan_round_Series)
+ else:
+ with self.assertRaises(TypeError):
+ df.round(nan_round_Series)
+
+ # Make sure this doesn't break existing Series.round
+ tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
class TestSeriesFormatting(tm.TestCase):
_multiprocess_can_split_ = True
| I've found myself doing a lot of `DataFrame.to_latex` because I'm using pandas to write an academic paper.
I'm constantly messing about with the number of decimal places displayed by doing `np.round(df, 2)` so thought this flexible `round`, with different numbers of decimals per column, should be part of the `DataFrame` API (I'm surprised there isn't already such a piece of functionality.)
Here is an example:
```
In [9]: df = pd.DataFrame(np.random.random([10, 3]), columns=['a', 'b', 'c'])
In [10]: df
Out[10]:
a b c
0 0.761651 0.430963 0.440312
1 0.094071 0.242381 0.149731
2 0.620050 0.462600 0.194143
3 0.614627 0.692106 0.176523
4 0.215396 0.888180 0.380283
5 0.492990 0.200268 0.067020
6 0.804531 0.816366 0.065751
7 0.751224 0.037474 0.884083
8 0.994758 0.450143 0.808945
9 0.373180 0.537589 0.809112
In [11]: df.round(dict(b=2, c=4))
Out[11]:
a b c
0 0.761651 0.43 0.4403
1 0.094071 0.24 0.1497
2 0.620050 0.46 0.1941
3 0.614627 0.69 0.1765
4 0.215396 0.89 0.3803
5 0.492990 0.20 0.0670
6 0.804531 0.82 0.0658
7 0.751224 0.04 0.8841
8 0.994758 0.45 0.8089
9 0.373180 0.54 0.8091
```
You can also round by column number:
```
In [12]: df.round([1, 2, 3])
Out[12]:
a b c
0 0.8 0.43 0.440
1 0.1 0.24 0.150
2 0.6 0.46 0.194
3 0.6 0.69 0.177
4 0.2 0.89 0.380
5 0.5 0.20 0.067
6 0.8 0.82 0.066
7 0.8 0.04 0.884
8 1.0 0.45 0.809
9 0.4 0.54 0.809
```
and any columns which are not explicitly rounded are unaffected:
```
In [13]: df.round([1])
Out[13]:
a b c
0 0.8 0.430963 0.440312
1 0.1 0.242381 0.149731
2 0.6 0.462600 0.194143
3 0.6 0.692106 0.176523
4 0.2 0.888180 0.380283
5 0.5 0.200268 0.067020
6 0.8 0.816366 0.065751
7 0.8 0.037474 0.884083
8 1.0 0.450143 0.808945
9 0.4 0.537589 0.809112
```
Non-integer values raise a `TypeError`, as might be expected:
```
In [15]: df.round({'a':1.2})
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-6f51d3fd917d> in <module>()
----> 1 df.round({'a':1.2})
/home/rob/Dropbox/PhD/pandas/pandas/core/frame.py in round(self, places)
1467
1468 if isinstance(places, dict):
-> 1469 new_cols = [col for col in _dict_round(self, places)]
1470 else:
1471 new_cols = [col for col in _list_round(self, places)]
/home/rob/Dropbox/PhD/pandas/pandas/core/frame.py in _dict_round(df, places)
1455 for col in df:
1456 try:
-> 1457 yield np.round(df[col], places[col])
1458 except KeyError:
1459 yield df[col]
/usr/local/lib/python2.7/dist-packages/numpy/core/fromnumeric.pyc in round_(a, decimals, out)
2646 except AttributeError:
2647 return _wrapit(a, 'round', decimals, out)
-> 2648 return round(decimals, out)
2649
2650
/home/rob/Dropbox/PhD/pandas/pandas/core/series.pyc in round(self, decimals, out)
1209
1210 """
-> 1211 result = _values_from_object(self).round(decimals, out=out)
1212 if out is None:
1213 result = self._constructor(result,
TypeError: integer argument expected, got float
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10568 | 2015-07-14T16:34:04Z | 2015-09-03T13:41:10Z | 2015-09-03T13:41:10Z | 2015-09-03T13:45:36Z |
BUG: #8243 Change unary - to ~. Fixes numpy warning in ols. | diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py
index 9d22068c1612f..e1951135914e9 100644
--- a/pandas/stats/ols.py
+++ b/pandas/stats/ols.py
@@ -818,7 +818,7 @@ def _calc_betas(self, x, y):
betas[i] = math.solve(xx, xy)
- mask = -np.isnan(betas).any(axis=1)
+ mask = ~np.isnan(betas).any(axis=1)
have_betas = np.arange(N)[mask]
return betas, have_betas, mask
| https://api.github.com/repos/pandas-dev/pandas/pulls/10562 | 2015-07-13T17:16:49Z | 2015-08-16T00:04:19Z | null | 2015-08-18T13:47:43Z | |
DOC: consistent imports (GH9886) part IV | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index bb424e29cfc21..acddf1bb3fe30 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -6,18 +6,16 @@
import numpy as np
np.random.seed(123456)
- from pandas import *
- options.display.max_rows=15
- randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
- import matplotlib.pyplot as plt
- plt.close('all')
+ import pandas as pd
+ pd.options.display.max_rows = 15
import matplotlib
try:
matplotlib.style.use('ggplot')
except AttributeError:
- options.display.mpl_style = 'default'
- from pandas.compat import zip
+ pd.options.display.mpl_style = 'default'
+ import matplotlib.pyplot as plt
+ plt.close('all')
*****************************
Group By: split-apply-combine
@@ -105,11 +103,12 @@ consider the following DataFrame:
.. ipython:: python
- df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B' : ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C' : randn(8), 'D' : randn(8)})
+ df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B' : ['one', 'one', 'two', 'three',
+ 'two', 'two', 'one', 'three'],
+ 'C' : np.random.randn(8),
+ 'D' : np.random.randn(8)})
df
We could naturally group by either the ``A`` or ``B`` columns or both:
@@ -142,7 +141,7 @@ output of aggregation functions will only contain unique index values:
lst = [1, 2, 3, 1, 2, 3]
- s = Series([1, 2, 3, 10, 20, 30], lst)
+ s = pd.Series([1, 2, 3, 10, 20, 30], lst)
grouped = s.groupby(level=0)
@@ -189,7 +188,7 @@ however pass ``sort=False`` for potential speedups:
.. ipython:: python
- df2 = DataFrame({'X' : ['B', 'B', 'A', 'A'], 'Y' : [1, 2, 3, 4]})
+ df2 = pd.DataFrame({'X' : ['B', 'B', 'A', 'A'], 'Y' : [1, 2, 3, 4]})
df2.groupby(['X'], sort=True).sum()
df2.groupby(['X'], sort=False).sum()
@@ -203,10 +202,10 @@ however pass ``sort=False`` for potential speedups:
n = 10
weight = np.random.normal(166, 20, size=n)
height = np.random.normal(60, 10, size=n)
- time = date_range('1/1/2000', periods=n)
+ time = pd.date_range('1/1/2000', periods=n)
gender = tm.choice(['male', 'female'], size=n)
- df = DataFrame({'height': height, 'weight': weight,
- 'gender': gender}, index=time)
+ df = pd.DataFrame({'height': height, 'weight': weight,
+ 'gender': gender}, index=time)
.. ipython:: python
@@ -226,11 +225,12 @@ however pass ``sort=False`` for potential speedups:
.. ipython:: python
:suppress:
- df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B' : ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C' : randn(8), 'D' : randn(8)})
+ df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B' : ['one', 'one', 'two', 'three',
+ 'two', 'two', 'one', 'three'],
+ 'C' : np.random.randn(8),
+ 'D' : np.random.randn(8)})
.. _groupby.multiindex:
@@ -248,8 +248,8 @@ natural to group by one of the levels of the hierarchy.
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
tuples
- index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
- s = Series(randn(8), index=index)
+ index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
+ s = pd.Series(np.random.randn(8), index=index)
.. ipython:: python
@@ -281,13 +281,13 @@ Also as of v0.6, grouping with multiple levels is supported.
['doo', 'doo', 'bee', 'bee', 'bop', 'bop', 'bop', 'bop'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
- index = MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
- s = Series(randn(8), index=index)
+ index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
+ s = pd.Series(np.random.randn(8), index=index)
.. ipython:: python
s
- s.groupby(level=['first','second']).sum()
+ s.groupby(level=['first', 'second']).sum()
More on the ``sum`` function and aggregation later.
@@ -499,9 +499,9 @@ to standardize the data within each group:
.. ipython:: python
- index = date_range('10/1/1999', periods=1100)
- ts = Series(np.random.normal(0.5, 2, 1100), index)
- ts = rolling_mean(ts, 100, 100).dropna()
+ index = pd.date_range('10/1/1999', periods=1100)
+ ts = pd.Series(np.random.normal(0.5, 2, 1100), index)
+ ts = pd.rolling_mean(ts, 100, 100).dropna()
ts.head()
ts.tail()
@@ -528,7 +528,7 @@ We can also visually compare the original and transformed data sets.
.. ipython:: python
- compare = DataFrame({'Original': ts, 'Transformed': transformed})
+ compare = pd.DataFrame({'Original': ts, 'Transformed': transformed})
@savefig groupby_transform_plot.png
compare.plot()
@@ -539,11 +539,11 @@ Another common data transform is to replace missing data with the group mean.
:suppress:
cols = ['A', 'B', 'C']
- values = randn(1000, 3)
+ values = np.random.randn(1000, 3)
values[np.random.randint(0, 1000, 100), 0] = np.nan
values[np.random.randint(0, 1000, 50), 1] = np.nan
values[np.random.randint(0, 1000, 200), 2] = np.nan
- data_df = DataFrame(values, columns=cols)
+ data_df = pd.DataFrame(values, columns=cols)
.. ipython:: python
@@ -599,7 +599,7 @@ than 2.
.. ipython:: python
- sf = Series([1, 1, 2, 3, 3, 3])
+ sf = pd.Series([1, 1, 2, 3, 3, 3])
sf.groupby(sf).filter(lambda x: x.sum() > 2)
The argument of ``filter`` must be a function that, applied to the group as a
@@ -610,7 +610,7 @@ with only a couple members.
.. ipython:: python
- dff = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')})
+ dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')})
dff.groupby('B').filter(lambda x: len(x) > 2)
Alternatively, instead of dropping the offending groups, we can return a
@@ -672,9 +672,9 @@ next). This enables some operations to be carried out rather succinctly:
.. ipython:: python
- tsdf = DataFrame(randn(1000, 3),
- index=date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ tsdf = pd.DataFrame(np.random.randn(1000, 3),
+ index=pd.date_range('1/1/2000', periods=1000),
+ columns=['A', 'B', 'C'])
tsdf.ix[::2] = np.nan
grouped = tsdf.groupby(lambda x: x.year)
grouped.fillna(method='pad')
@@ -689,8 +689,8 @@ The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys:
.. ipython:: python
- s = Series([9, 8, 7, 5, 19, 1, 4.2, 3.3])
- g = Series(list('abababab'))
+ s = pd.Series([9, 8, 7, 5, 19, 1, 4.2, 3.3])
+ g = pd.Series(list('abababab'))
gb = s.groupby(g)
gb.nlargest(3)
gb.nsmallest(3)
@@ -721,8 +721,8 @@ The dimension of the returned result can also change:
In [8]: grouped = df.groupby('A')['C']
In [10]: def f(group):
- ....: return DataFrame({'original' : group,
- ....: 'demeaned' : group - group.mean()})
+ ....: return pd.DataFrame({'original' : group,
+ ....: 'demeaned' : group - group.mean()})
....:
In [11]: grouped.apply(f)
@@ -732,8 +732,8 @@ The dimension of the returned result can also change:
.. ipython:: python
def f(x):
- return Series([ x, x**2 ], index = ['x', 'x^s'])
- s = Series(np.random.rand(5))
+ return pd.Series([ x, x**2 ], index = ['x', 'x^s'])
+ s = pd.Series(np.random.rand(5))
s
s.apply(f)
@@ -754,7 +754,7 @@ The dimension of the returned result can also change:
.. ipython:: python
- d = DataFrame({"a":["x", "y"], "b":[1,2]})
+ d = pd.DataFrame({"a":["x", "y"], "b":[1,2]})
def identity(df):
print df
return df
@@ -802,9 +802,9 @@ can be used as group keys. If so, the order of the levels will be preserved:
.. ipython:: python
- data = Series(np.random.randn(100))
+ data = pd.Series(np.random.randn(100))
- factor = qcut(data, [0, .25, .5, .75, 1.])
+ factor = pd.qcut(data, [0, .25, .5, .75, 1.])
data.groupby(factor).mean()
@@ -813,27 +813,28 @@ can be used as group keys. If so, the order of the levels will be preserved:
Grouping with a Grouper specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Your may need to specify a bit more data to properly group. You can
+You may need to specify a bit more data to properly group. You can
use the ``pd.Grouper`` to provide this local control.
.. ipython:: python
- import datetime as DT
-
- df = DataFrame({
- 'Branch' : 'A A A A A A A B'.split(),
- 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
- 'Quantity': [1,3,5,1,8,1,9,3],
- 'Date' : [
- DT.datetime(2013,1,1,13,0),
- DT.datetime(2013,1,1,13,5),
- DT.datetime(2013,10,1,20,0),
- DT.datetime(2013,10,2,10,0),
- DT.datetime(2013,10,1,20,0),
- DT.datetime(2013,10,2,10,0),
- DT.datetime(2013,12,2,12,0),
- DT.datetime(2013,12,2,14,0),
- ]})
+ import datetime
+
+ df = pd.DataFrame({
+ 'Branch' : 'A A A A A A A B'.split(),
+ 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
+ 'Quantity': [1,3,5,1,8,1,9,3],
+ 'Date' : [
+ datetime.datetime(2013,1,1,13,0),
+ datetime.datetime(2013,1,1,13,5),
+ datetime.datetime(2013,10,1,20,0),
+ datetime.datetime(2013,10,2,10,0),
+ datetime.datetime(2013,10,1,20,0),
+ datetime.datetime(2013,10,2,10,0),
+ datetime.datetime(2013,12,2,12,0),
+ datetime.datetime(2013,12,2,14,0),
+ ]
+ })
df
@@ -862,7 +863,7 @@ Just like for a DataFrame or Series you can call head and tail on a groupby:
.. ipython:: python
- df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
+ df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
df
g = df.groupby('A')
@@ -894,7 +895,7 @@ To select from a DataFrame or Series the nth item, use the nth method. This is a
.. ipython:: python
- df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
+ df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
g.nth(0)
@@ -919,7 +920,7 @@ As with other methods, passing ``as_index=False``, will achieve a filtration, wh
.. ipython:: python
- df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
+ df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A',as_index=False)
g.nth(0)
@@ -929,8 +930,8 @@ You can also select multiple rows from each group by specifying multiple nth val
.. ipython:: python
- business_dates = date_range(start='4/1/2014', end='6/30/2014', freq='B')
- df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+ business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B')
+ df = pd.DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, 4th, and last date index for each month
df.groupby((df.index.year, df.index.month)).nth([0, 3, -1])
@@ -961,7 +962,7 @@ the values in column 1 where the group is "B" are 3 higher on average.
.. ipython:: python
np.random.seed(1234)
- df = DataFrame(np.random.randn(50, 2))
+ df = pd.DataFrame(np.random.randn(50, 2))
df['g'] = np.random.choice(['A', 'B'], size=50)
df.loc[df['g'] == 'B', 1] += 3
@@ -1010,11 +1011,11 @@ column index name will be used as the name of the inserted column:
.. ipython:: python
df = pd.DataFrame({
- 'a': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
- 'b': [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
- 'c': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
- 'd': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
- })
+ 'a': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
+ 'b': [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
+ 'c': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
+ 'd': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
+ })
def compute_metrics(x):
result = {'b_sum': x['b'].sum(), 'c_mean': x['c'].mean()}
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index a1912032bc3bf..618a2ae42c65f 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -6,15 +6,10 @@
:suppress:
import numpy as np
- import random
np.random.seed(123456)
- from pandas import *
- options.display.max_rows=15
- import pandas as pd
- randn = np.random.randn
- randint = np.random.randint
np.set_printoptions(precision=4, suppress=True)
- from pandas.compat import range, zip
+ import pandas as pd
+ pd.options.display.max_rows=15
***************************
Indexing and Selecting Data
@@ -162,10 +157,10 @@ indexing functionality:
.. ipython:: python
- dates = date_range('1/1/2000', periods=8)
- df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
+ dates = pd.date_range('1/1/2000', periods=8)
+ df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
df
- panel = Panel({'one' : df, 'two' : df - df.mean()})
+ panel = pd.Panel({'one' : df, 'two' : df - df.mean()})
panel
.. note::
@@ -208,7 +203,7 @@ as an attribute:
.. ipython:: python
- sa = Series([1,2,3],index=list('abc'))
+ sa = pd.Series([1,2,3],index=list('abc'))
dfa = df.copy()
.. ipython:: python
@@ -307,7 +302,7 @@ Selection By Label
.. ipython:: python
- dfl = DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=date_range('20130101',periods=5))
+ dfl = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('20130101',periods=5))
dfl
.. code-block:: python
@@ -333,7 +328,7 @@ The ``.loc`` attribute is the primary access method. The following are valid inp
.. ipython:: python
- s1 = Series(np.random.randn(6),index=list('abcdef'))
+ s1 = pd.Series(np.random.randn(6),index=list('abcdef'))
s1
s1.loc['c':]
s1.loc['b']
@@ -349,9 +344,9 @@ With a DataFrame
.. ipython:: python
- df1 = DataFrame(np.random.randn(6,4),
- index=list('abcdef'),
- columns=list('ABCD'))
+ df1 = pd.DataFrame(np.random.randn(6,4),
+ index=list('abcdef'),
+ columns=list('ABCD'))
df1
df1.loc[['a','b','d'],:]
@@ -403,7 +398,7 @@ The ``.iloc`` attribute is the primary access method. The following are valid in
.. ipython:: python
- s1 = Series(np.random.randn(5),index=list(range(0,10,2)))
+ s1 = pd.Series(np.random.randn(5), index=list(range(0,10,2)))
s1
s1.iloc[:3]
s1.iloc[3]
@@ -419,9 +414,9 @@ With a DataFrame
.. ipython:: python
- df1 = DataFrame(np.random.randn(6,4),
- index=list(range(0,12,2)),
- columns=list(range(0,8,2)))
+ df1 = pd.DataFrame(np.random.randn(6,4),
+ index=list(range(0,12,2)),
+ columns=list(range(0,8,2)))
df1
Select via integer slicing
@@ -472,7 +467,7 @@ Out of range slice indexes are handled gracefully just as in Python/Numpy.
x
x[4:10]
x[8:10]
- s = Series(x)
+ s = pd.Series(x)
s
s.iloc[4:10]
s.iloc[8:10]
@@ -488,7 +483,7 @@ returned)
.. ipython:: python
- dfl = DataFrame(np.random.randn(5,2),columns=list('AB'))
+ dfl = pd.DataFrame(np.random.randn(5,2), columns=list('AB'))
dfl
dfl.iloc[:,2:3]
dfl.iloc[:,1:3]
@@ -516,7 +511,7 @@ A random selection of rows or columns from a Series, DataFrame, or Panel with th
.. ipython :: python
- s = Series([0,1,2,3,4,5])
+ s = pd.Series([0,1,2,3,4,5])
# When no arguments are passed, returns 1 row.
s.sample()
@@ -532,7 +527,7 @@ using the ``replace`` option:
.. ipython :: python
- s = Series([0,1,2,3,4,5])
+ s = pd.Series([0,1,2,3,4,5])
# Without replacement (default):
s.sample(n=6, replace=False)
@@ -547,7 +542,7 @@ to have different probabilities, you can pass the ``sample`` function sampling w
.. ipython :: python
- s = Series([0,1,2,3,4,5])
+ s = pd.Series([0,1,2,3,4,5])
example_weights = [0, 0, 0.2, 0.2, 0.2, 0.4]
s.sample(n=3, weights=example_weights)
@@ -561,21 +556,21 @@ as a string.
.. ipython :: python
- df2 = DataFrame({'col1':[9,8,7,6], 'weight_column':[0.5, 0.4, 0.1, 0]})
+ df2 = pd.DataFrame({'col1':[9,8,7,6], 'weight_column':[0.5, 0.4, 0.1, 0]})
df2.sample(n = 3, weights = 'weight_column')
``sample`` also allows users to sample columns instead of rows using the ``axis`` argument.
.. ipython :: python
- df3 = DataFrame({'col1':[1,2,3], 'col2':[2,3,4]})
+ df3 = pd.DataFrame({'col1':[1,2,3], 'col2':[2,3,4]})
df3.sample(n=1, axis=1)
Finally, one can also set a seed for ``sample``'s random number generator using the ``random_state`` argument, which will accept either an integer (as a seed) or a numpy RandomState object.
.. ipython :: python
- df4 = DataFrame({'col1':[1,2,3], 'col2':[2,3,4]})
+ df4 = pd.DataFrame({'col1':[1,2,3], 'col2':[2,3,4]})
# With a given seed, the sample will always draw the same rows.
df4.sample(n=2, random_state=2)
@@ -594,7 +589,7 @@ In the ``Series`` case this is effectively an appending operation
.. ipython:: python
- se = Series([1,2,3])
+ se = pd.Series([1,2,3])
se
se[5] = 5.
se
@@ -603,7 +598,7 @@ A ``DataFrame`` can be enlarged on either axis via ``.loc``
.. ipython:: python
- dfi = DataFrame(np.arange(6).reshape(3,2),
+ dfi = pd.DataFrame(np.arange(6).reshape(3,2),
columns=['A','B'])
dfi
dfi.loc[:,'C'] = dfi.loc[:,'A']
@@ -661,7 +656,7 @@ Using a boolean vector to index a Series works exactly as in a numpy ndarray:
.. ipython:: python
- s = Series(range(-3, 4))
+ s = pd.Series(range(-3, 4))
s
s[s > 0]
s[(s < -1) | (s > 0.5)]
@@ -680,9 +675,9 @@ more complex criteria:
.. ipython:: python
- df2 = DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'],
- 'b' : ['x', 'y', 'y', 'x', 'y', 'x', 'x'],
- 'c' : randn(7)})
+ df2 = pd.DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'],
+ 'b' : ['x', 'y', 'y', 'x', 'y', 'x', 'x'],
+ 'c' : np.random.randn(7)})
# only want 'two' or 'three'
criterion = df2['a'].map(lambda x: x.startswith('t'))
@@ -713,7 +708,7 @@ select rows where one or more columns have values you want:
.. ipython:: python
- s = Series(np.arange(5),index=np.arange(5)[::-1],dtype='int64')
+ s = pd.Series(np.arange(5), index=np.arange(5)[::-1], dtype='int64')
s
s.isin([2, 4, 6])
s[s.isin([2, 4, 6])]
@@ -733,8 +728,8 @@ in the membership check:
.. ipython:: python
- s_mi = Series(np.arange(6),
- index=pd.MultiIndex.from_product([[0, 1], ['a', 'b', 'c']]))
+ s_mi = pd.Series(np.arange(6),
+ index=pd.MultiIndex.from_product([[0, 1], ['a', 'b', 'c']]))
s_mi
s_mi.iloc[s_mi.index.isin([(1, 'a'), (2, 'b'), (0, 'c')])]
s_mi.iloc[s_mi.index.isin(['a', 'c', 'e'], level=1)]
@@ -746,8 +741,8 @@ wherever the element is in the sequence of values.
.. ipython:: python
- df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
- 'ids2': ['a', 'n', 'c', 'n']})
+ df = pd.DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
+ 'ids2': ['a', 'n', 'c', 'n']})
values = ['a', 'b', 1, 3]
@@ -801,8 +796,8 @@ Equivalent is ``df.where(df < 0)``
.. ipython:: python
:suppress:
- dates = date_range('1/1/2000', periods=8)
- df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
+ dates = pd.date_range('1/1/2000', periods=8)
+ df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
.. ipython:: python
@@ -889,16 +884,10 @@ method that allows selection using an expression.
You can get the value of the frame where column ``b`` has values
between the values of columns ``a`` and ``c``. For example:
-.. ipython:: python
- :suppress:
-
- from numpy.random import randint, rand
- np.random.seed(1234)
-
.. ipython:: python
n = 10
- df = DataFrame(rand(n, 3), columns=list('abc'))
+ df = pd.DataFrame(np.random.rand(n, 3), columns=list('abc'))
df
# pure python
@@ -912,7 +901,7 @@ with the name ``a``.
.. ipython:: python
- df = DataFrame(randint(n / 2, size=(n, 2)), columns=list('bc'))
+ df = pd.DataFrame(np.random.randint(n / 2, size=(n, 2)), columns=list('bc'))
df.index.name = 'a'
df
df.query('a < b and b < c')
@@ -928,7 +917,7 @@ If instead you don't want to or cannot name your index, you can use the name
.. ipython:: python
- df = DataFrame(randint(n, size=(n, 2)), columns=list('bc'))
+ df = pd.DataFrame(np.random.randint(n, size=(n, 2)), columns=list('bc'))
df
df.query('index < b < c')
@@ -946,7 +935,7 @@ If instead you don't want to or cannot name your index, you can use the name
.. ipython:: python
- df = DataFrame({'a': randint(5, size=5)})
+ df = pd.DataFrame({'a': np.random.randint(5, size=5)})
df.index.name = 'a'
df.query('a > 2') # uses the column 'a', not the index
@@ -970,23 +959,20 @@ You can also use the levels of a ``DataFrame`` with a
.. ipython:: python
- import pandas.util.testing as tm
-
n = 10
- colors = tm.choice(['red', 'green'], size=n)
- foods = tm.choice(['eggs', 'ham'], size=n)
+ colors = np.random.choice(['red', 'green'], size=n)
+ foods = np.random.choice(['eggs', 'ham'], size=n)
colors
foods
- index = MultiIndex.from_arrays([colors, foods], names=['color', 'food'])
- df = DataFrame(randn(n, 2), index=index)
+ index = pd.MultiIndex.from_arrays([colors, foods], names=['color', 'food'])
+ df = pd.DataFrame(np.random.randn(n, 2), index=index)
df
df.query('color == "red"')
If the levels of the ``MultiIndex`` are unnamed, you can refer to them using
special names:
-
.. ipython:: python
df.index.names = [None, None]
@@ -1008,9 +994,9 @@ having to specify which frame you're interested in querying
.. ipython:: python
- df = DataFrame(rand(n, 3), columns=list('abc'))
+ df = pd.DataFrame(np.random.rand(n, 3), columns=list('abc'))
df
- df2 = DataFrame(rand(n + 2, 3), columns=df.columns)
+ df2 = pd.DataFrame(np.random.rand(n + 2, 3), columns=df.columns)
df2
expr = '0.0 <= a <= c <= 0.5'
map(lambda frame: frame.query(expr), [df, df2])
@@ -1022,7 +1008,7 @@ Full numpy-like syntax
.. ipython:: python
- df = DataFrame(randint(n, size=(n, 3)), columns=list('abc'))
+ df = pd.DataFrame(np.random.randint(n, size=(n, 3)), columns=list('abc'))
df
df.query('(a < b) & (b < c)')
df[(df.a < df.b) & (df.b < df.c)]
@@ -1065,8 +1051,9 @@ The ``in`` and ``not in`` operators
.. ipython:: python
# get all rows where columns "a" and "b" have overlapping values
- df = DataFrame({'a': list('aabbccddeeff'), 'b': list('aaaabbbbcccc'),
- 'c': randint(5, size=12), 'd': randint(9, size=12)})
+ df = pd.DataFrame({'a': list('aabbccddeeff'), 'b': list('aaaabbbbcccc'),
+ 'c': np.random.randint(5, size=12),
+ 'd': np.random.randint(9, size=12)})
df
df.query('a in b')
@@ -1139,8 +1126,8 @@ You can negate boolean expressions with the word ``not`` or the ``~`` operator.
.. ipython:: python
- df = DataFrame(rand(n, 3), columns=list('abc'))
- df['bools'] = rand(len(df)) > 0.5
+ df = pd.DataFrame(np.random.rand(n, 3), columns=list('abc'))
+ df['bools'] = np.random.rand(len(df)) > 0.5
df.query('~bools')
df.query('not bools')
df.query('not bools') == df[~df.bools]
@@ -1192,7 +1179,7 @@ floating point values generated using ``numpy.random.randn()``.
.. ipython:: python
:suppress:
- df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
+ df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
df2 = df.copy()
@@ -1214,9 +1201,9 @@ should be taken instead.
.. ipython:: python
- df2 = DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'],
- 'b' : ['x', 'y', 'y', 'x', 'y', 'x', 'x'],
- 'c' : np.random.randn(7)})
+ df2 = pd.DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'],
+ 'b' : ['x', 'y', 'y', 'x', 'y', 'x', 'x'],
+ 'c' : np.random.randn(7)})
df2.duplicated(['a','b'])
df2.drop_duplicates(['a','b'])
df2.drop_duplicates(['a','b'], take_last=True)
@@ -1242,7 +1229,7 @@ default value.
.. ipython:: python
- s = Series([1,2,3], index=['a','b','c'])
+ s = pd.Series([1,2,3], index=['a','b','c'])
s.get('a') # equivalent to s['a']
s.get('x', default=-1)
@@ -1267,7 +1254,7 @@ numpy array. For instance,
.. ipython:: python
- dflookup = DataFrame(np.random.rand(20,4), columns = ['A','B','C','D'])
+ dflookup = pd.DataFrame(np.random.rand(20,4), columns = ['A','B','C','D'])
dflookup.lookup(list(range(0,10,2)), ['B','C','A','B','D'])
.. _indexing.class:
@@ -1287,7 +1274,7 @@ lookups, data alignment, and reindexing. The easiest way to create an
.. ipython:: python
- index = Index(['e', 'd', 'a', 'b'])
+ index = pd.Index(['e', 'd', 'a', 'b'])
index
'd' in index
@@ -1296,26 +1283,26 @@ You can also pass a ``name`` to be stored in the index:
.. ipython:: python
- index = Index(['e', 'd', 'a', 'b'], name='something')
+ index = pd.Index(['e', 'd', 'a', 'b'], name='something')
index.name
The name, if set, will be shown in the console display:
.. ipython:: python
- index = Index(list(range(5)), name='rows')
- columns = Index(['A', 'B', 'C'], name='cols')
- df = DataFrame(np.random.randn(5, 3), index=index, columns=columns)
+ index = pd.Index(list(range(5)), name='rows')
+ columns = pd.Index(['A', 'B', 'C'], name='cols')
+ df = pd.DataFrame(np.random.randn(5, 3), index=index, columns=columns)
df
df['A']
+.. _indexing.set_metadata:
+
Setting metadata
~~~~~~~~~~~~~~~~
.. versionadded:: 0.13.0
-.. _indexing.set_metadata:
-
Indexes are "mostly immutable", but it is possible to set and change their
metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
``labels``).
@@ -1328,7 +1315,7 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
.. ipython:: python
- ind = Index([1, 2, 3])
+ ind = pd.Index([1, 2, 3])
ind.rename("apple")
ind
ind.set_names(["apple"], inplace=True)
@@ -1342,8 +1329,7 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
.. ipython:: python
-
- index = MultiIndex.from_product([range(3), ['one', 'two']], names=['first', 'second'])
+ index = pd.MultiIndex.from_product([range(3), ['one', 'two']], names=['first', 'second'])
index
index.levels[1]
index.set_levels(["a", "b"], level=1)
@@ -1364,8 +1350,8 @@ operators. Difference is provided via the ``.difference()`` method.
.. ipython:: python
- a = Index(['c', 'b', 'a'])
- b = Index(['c', 'e', 'd'])
+ a = pd.Index(['c', 'b', 'a'])
+ b = pd.Index(['c', 'e', 'd'])
a | b
a & b
a.difference(b)
@@ -1377,8 +1363,8 @@ with duplicates dropped.
.. ipython:: python
- idx1 = Index([1, 2, 3, 4])
- idx2 = Index([2, 3, 4, 5])
+ idx1 = pd.Index([1, 2, 3, 4])
+ idx2 = pd.Index([2, 3, 4, 5])
idx1.sym_diff(idx2)
idx1 ^ idx2
@@ -1401,10 +1387,10 @@ indexed DataFrame:
.. ipython:: python
:suppress:
- data = DataFrame({'a' : ['bar', 'bar', 'foo', 'foo'],
- 'b' : ['one', 'two', 'one', 'two'],
- 'c' : ['z', 'y', 'x', 'w'],
- 'd' : [1., 2., 3, 4]})
+ data = pd.DataFrame({'a' : ['bar', 'bar', 'foo', 'foo'],
+ 'b' : ['one', 'two', 'one', 'two'],
+ 'c' : ['z', 'y', 'x', 'w'],
+ 'd' : [1., 2., 3, 4]})
.. ipython:: python
@@ -1482,12 +1468,12 @@ When setting values in a pandas object, care must be taken to avoid what is call
.. ipython:: python
- dfmi = DataFrame([list('abcd'),
- list('efgh'),
- list('ijkl'),
- list('mnop')],
- columns=MultiIndex.from_product([['one','two'],
- ['first','second']]))
+ dfmi = pd.DataFrame([list('abcd'),
+ list('efgh'),
+ list('ijkl'),
+ list('mnop')],
+ columns=pd.MultiIndex.from_product([['one','two'],
+ ['first','second']]))
dfmi
Compare these two access methods:
@@ -1543,9 +1529,9 @@ which can take the values ``['raise','warn',None]``, where showing a warning is
.. ipython:: python
:okwarning:
- dfb = DataFrame({'a' : ['one', 'one', 'two',
- 'three', 'two', 'one', 'six'],
- 'c' : np.arange(7)})
+ dfb = pd.DataFrame({'a' : ['one', 'one', 'two',
+ 'three', 'two', 'one', 'six'],
+ 'c' : np.arange(7)})
# This will show the SettingWithCopyWarning
# but the frame values will be set
@@ -1573,7 +1559,7 @@ This is the correct access method
.. ipython:: python
- dfc = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
+ dfc = pd.DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
dfc.loc[0,'A'] = 11
dfc
diff --git a/doc/source/internals.rst b/doc/source/internals.rst
index 8b4f7360fc235..5899c3089cdac 100644
--- a/doc/source/internals.rst
+++ b/doc/source/internals.rst
@@ -6,15 +6,10 @@
:suppress:
import numpy as np
- import random
np.random.seed(123456)
- from pandas import *
- options.display.max_rows=15
- import pandas as pd
- randn = np.random.randn
- randint = np.random.randint
np.set_printoptions(precision=4, suppress=True)
- from pandas.compat import range, zip
+ import pandas as pd
+ pd.options.display.max_rows = 15
*********
Internals
@@ -81,7 +76,7 @@ integer **labels**, and the level **names**:
.. ipython:: python
- index = MultiIndex.from_product([range(3), ['one', 'two']], names=['first', 'second'])
+ index = pd.MultiIndex.from_product([range(3), ['one', 'two']], names=['first', 'second'])
index
index.levels
index.labels
@@ -210,7 +205,7 @@ Below is an example to define 2 original properties, "internal_cache" as a tempo
class SubclassedDataFrame2(DataFrame):
# temporary properties
- _internal_names = DataFrame._internal_names + ['internal_cache']
+ _internal_names = pd.DataFrame._internal_names + ['internal_cache']
_internal_names_set = set(_internal_names)
# normal properties
@@ -244,5 +239,3 @@ Below is an example to define 2 original properties, "internal_cache" as a tempo
# properties defined in _metadata are retained
>>> df[['A', 'B']].added_property
property
-
-
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 7a4318fb02cfc..4c829c3252533 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2407,7 +2407,7 @@ for some advanced strategies
As of version 0.15.0, pandas requires ``PyTables`` >= 3.0.0. Stores written with prior versions of pandas / ``PyTables`` >= 2.3 are fully compatible (this was the previous minimum ``PyTables`` required version).
.. warning::
-
+
There is a ``PyTables`` indexing bug which may appear when querying stores using an index. If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2. Stores created previously will need to be rewritten using the updated version.
.. ipython:: python
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 5a350b4d9a1e7..6f1a272e3c40d 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -82,7 +82,7 @@ pandas provides the :func:`~pandas.core.common.isnull` and
.. ipython:: python
df2['one']
- isnull(df2['one'])
+ pd.isnull(df2['one'])
df2['four'].notnull()
**Summary:** ``NaN`` and ``None`` (in object arrays) are considered
@@ -99,7 +99,7 @@ pandas objects provide intercompatibility between ``NaT`` and ``NaN``.
.. ipython:: python
df2 = df.copy()
- df2['timestamp'] = Timestamp('20120101')
+ df2['timestamp'] = pd.Timestamp('20120101')
df2
df2.ix[['a','c','h'],['one','timestamp']] = np.nan
df2
@@ -260,7 +260,7 @@ use case of this is to fill a DataFrame with the mean of that column.
.. ipython:: python
- dff = pd.DataFrame(np.random.randn(10,3),columns=list('ABC'))
+ dff = pd.DataFrame(np.random.randn(10,3), columns=list('ABC'))
dff.iloc[3:5,0] = np.nan
dff.iloc[4:6,1] = np.nan
dff.iloc[5:8,2] = np.nan
@@ -276,7 +276,7 @@ a Series in this case.
.. ipython:: python
- dff.where(notnull(dff),dff.mean(),axis='columns')
+ dff.where(pd.notnull(dff), dff.mean(), axis='columns')
.. _missing_data.dropna:
@@ -321,7 +321,7 @@ performs linear interpolation at missing datapoints.
:suppress:
np.random.seed(123456)
- idx = date_range('1/1/2000', periods=100, freq='BM')
+ idx = pd.date_range('1/1/2000', periods=100, freq='BM')
ts = pd.Series(np.random.randn(100), index=idx)
ts[1:20] = np.nan
ts[60:80] = np.nan
@@ -368,7 +368,7 @@ You can also interpolate with a DataFrame:
.. ipython:: python
df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8],
- 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]})
+ 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]})
df
df.interpolate()
@@ -425,7 +425,7 @@ at the new values.
ser = pd.Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
- new_index = ser.index | Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
+ new_index = ser.index | pd.Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
interp_s = ser.reindex(new_index).interpolate(method='pchip')
interp_s[49:51]
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 9ede87422b21c..7e140b1b2deaf 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -154,7 +154,7 @@ lines are replaced by an ellipsis.
.. ipython:: python
- df=pd.DataFrame(np.random.randn(7,2))
+ df = pd.DataFrame(np.random.randn(7,2))
pd.set_option('max_rows', 7)
df
pd.set_option('max_rows', 5)
@@ -166,7 +166,7 @@ dataframes to stretch across pages, wrapped over the full column vs row-wise.
.. ipython:: python
- df=pd.DataFrame(np.random.randn(5,10))
+ df = pd.DataFrame(np.random.randn(5,10))
pd.set_option('expand_frame_repr', True)
df
pd.set_option('expand_frame_repr', False)
@@ -178,7 +178,7 @@ dataframes to stretch across pages, wrapped over the full column vs row-wise.
.. ipython:: python
- df=pd.DataFrame(np.random.randn(10,10))
+ df = pd.DataFrame(np.random.randn(10,10))
pd.set_option('max_rows', 5)
pd.set_option('large_repr', 'truncate')
df
@@ -192,8 +192,8 @@ of this length or longer will be truncated with an ellipsis.
.. ipython:: python
- df=pd.DataFrame(np.array([['foo', 'bar', 'bim', 'uncomfortably long string'],
- ['horse', 'cow', 'banana', 'apple']]))
+ df = pd.DataFrame(np.array([['foo', 'bar', 'bim', 'uncomfortably long string'],
+ ['horse', 'cow', 'banana', 'apple']]))
pd.set_option('max_colwidth',40)
df
pd.set_option('max_colwidth', 6)
@@ -205,7 +205,7 @@ will be given.
.. ipython:: python
- df=pd.DataFrame(np.random.randn(10,10))
+ df = pd.DataFrame(np.random.randn(10,10))
pd.set_option('max_info_columns', 11)
df.info()
pd.set_option('max_info_columns', 5)
@@ -219,7 +219,7 @@ can specify the option ``df.info(null_counts=True)`` to override on showing a pa
.. ipython:: python
- df=pd.DataFrame(np.random.choice([0,1,np.nan],size=(10,10)))
+ df =pd.DataFrame(np.random.choice([0,1,np.nan], size=(10,10)))
df
pd.set_option('max_info_rows', 11)
df.info()
@@ -232,7 +232,7 @@ suggestion.
.. ipython:: python
- df=pd.DataFrame(np.random.randn(5,5))
+ df = pd.DataFrame(np.random.randn(5,5))
pd.set_option('precision',7)
df
pd.set_option('precision',4)
@@ -244,7 +244,7 @@ precision at which the number is stored.
.. ipython:: python
- df=pd.DataFrame(np.random.randn(6,6))
+ df = pd.DataFrame(np.random.randn(6,6))
pd.set_option('chop_threshold', 0)
df
pd.set_option('chop_threshold', .5)
@@ -256,7 +256,8 @@ Options are 'right', and 'left'.
.. ipython:: python
- df=pd.DataFrame(np.array([np.random.randn(6), np.random.randint(1,9,6)*.1, np.zeros(6)]).T, columns=['A', 'B', 'C'], dtype='float')
+ df = pd.DataFrame(np.array([np.random.randn(6), np.random.randint(1,9,6)*.1, np.zeros(6)]).T,
+ columns=['A', 'B', 'C'], dtype='float')
pd.set_option('colheader_justify', 'right')
df
pd.set_option('colheader_justify', 'left')
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst
index da37c92c88ecf..74cdc5a526585 100644
--- a/doc/source/r_interface.rst
+++ b/doc/source/r_interface.rst
@@ -5,8 +5,8 @@
.. ipython:: python
:suppress:
- from pandas import *
- options.display.max_rows=15
+ import pandas as pd
+ pd.options.display.max_rows = 15
******************
@@ -136,10 +136,8 @@ DataFrames into the equivalent R object (that is, **data.frame**):
.. ipython:: python
- from pandas import DataFrame
-
- df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C':[7,8,9]},
- index=["one", "two", "three"])
+ df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C':[7,8,9]},
+ index=["one", "two", "three"])
r_dataframe = com.convert_to_r_dataframe(df)
print(type(r_dataframe))
| Further work on #9886
| https://api.github.com/repos/pandas-dev/pandas/pulls/10561 | 2015-07-13T07:53:35Z | 2015-07-13T13:12:02Z | 2015-07-13T13:12:02Z | 2015-07-13T13:12:02Z |
BUG: pd.eval with numexpr engine coerces 1 element numpy array to scalar | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index b2a1e10469a0f..6ab299eb70eb5 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -366,6 +366,7 @@ Bug Fixes
- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
+- Bug in `pd.eval` using ``numexpr`` engine coerces 1 element numpy array to scalar (:issue:`10546`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
- Bug in `pandas.read_csv` with kwargs ``index_col=False``, ``index_col=['a', 'b']`` or ``dtype``
diff --git a/pandas/computation/align.py b/pandas/computation/align.py
index 2e0845bddf7e2..9834dd1a9e7fc 100644
--- a/pandas/computation/align.py
+++ b/pandas/computation/align.py
@@ -172,12 +172,11 @@ def _reconstruct_object(typ, obj, axes, dtype):
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
-
- try:
- ret = ret_value.item()
- except (ValueError, IndexError):
- # XXX: we catch IndexError to absorb a
- # regression in numpy 1.7.0
- # fixed by numpy/numpy@04b89c63
- ret = ret_value
- return ret
+ # The condition is to distinguish 0-dim array (returned in case of scalar)
+ # and 1 element array
+ # e.g. np.array(0) and np.array([0])
+ if len(obj.shape) == 1 and len(obj) == 1:
+ if not isinstance(ret_value, np.ndarray):
+ ret_value = np.array([ret_value]).astype(res_t)
+
+ return ret_value
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 61bc40e34b5a3..d455d9d0d8679 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -10,7 +10,7 @@
from numpy.random import randn, rand, randint
import numpy as np
-from numpy.testing import assert_array_equal, assert_allclose
+from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas as pd
@@ -220,7 +220,7 @@ def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
expected = _eval_single_bin(
lhs_new, binop, rhs_new, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
- assert_array_equal(result, expected)
+ tm.assert_numpy_array_equivalent(result, expected)
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
skip_these = _scalar_skip
@@ -240,7 +240,7 @@ def check_operands(left, right, cmp_op):
for ex in (ex1, ex2, ex3):
result = pd.eval(ex, engine=self.engine,
parser=self.parser)
- assert_array_equal(result, expected)
+ tm.assert_numpy_array_equivalent(result, expected)
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = 'lhs {0} rhs'.format(cmp1)
@@ -251,13 +251,13 @@ def check_simple_cmp_op(self, lhs, cmp1, rhs):
else:
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
- assert_array_equal(result, expected)
+ tm.assert_numpy_array_equivalent(result, expected)
def check_binary_arith_op(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = _eval_single_bin(lhs, arith1, rhs, self.engine)
- assert_array_equal(result, expected)
+ tm.assert_numpy_array_equivalent(result, expected)
ex = 'lhs {0} rhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
nlhs = _eval_single_bin(lhs, arith1, rhs,
@@ -273,7 +273,7 @@ def check_alignment(self, result, nlhs, ghs, op):
pass
else:
expected = self.ne.evaluate('nlhs {0} ghs'.format(op))
- assert_array_equal(result, expected)
+ tm.assert_numpy_array_equivalent(result, expected)
# modulus, pow, and floor division require special casing
@@ -291,7 +291,7 @@ def check_floor_division(self, lhs, arith1, rhs):
if self.engine == 'python':
res = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs // rhs
- assert_array_equal(res, expected)
+ tm.assert_numpy_array_equivalent(res, expected)
else:
self.assertRaises(TypeError, pd.eval, ex, local_dict={'lhs': lhs,
'rhs': rhs},
@@ -325,7 +325,7 @@ def check_pow(self, lhs, arith1, rhs):
if (np.isscalar(lhs) and np.isscalar(rhs) and
_is_py3_complex_incompat(result, expected)):
- self.assertRaises(AssertionError, assert_array_equal, result,
+ self.assertRaises(AssertionError, tm.assert_numpy_array_equivalent, result,
expected)
else:
assert_allclose(result, expected)
@@ -345,11 +345,11 @@ def check_single_invert_op(self, lhs, cmp1, rhs):
elb = np.array([bool(el)])
expected = ~elb
result = pd.eval('~elb', engine=self.engine, parser=self.parser)
- assert_array_equal(expected, result)
+ tm.assert_numpy_array_equivalent(expected, result)
for engine in self.current_engines:
tm.skip_if_no_ne(engine)
- assert_array_equal(result, pd.eval('~elb', engine=engine,
+ tm.assert_numpy_array_equivalent(result, pd.eval('~elb', engine=engine,
parser=self.parser))
def check_compound_invert_op(self, lhs, cmp1, rhs):
@@ -370,13 +370,13 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
else:
expected = ~expected
result = pd.eval(ex, engine=self.engine, parser=self.parser)
- assert_array_equal(expected, result)
+ tm.assert_numpy_array_equivalent(expected, result)
# make sure the other engines work the same as this one
for engine in self.current_engines:
tm.skip_if_no_ne(engine)
ev = pd.eval(ex, engine=self.engine, parser=self.parser)
- assert_array_equal(ev, result)
+ tm.assert_numpy_array_equivalent(ev, result)
def ex(self, op, var_name='lhs'):
return '{0}{1}'.format(op, var_name)
@@ -620,6 +620,38 @@ def test_disallow_scalar_bool_ops(self):
with tm.assertRaises(NotImplementedError):
pd.eval(ex, engine=self.engine, parser=self.parser)
+ def test_identical(self):
+ # GH 10546
+ x = 1
+ result = pd.eval('x', engine=self.engine, parser=self.parser)
+ self.assertEqual(result, 1)
+ self.assertTrue(np.isscalar(result))
+
+ x = 1.5
+ result = pd.eval('x', engine=self.engine, parser=self.parser)
+ self.assertEqual(result, 1.5)
+ self.assertTrue(np.isscalar(result))
+
+ x = False
+ result = pd.eval('x', engine=self.engine, parser=self.parser)
+ self.assertEqual(result, False)
+ self.assertTrue(np.isscalar(result))
+
+ x = np.array([1])
+ result = pd.eval('x', engine=self.engine, parser=self.parser)
+ tm.assert_numpy_array_equivalent(result, np.array([1]))
+ self.assertEqual(result.shape, (1, ))
+
+ x = np.array([1.5])
+ result = pd.eval('x', engine=self.engine, parser=self.parser)
+ tm.assert_numpy_array_equivalent(result, np.array([1.5]))
+ self.assertEqual(result.shape, (1, ))
+
+ x = np.array([False])
+ result = pd.eval('x', engine=self.engine, parser=self.parser)
+ tm.assert_numpy_array_equivalent(result, np.array([False]))
+ self.assertEqual(result.shape, (1, ))
+
class TestEvalNumexprPython(TestEvalNumexprPandas):
@@ -675,7 +707,7 @@ def check_alignment(self, result, nlhs, ghs, op):
pass
else:
expected = eval('nlhs {0} ghs'.format(op))
- assert_array_equal(result, expected)
+ tm.assert_numpy_array_equivalent(result, expected)
class TestEvalPythonPandas(TestEvalPythonPython):
@@ -1086,10 +1118,10 @@ def test_truediv(self):
if PY3:
res = self.eval(ex, truediv=False)
- assert_array_equal(res, np.array([1.0]))
+ tm.assert_numpy_array_equivalent(res, np.array([1.0]))
res = self.eval(ex, truediv=True)
- assert_array_equal(res, np.array([1.0]))
+ tm.assert_numpy_array_equivalent(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
@@ -1108,10 +1140,10 @@ def test_truediv(self):
self.assertEqual(res, expec)
else:
res = self.eval(ex, truediv=False)
- assert_array_equal(res, np.array([1]))
+ tm.assert_numpy_array_equivalent(res, np.array([1]))
res = self.eval(ex, truediv=True)
- assert_array_equal(res, np.array([1.0]))
+ tm.assert_numpy_array_equivalent(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
@@ -1414,8 +1446,8 @@ class TestScope(object):
def check_global_scope(self, e, engine, parser):
tm.skip_if_no_ne(engine)
- assert_array_equal(_var_s * 2, pd.eval(e, engine=engine,
- parser=parser))
+ tm.assert_numpy_array_equivalent(_var_s * 2, pd.eval(e, engine=engine,
+ parser=parser))
def test_global_scope(self):
e = '_var_s * 2'
| Closes #10546. Used `assert_numpy_atray_equivalent` to guarantee array comparison, not scalar. This should be prior to #10542 and being changed to use `assert_numpy_array_equal` in #10542.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10558 | 2015-07-12T21:35:36Z | 2015-07-20T21:19:08Z | 2015-07-20T21:19:08Z | 2015-07-20T21:19:12Z |
Pickle subclass metadata | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 05b69bae42c28..939a5b9dd1d42 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -51,6 +51,7 @@ Other API Changes
- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
+- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`).
.. _whatsnew_0170.deprecations:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bdddc03ed1e10..f39e953284f26 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -80,8 +80,8 @@ class NDFrame(PandasObject):
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache',
'is_copy', '_subtyp', '_index',
- '_default_kind', '_default_fill_value',
- '__array_struct__','__array_interface__']
+ '_default_kind', '_default_fill_value', '_metadata',
+ '__array_struct__', '__array_interface__']
_internal_names_set = set(_internal_names)
_accessors = frozenset([])
_metadata = []
@@ -760,7 +760,9 @@ def to_dense(self):
# Picklability
def __getstate__(self):
- return self._data
+ meta = dict((k, getattr(self, k, None)) for k in self._metadata)
+ return dict(_data=self._data, _typ=self._typ,
+ _metadata=self._metadata, **meta)
def __setstate__(self, state):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7158303cd836d..062a32413286f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -438,10 +438,6 @@ def imag(self, v):
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
- # we are preserving name here
- def __getstate__(self):
- return dict(_data=self._data, name=self.name)
-
def _unpickle_series_compat(self, state):
if isinstance(state, dict):
self._data = state['_data']
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ae6102751fb41..4dea73a3a73a1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -45,7 +45,8 @@
assertRaisesRegexp,
assertRaises,
makeCustomDataframe as mkdf,
- ensure_clean)
+ ensure_clean,
+ SubclassedDataFrame)
from pandas.core.indexing import IndexingError
from pandas.core.common import PandasError
@@ -14501,16 +14502,8 @@ def test_assign_bad(self):
def test_dataframe_metadata(self):
- class TestDataFrame(DataFrame):
- _metadata = ['testattr']
-
- @property
- def _constructor(self):
- return TestDataFrame
-
-
- df = TestDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
- index=['a', 'b', 'c'])
+ df = SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
+ index=['a', 'b', 'c'])
df.testattr = 'XXX'
self.assertEqual(df.testattr, 'XXX')
@@ -14519,6 +14512,11 @@ def _constructor(self):
self.assertEqual(df.iloc[[0, 1], :].testattr, 'XXX')
# GH9776
self.assertEqual(df.iloc[0:1, :].testattr, 'XXX')
+ # GH10553
+ unpickled = self.round_trip_pickle(df)
+ assert_frame_equal(df, unpickled)
+ self.assertEqual(df._metadata, unpickled._metadata)
+ self.assertEqual(df.testattr, unpickled.testattr)
def test_to_panel_expanddim(self):
# GH 9762
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 3096bddcf6247..7378e3504b5ca 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1856,3 +1856,11 @@ def inner(*args, **kwargs):
thread.join()
return inner
return wrapper
+
+
+class SubclassedDataFrame(DataFrame):
+ _metadata = ['testattr']
+
+ @property
+ def _constructor(self):
+ return SubclassedDataFrame
| Fixes #10553
| https://api.github.com/repos/pandas-dev/pandas/pulls/10557 | 2015-07-12T20:17:27Z | 2015-07-13T02:40:09Z | 2015-07-13T02:40:09Z | 2015-07-13T11:31:29Z |
ENH: Added functionality in resample to resolve #10530 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 70d616ca72c1b..643226b84baff 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -94,6 +94,8 @@ Other enhancements
- Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`)
+- Added functionality to use ``base`` when resampling a ``TimeDeltaIndex`` (:issue:`10530`)
+
- ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`)
- The string parsing of ``to_datetime``, ``Timestamp`` and ``DatetimeIndex`` has been made consistent. (:issue:`7599`)
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 53c1292204f71..0ecdb43895f07 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -238,6 +238,10 @@ def _get_time_delta_bins(self, ax):
end_stamps = labels + 1
bins = ax.searchsorted(end_stamps, side='left')
+ # Addresses GH #10530
+ if self.base > 0:
+ labels += type(self.freq)(self.base)
+
return binner, bins, labels
def _get_time_period_bins(self, ax):
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 9ec336466266f..3a533b5e0b298 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -11,6 +11,7 @@
from pandas.core.groupby import DataError
from pandas.tseries.index import date_range
+from pandas.tseries.tdi import timedelta_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
@@ -627,6 +628,19 @@ def test_resample_base(self):
freq='5min')
self.assertTrue(resampled.index.equals(exp_rng))
+ def test_resample_base_with_timedeltaindex(self):
+ rng = timedelta_range(start = '0s', periods = 25, freq = 's')
+ ts = Series(np.random.randn(len(rng)), index = rng)
+
+ with_base = ts.resample('2s', base = 5)
+ without_base = ts.resample('2s')
+
+ exp_without_base = timedelta_range(start = '0s', end = '25s', freq = '2s')
+ exp_with_base = timedelta_range(start = '5s', end = '29s', freq = '2s')
+
+ self.assertTrue(without_base.index.equals(exp_without_base))
+ self.assertTrue(with_base.index.equals(exp_with_base))
+
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
| closes #10530
I'm not 100% sure that I implemented this per the documentation or that my tests are complete but I'd be glad to make any necessary changes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10543 | 2015-07-11T04:41:10Z | 2015-08-15T22:41:23Z | null | 2015-08-15T22:41:23Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.