title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
DOC: correct DataFrame.pivot docstring | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1798a35168265..dfe7e90c134fc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3868,9 +3868,8 @@ def last_valid_index(self):
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
- unique values from index / columns to form axes and return either
- DataFrame or Panel, depending on whether you request a single value
- column (DataFrame) or all columns (Panel)
+ unique values from index / columns to form axes of the resulting
+ DataFrame.
Parameters
----------
@@ -3880,7 +3879,20 @@ def pivot(self, index=None, columns=None, values=None):
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
- Column name to use for populating new frame's values
+ Column name to use for populating new frame's values. If not
+ specified, all remaining columns will be used and the result will
+ have hierarchically indexed columns
+
+ Returns
+ -------
+ pivoted : DataFrame
+
+ See also
+ --------
+ DataFrame.pivot_table : generalization of pivot that can handle
+ duplicate values for one index/column pair
+ DataFrame.unstack : pivot based on the index values instead of a
+ column
Notes
-----
@@ -3889,30 +3901,30 @@ def pivot(self, index=None, columns=None, values=None):
Examples
--------
+
+ >>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
+ 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
+ 'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
- 0 one A 1.
- 1 one B 2.
- 2 one C 3.
- 3 two A 4.
- 4 two B 5.
- 5 two C 6.
-
- >>> df.pivot('foo', 'bar', 'baz')
+ 0 one A 1
+ 1 one B 2
+ 2 one C 3
+ 3 two A 4
+ 4 two B 5
+ 5 two C 6
+
+ >>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
- >>> df.pivot('foo', 'bar')['baz']
+ >>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
- Returns
- -------
- pivoted : DataFrame
- If no values column specified, will have hierarchically indexed
- columns
+
"""
from pandas.core.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
| The mention of panels that are created is not correct. You get a multi-index, I noticed
| https://api.github.com/repos/pandas-dev/pandas/pulls/14430 | 2016-10-15T20:29:41Z | 2016-10-22T09:50:20Z | 2016-10-22T09:50:20Z | 2016-10-22T09:50:20Z |
PERF: improved perf in .to_json when lines=True | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 5c03408cbf20f..fe57ed4a54975 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -21,7 +21,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
-
+- Improved Performance in ``.to_json()`` when ``lines=True`` (:issue:`14408`)
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 66a8e76c09a6f..1e258101a5d86 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -605,25 +605,9 @@ def _convert_to_line_delimits(s):
if not s[0] == '[' and s[-1] == ']':
return s
s = s[1:-1]
- num_open_brackets_seen = 0
- commas_to_replace = []
- in_quotes = False
- for idx, char in enumerate(s): # iter through to find all
- if char == '"' and idx > 0 and s[idx - 1] != '\\':
- in_quotes = ~in_quotes
- elif char == ',': # commas that should be \n
- if num_open_brackets_seen == 0 and not in_quotes:
- commas_to_replace.append(idx)
- elif char == '{':
- if not in_quotes:
- num_open_brackets_seen += 1
- elif char == '}':
- if not in_quotes:
- num_open_brackets_seen -= 1
- s_arr = np.array(list(s)) # Turn to an array to set
- s_arr[commas_to_replace] = '\n' # all commas at once.
- s = ''.join(s_arr)
- return s
+
+ from pandas.lib import convert_json_to_lines
+ return convert_json_to_lines(s)
def nested_to_record(ds, prefix="", level=0):
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index e7672de5c835e..b56a02b245d69 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -1087,6 +1087,44 @@ def string_array_replace_from_nan_rep(
return arr
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def convert_json_to_lines(object arr):
+ """
+ replace comma separated json with line feeds, paying special attention
+ to quotes & brackets
+ """
+ cdef:
+ Py_ssize_t i = 0, num_open_brackets_seen = 0, in_quotes = 0, length
+ ndarray[uint8_t] narr
+ unsigned char v, comma, left_bracket, right_brack, newline
+
+ newline = ord('\n')
+ comma = ord(',')
+ left_bracket = ord('{')
+ right_bracket = ord('}')
+ quote = ord('"')
+ backslash = ord('\\')
+
+ narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy()
+ length = narr.shape[0]
+ for i in range(length):
+ v = narr[i]
+ if v == quote and i > 0 and narr[i - 1] != backslash:
+ in_quotes = ~in_quotes
+ if v == comma: # commas that should be \n
+ if num_open_brackets_seen == 0 and not in_quotes:
+ narr[i] = newline
+ elif v == left_bracket:
+ if not in_quotes:
+ num_open_brackets_seen += 1
+ elif v == right_bracket:
+ if not in_quotes:
+ num_open_brackets_seen -= 1
+
+ return narr.tostring().decode('utf-8')
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
def write_csv_rows(list data, ndarray data_index,
| closes #14408
master
```
In [1]: np.random.seed(1234)
In [2]: N = 100000
...: C = 5
...: df = DataFrame(dict([('float{0}'.format(i), np.random.randn(N)) for i in range(C)]))
In [3]: %timeit df.to_json('foo.json',orient='records',lines=True)
1 loop, best of 3: 3.67 s per loop
```
PR (with proper encoding/decoding, IOW work directly on the bytes and minimize copies)
```
In [5]: %timeit df.to_json('foo.json',orient='records',lines=True)
10 loops, best of 3: 137 ms per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14429 | 2016-10-14T23:53:09Z | 2016-10-15T20:03:48Z | null | 2016-10-15T20:03:48Z |
Bug: Grouping by index and column fails on DataFrame with single index (GH14327) | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 5c03408cbf20f..0046e2553ad6b 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -46,3 +46,4 @@ Bug Fixes
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
+- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 3c376e3188eac..5223c0ac270f3 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2201,36 +2201,12 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
- inds = index.labels[level]
- level_index = index.levels[level]
-
if self.name is None:
self.name = index.names[level]
- # XXX complete hack
-
- if grouper is not None:
- level_values = index.levels[level].take(inds)
- self.grouper = level_values.map(self.grouper)
- else:
- # all levels may not be observed
- labels, uniques = algos.factorize(inds, sort=True)
-
- if len(uniques) > 0 and uniques[0] == -1:
- # handle NAs
- mask = inds != -1
- ok_labels, uniques = algos.factorize(inds[mask], sort=True)
-
- labels = np.empty(len(inds), dtype=inds.dtype)
- labels[mask] = ok_labels
- labels[~mask] = -1
-
- if len(uniques) < len(level_index):
- level_index = level_index.take(uniques)
+ self.grouper, self._labels, self._group_index = \
+ index._get_grouper_for_level(self.grouper, level)
- self._labels = labels
- self._group_index = level_index
- self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 5082fc84982c6..1c24a0db34b2b 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -432,6 +432,36 @@ def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
+ _index_shared_docs['_get_grouper_for_level'] = """
+ Get index grouper corresponding to an index level
+
+ Parameters
+ ----------
+ mapper: Group mapping function or None
+ Function mapping index values to groups
+ level : int or None
+ Index level
+
+ Returns
+ -------
+ grouper : Index
+ Index of values to group on
+ labels : ndarray of int or None
+ Array of locations in level_index
+ uniques : Index or None
+ Index of unique values for level
+ """
+
+ @Appender(_index_shared_docs['_get_grouper_for_level'])
+ def _get_grouper_for_level(self, mapper, level=None):
+ assert level is None or level == 0
+ if mapper is None:
+ grouper = self
+ else:
+ grouper = self.map(mapper)
+
+ return grouper, None, None
+
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 0c465da24a17e..a9f452db69659 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -539,6 +539,37 @@ def _format_native_types(self, na_rep='nan', **kwargs):
return mi.values
+ @Appender(_index_shared_docs['_get_grouper_for_level'])
+ def _get_grouper_for_level(self, mapper, level):
+ indexer = self.labels[level]
+ level_index = self.levels[level]
+
+ if mapper is not None:
+ # Handle group mapping function and return
+ level_values = self.levels[level].take(indexer)
+ grouper = level_values.map(mapper)
+ return grouper, None, None
+
+ labels, uniques = algos.factorize(indexer, sort=True)
+
+ if len(uniques) > 0 and uniques[0] == -1:
+ # Handle NAs
+ mask = indexer != -1
+ ok_labels, uniques = algos.factorize(indexer[mask],
+ sort=True)
+
+ labels = np.empty(len(indexer), dtype=indexer.dtype)
+ labels[mask] = ok_labels
+ labels[~mask] = -1
+
+ if len(uniques) < len(level_index):
+ # Remove unobserved levels from level_index
+ level_index = level_index.take(uniques)
+
+ grouper = level_index.take(labels)
+
+ return grouper, labels, level_index
+
@property
def _constructor(self):
return MultiIndex.from_tuples
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 02917ab18c29f..f3791ee1d5c91 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -458,6 +458,39 @@ def test_grouper_creation_bug(self):
expected = s.groupby(level='one').sum()
assert_series_equal(result, expected)
+ def test_grouper_column_and_index(self):
+ # GH 14327
+
+ # Grouping a multi-index frame by a column and an index level should
+ # be equivalent to resetting the index and grouping by two columns
+ idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 3),
+ ('b', 1), ('b', 2), ('b', 3)])
+ idx.names = ['outer', 'inner']
+ df_multi = pd.DataFrame({"A": np.arange(6),
+ 'B': ['one', 'one', 'two',
+ 'two', 'one', 'one']},
+ index=idx)
+ result = df_multi.groupby(['B', pd.Grouper(level='inner')]).mean()
+ expected = df_multi.reset_index().groupby(['B', 'inner']).mean()
+ assert_frame_equal(result, expected)
+
+ # Test the reverse grouping order
+ result = df_multi.groupby([pd.Grouper(level='inner'), 'B']).mean()
+ expected = df_multi.reset_index().groupby(['inner', 'B']).mean()
+ assert_frame_equal(result, expected)
+
+ # Grouping a single-index frame by a column and the index should
+ # be equivalent to resetting the index and grouping by two columns
+ df_single = df_multi.reset_index('outer')
+ result = df_single.groupby(['B', pd.Grouper(level='inner')]).mean()
+ expected = df_single.reset_index().groupby(['B', 'inner']).mean()
+ assert_frame_equal(result, expected)
+
+ # Test the reverse grouping order
+ result = df_single.groupby([pd.Grouper(level='inner'), 'B']).mean()
+ expected = df_single.reset_index().groupby(['inner', 'B']).mean()
+ assert_frame_equal(result, expected)
+
def test_grouper_getting_correct_binner(self):
# GH 10063
| - [x] closes #14327
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
This PR is a continuation of #14333. See discussion there for explanation of why this new PR was needed.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14428 | 2016-10-14T23:39:26Z | 2016-10-15T19:25:41Z | 2016-10-15T19:25:41Z | 2016-10-15T19:25:48Z |
ENH: merge_asof() has left_index/right_index and left_by/right_by (#14253) | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..c968ed56c585f 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -15,6 +15,17 @@ Highlights include:
:backlinks: none
+
+.. _whatsnew_0191.api.other:
+
+Other API Changes
+^^^^^^^^^^^^^^^^^
+
+- ``pd.merge_asof()`` can take ``left_index``/``right_index`` and ``left_by``/``right_by`` (:issue:`14253`)
+
+
+
+
.. _whatsnew_0191.performance:
Performance Improvements
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index a8c43195f5552..61398a656567f 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -259,7 +259,8 @@ def _merger(x, y):
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
- by=None,
+ left_index=False, right_index=False,
+ by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True):
@@ -288,9 +289,29 @@ def merge_asof(left, right, on=None,
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
+ left_index : boolean
+ Use the index of the left DataFrame as the join key.
+
+ .. versionadded:: 0.19.1
+
+ right_index : boolean
+ Use the index of the right DataFrame as the join key.
+
+ .. versionadded:: 0.19.1
+
by : column name
Group both the left and right DataFrames by the group column; perform
the merge operation on these pieces and recombine.
+ left_by : column name
+ Field name to group by in the left DataFrame.
+
+ .. versionadded:: 0.19.1
+
+ right_by : column name
+ Field name to group by in the right DataFrame.
+
+ .. versionadded:: 0.19.1
+
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
@@ -418,7 +439,9 @@ def merge_asof(left, right, on=None,
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
- by=by, suffixes=suffixes,
+ left_index=left_index, right_index=right_index,
+ by=by, left_by=left_by, right_by=right_by,
+ suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches)
return op.get_result()
@@ -641,7 +664,7 @@ def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
- if self.left_index and self.right_index:
+ if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True)
elif self.right_index and self.how == 'left':
@@ -731,12 +754,21 @@ def _get_merge_keys(self):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
- right_keys.append(right[rk]._values)
- join_names.append(rk)
+ if rk is not None:
+ right_keys.append(right[rk]._values)
+ join_names.append(rk)
+ else:
+ # kludge for merge_asof(right_index=True)
+ right_keys.append(right.index.values)
+ join_names.append(right.index.name)
else:
if not is_rkey(rk):
- right_keys.append(right[rk]._values)
- if lk == rk:
+ if rk is not None:
+ right_keys.append(right[rk]._values)
+ else:
+ # kludge for merge_asof(right_index=True)
+ right_keys.append(right.index.values)
+ if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
@@ -744,8 +776,13 @@ def _get_merge_keys(self):
left_drop.append(lk)
else:
right_keys.append(rk)
- left_keys.append(left[lk]._values)
- join_names.append(lk)
+ if lk is not None:
+ left_keys.append(left[lk]._values)
+ join_names.append(lk)
+ else:
+ # kludge for merge_asof(left_index=True)
+ left_keys.append(left.index.values)
+ join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
@@ -870,13 +907,15 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
- def __init__(self, left, right, on=None, left_on=None,
- right_on=None, axis=1,
+ def __init__(self, left, right, on=None, left_on=None, right_on=None,
+ left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
+ left_index=left_index,
+ right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
@@ -949,19 +988,23 @@ def _get_cython_type(dtype):
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
- def __init__(self, left, right, on=None, by=None, left_on=None,
- right_on=None, axis=1,
- suffixes=('_x', '_y'), copy=True,
+ def __init__(self, left, right, on=None, left_on=None, right_on=None,
+ left_index=False, right_index=False,
+ by=None, left_by=None, right_by=None,
+ axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True):
self.by = by
+ self.left_by = left_by
+ self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
- right_on=right_on, axis=axis,
+ right_on=right_on, left_index=left_index,
+ right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
@@ -969,23 +1012,44 @@ def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
# we only allow on to be a single item for on
- if len(self.left_on) != 1:
+ if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
- if len(self.right_on) != 1:
+ if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
+ if self.left_index and isinstance(self.left.index, MultiIndex):
+ raise MergeError("left can only have one index")
+
+ if self.right_index and isinstance(self.right.index, MultiIndex):
+ raise MergeError("right can only have one index")
+
+ # set 'by' columns
+ if self.by is not None:
+ if self.left_by is not None or self.right_by is not None:
+ raise MergeError('Can only pass by OR left_by '
+ 'and right_by')
+ self.left_by = self.right_by = self.by
+ if self.left_by is None and self.right_by is not None:
+ raise MergeError('missing left_by')
+ if self.left_by is not None and self.right_by is None:
+ raise MergeError('missing right_by')
+
# add by to our key-list so we can have it in the
# output as a key
- if self.by is not None:
- if not is_list_like(self.by):
- self.by = [self.by]
+ if self.left_by is not None:
+ if not is_list_like(self.left_by):
+ self.left_by = [self.left_by]
+ if not is_list_like(self.right_by):
+ self.right_by = [self.right_by]
- if len(self.by) != 1:
+ if len(self.left_by) != 1:
+ raise MergeError("can only asof by a single key")
+ if len(self.right_by) != 1:
raise MergeError("can only asof by a single key")
- self.left_on = self.by + list(self.left_on)
- self.right_on = self.by + list(self.right_on)
+ self.left_on = self.left_by + list(self.left_on)
+ self.right_on = self.right_by + list(self.right_on)
@property
def _asof_key(self):
@@ -1008,7 +1072,7 @@ def _get_merge_keys(self):
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
- lt = left_join_keys[self.left_on.index(self._asof_key)]
+ lt = left_join_keys[-1]
msg = "incompatible tolerance, must be compat " \
"with type {0}".format(type(lt))
@@ -1038,8 +1102,10 @@ def _get_join_indexers(self):
""" return the join indexers """
# values to compare
- left_values = self.left_join_keys[-1]
- right_values = self.right_join_keys[-1]
+ left_values = (self.left.index.values if self.left_index else
+ self.left_join_keys[-1])
+ right_values = (self.right.index.values if self.right_index else
+ self.right_join_keys[-1])
tolerance = self.tolerance
# we required sortedness in the join keys
@@ -1057,7 +1123,7 @@ def _get_join_indexers(self):
tolerance = tolerance.value
# a "by" parameter requires special handling
- if self.by is not None:
+ if self.left_by is not None:
left_by_values = self.left_join_keys[0]
right_by_values = self.right_join_keys[0]
diff --git a/pandas/tools/tests/test_merge_asof.py b/pandas/tools/tests/test_merge_asof.py
index f413618624592..c3e325fa37155 100644
--- a/pandas/tools/tests/test_merge_asof.py
+++ b/pandas/tools/tests/test_merge_asof.py
@@ -117,6 +117,75 @@ def test_basic_categorical(self):
by='ticker')
assert_frame_equal(result, expected)
+ def test_basic_left_index(self):
+
+ expected = self.asof
+ trades = self.trades.set_index('time')
+ quotes = self.quotes
+
+ result = merge_asof(trades, quotes,
+ left_index=True,
+ right_on='time',
+ by='ticker')
+ # left-only index uses right's index, oddly
+ expected.index = result.index
+ # time column appears after left's columns
+ expected = expected[result.columns]
+ assert_frame_equal(result, expected)
+
+ def test_basic_right_index(self):
+
+ expected = self.asof
+ trades = self.trades
+ quotes = self.quotes.set_index('time')
+
+ result = merge_asof(trades, quotes,
+ left_on='time',
+ right_index=True,
+ by='ticker')
+ assert_frame_equal(result, expected)
+
+ def test_basic_left_index_right_index(self):
+
+ expected = self.asof.set_index('time')
+ trades = self.trades.set_index('time')
+ quotes = self.quotes.set_index('time')
+
+ result = merge_asof(trades, quotes,
+ left_index=True,
+ right_index=True,
+ by='ticker')
+ assert_frame_equal(result, expected)
+
+ def test_multi_index(self):
+
+ # MultiIndex is prohibited
+ trades = self.trades.set_index(['time', 'price'])
+ quotes = self.quotes.set_index('time')
+ with self.assertRaises(MergeError):
+ merge_asof(trades, quotes,
+ left_index=True,
+ right_index=True)
+
+ trades = self.trades.set_index('time')
+ quotes = self.quotes.set_index(['time', 'bid'])
+ with self.assertRaises(MergeError):
+ merge_asof(trades, quotes,
+ left_index=True,
+ right_index=True)
+
+ def test_basic_left_by_right_by(self):
+
+ expected = self.asof
+ trades = self.trades
+ quotes = self.quotes
+
+ result = merge_asof(trades, quotes,
+ on='time',
+ left_by='ticker',
+ right_by='ticker')
+ assert_frame_equal(result, expected)
+
def test_missing_right_by(self):
expected = self.asof
| - [x] closes #14253
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14426 | 2016-10-14T15:08:45Z | 2016-10-28T17:42:29Z | null | 2016-10-28T17:44:18Z |
BUG: String indexing against object dtype may raise AttributeError | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 2a421e5d2eacd..6dada92099a7f 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -37,6 +37,7 @@ Bug Fixes
- Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`)
+- Bug in string indexing against data with ``object`` ``Index`` may raise ``AttributeError`` (:issue:`14424`)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 5082fc84982c6..8b3ec86a44f11 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -2936,6 +2936,11 @@ def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
+ def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
+ # this is for partial string indexing,
+ # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
+ raise NotImplementedError
+
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index fa406a27bef69..a50d3d28e5a11 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -3613,6 +3613,27 @@ def test_iloc_non_unique_indexing(self):
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
+ def test_string_slice(self):
+ # GH 14424
+ # string indexing against datetimelike with object
+ # dtype should properly raises KeyError
+ df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
+ dtype=object))
+ self.assertTrue(df.index.is_all_dates)
+ with tm.assertRaises(KeyError):
+ df['2011']
+
+ with tm.assertRaises(KeyError):
+ df.loc['2011', 0]
+
+ df = pd.DataFrame()
+ self.assertFalse(df.index.is_all_dates)
+ with tm.assertRaises(KeyError):
+ df['2011']
+
+ with tm.assertRaises(KeyError):
+ df.loc['2011', 0]
+
def test_mi_access(self):
# GH 4145
| - [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
String indexing may raise `AttributeError`, rather than `KeyError`.
```
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')], dtype=object))
df.index.is_all_dates
# True
df['2011']
# AttributeError: 'Index' object has no attribute '_get_string_slice'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14424 | 2016-10-14T11:21:48Z | 2016-10-22T09:54:33Z | 2016-10-22T09:54:33Z | 2016-10-23T04:50:43Z |
Period factorization | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index e12b00dd06b39..5f3671012e6d5 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -548,6 +548,32 @@ def time_groupby_sum(self):
self.df.groupby(['a'])['b'].sum()
+class groupby_period(object):
+ # GH 14338
+ goal_time = 0.2
+
+ def make_grouper(self, N):
+ return pd.period_range('1900-01-01', freq='D', periods=N)
+
+ def setup(self):
+ N = 10000
+ self.grouper = self.make_grouper(N)
+ self.df = pd.DataFrame(np.random.randn(N, 2))
+
+ def time_groupby_sum(self):
+ self.df.groupby(self.grouper).sum()
+
+
+class groupby_datetime(groupby_period):
+ def make_grouper(self, N):
+ return pd.date_range('1900-01-01', freq='D', periods=N)
+
+
+class groupby_datetimetz(groupby_period):
+ def make_grouper(self, N):
+ return pd.date_range('1900-01-01', freq='D', periods=N,
+ tz='US/Central')
+
#----------------------------------------------------------------------
# Series.value_counts
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..8843a7849c200 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -20,7 +20,7 @@ Highlights include:
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
-
+ - Fixed performance regression in factorization of ``Period`` data (:issue:`14338`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ee59d6552bb2f..8644d4568e44d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -285,18 +285,27 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
note: an array of Periods will ignore sort as it returns an always sorted
PeriodIndex
"""
- from pandas import Index, Series, DatetimeIndex
-
- vals = np.asarray(values)
-
- # localize to UTC
- is_datetimetz_type = is_datetimetz(values)
- if is_datetimetz_type:
- values = DatetimeIndex(values)
- vals = values.asi8
+ from pandas import Index, Series, DatetimeIndex, PeriodIndex
+
+ # handling two possibilities here
+ # - for a numpy datetimelike simply view as i8 then cast back
+ # - for an extension datetimelike view as i8 then
+ # reconstruct from boxed values to transfer metadata
+ dtype = None
+ if needs_i8_conversion(values):
+ if is_period_dtype(values):
+ values = PeriodIndex(values)
+ vals = values.asi8
+ elif is_datetimetz(values):
+ values = DatetimeIndex(values)
+ vals = values.asi8
+ else:
+ # numpy dtype
+ dtype = values.dtype
+ vals = values.view(np.int64)
+ else:
+ vals = np.asarray(values)
- is_datetime = is_datetime64_dtype(vals)
- is_timedelta = is_timedelta64_dtype(vals)
(hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
table = hash_klass(size_hint or len(vals))
@@ -311,13 +320,9 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
assume_unique=True)
- if is_datetimetz_type:
- # reset tz
- uniques = values._shallow_copy(uniques)
- elif is_datetime:
- uniques = uniques.astype('M8[ns]')
- elif is_timedelta:
- uniques = uniques.astype('m8[ns]')
+ if dtype is not None:
+ uniques = uniques.astype(dtype)
+
if isinstance(values, Index):
uniques = values._shallow_copy(uniques, name=None)
elif isinstance(values, Series):
| - [x] closes #14338
- [x] tests not needed / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
asv
```
before after ratio
[c41c6511] [96b364a4]
- 2.44s 46.28ms 0.02 groupby.groupby_period.time_groupby_sum
```
Continuation of #14348
| https://api.github.com/repos/pandas-dev/pandas/pulls/14419 | 2016-10-13T20:17:07Z | 2016-10-15T19:27:11Z | 2016-10-15T19:27:11Z | 2016-11-30T01:02:08Z |
Concat with axis rows | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..01e9d2ff4ce7f 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -44,4 +44,5 @@ Bug Fixes
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
+- Bug in ``pd.concat`` where ``axis`` cannot take string parameters ``'rows'`` or ``'columns'`` (:issue:`14369`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index b7cd8a1c01224..81aa694577fb5 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -347,6 +347,65 @@ def test_concat_named_keys(self):
names=[None, None]))
assert_frame_equal(concatted_unnamed, expected_unnamed)
+ def test_concat_axis_parameter(self):
+ # GH 14369
+ df1 = pd.DataFrame({'A': [0.1, 0.2]}, index=range(2))
+ df2 = pd.DataFrame({'A': [0.3, 0.4]}, index=range(2))
+
+ # Index/row/0 DataFrame
+ expected_index = pd.DataFrame(
+ {'A': [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
+
+ concatted_index = pd.concat([df1, df2], axis='index')
+ assert_frame_equal(concatted_index, expected_index)
+
+ concatted_row = pd.concat([df1, df2], axis='rows')
+ assert_frame_equal(concatted_row, expected_index)
+
+ concatted_0 = pd.concat([df1, df2], axis=0)
+ assert_frame_equal(concatted_0, expected_index)
+
+ # Columns/1 DataFrame
+ expected_columns = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=['A', 'A'])
+
+ concatted_columns = pd.concat([df1, df2], axis='columns')
+ assert_frame_equal(concatted_columns, expected_columns)
+
+ concatted_1 = pd.concat([df1, df2], axis=1)
+ assert_frame_equal(concatted_1, expected_columns)
+
+ series1 = pd.Series([0.1, 0.2])
+ series2 = pd.Series([0.3, 0.4])
+
+ # Index/row/0 Series
+ expected_index_series = pd.Series(
+ [0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
+
+ concatted_index_series = pd.concat([series1, series2], axis='index')
+ assert_series_equal(concatted_index_series, expected_index_series)
+
+ concatted_row_series = pd.concat([series1, series2], axis='rows')
+ assert_series_equal(concatted_row_series, expected_index_series)
+
+ concatted_0_series = pd.concat([series1, series2], axis=0)
+ assert_series_equal(concatted_0_series, expected_index_series)
+
+ # Columns/1 Series
+ expected_columns_series = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1])
+
+ concatted_columns_series = pd.concat(
+ [series1, series2], axis='columns')
+ assert_frame_equal(concatted_columns_series, expected_columns_series)
+
+ concatted_1_series = pd.concat([series1, series2], axis=1)
+ assert_frame_equal(concatted_1_series, expected_columns_series)
+
+ # Testing ValueError
+ with assertRaisesRegexp(ValueError, 'No axis named'):
+ pd.concat([series1, series2], axis='something')
+
class TestDataFrameCombineFirst(tm.TestCase, TestData):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index a8c43195f5552..ce7f8908d7506 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1283,7 +1283,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
- axis : {0, 1, ...}, default 0
+ axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
@@ -1411,6 +1411,12 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
sample = objs[0]
self.objs = objs
+ # Standardize axis parameter to int
+ if isinstance(sample, Series):
+ axis = DataFrame()._get_axis_number(axis)
+ else:
+ axis = sample._get_axis_number(axis)
+
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
| - [x] closes #14369
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
I closed #14389 and opened with one in hopes of the new commits showing up.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14416 | 2016-10-13T14:37:04Z | 2016-10-15T19:59:53Z | null | 2016-10-19T13:50:07Z |
Bug: Made it so that 0 was included in uint8 | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 5180b9a092f6c..1a86ecae2faf3 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -48,3 +48,6 @@ Bug Fixes
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
+- Bug in ``pd.to_numeric`` where it would not downcast a 0 to a uint8 (:issue:`14404`)
+- Bug in ``pd.to_numeric`` where it would not downcast a 0 properly. (:issue:`14401`)
+- Bug in ``pd.to_numeric`` where a 0 was not unsigned on a downcast = 'unsigned' argument (:issue:`14401`)
diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py
index 8c16308d79a31..54cfd1dacb87e 100644
--- a/pandas/tools/tests/test_util.py
+++ b/pandas/tools/tests/test_util.py
@@ -401,6 +401,62 @@ def test_downcast(self):
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
+ # check that the smallest and largest values in each integer type pass to each type.
+ integer_dtype_min_max = {
+ 'int8': [np.iinfo(np.int8).min, np.iinfo(np.int8).max],
+ 'int16': [np.iinfo(np.int16).min, np.iinfo(np.int16).max],
+ 'int32': [np.iinfo(np.int32).min, np.iinfo(np.int32).max],
+ 'int64': [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
+ }
+
+ for dtype, min_max in integer_dtype_min_max.items():
+ series = pd.to_numeric(pd.Series(min_max), downcast = 'integer')
+ tm.assert_equal(series.dtype, dtype)
+
+
+ unsigned_dtype_min_max = {
+ 'uint8': [np.iinfo(np.uint8).min, np.iinfo(np.uint8).max],
+ 'uint16': [np.iinfo(np.uint16).min, np.iinfo(np.uint16).max],
+ 'uint32': [np.iinfo(np.uint32).min, np.iinfo(np.uint32).max],
+ # 'uint64': [np.iinfo(np.uint64).min, np.iinfo(np.uint64).max]
+ }
+
+ for dtype, min_max in unsigned_dtype_min_max.items():
+ series = pd.to_numeric(pd.Series(min_max), downcast = 'unsigned')
+ tm.assert_equal(series.dtype, dtype)
+
+ #check to see if the minimum number to shift integer types actually shifts
+
+ integer_dtype_min_max_plus = {
+ 'int16': [np.iinfo(np.int8).min, np.iinfo(np.int8).max + 1],
+ 'int32': [np.iinfo(np.int16).min, np.iinfo(np.int16).max + 1],
+ 'int64': [np.iinfo(np.int32).min, np.iinfo(np.int32).max + 1],
+ }
+
+ for dtype, min_max in integer_dtype_min_max_plus.items():
+ series = pd.to_numeric(pd.Series(min_max), downcast = 'integer')
+ tm.assert_equal(series.dtype, dtype)
+
+ integer_dtype_min_max_minus = {
+ 'int16': [np.iinfo(np.int8).min - 1, np.iinfo(np.int16).max],
+ 'int32': [np.iinfo(np.int16).min - 1, np.iinfo(np.int32).max],
+ 'int64': [np.iinfo(np.int32).min - 1, np.iinfo(np.int64).max]
+ }
+
+ for dtype, min_max in integer_dtype_min_max_minus.items():
+ series = pd.to_numeric(pd.Series(min_max), downcast = 'integer')
+ tm.assert_equal(series.dtype, dtype)
+
+ unsigned_dtype_min_max_plus = {
+ 'uint16': [np.iinfo(np.uint8).min, np.iinfo(np.uint8).max + 1],
+ 'uint32': [np.iinfo(np.uint16).min, np.iinfo(np.uint16).max + 1],
+ # 'uint64': [np.iinfo(np.uint32).min, np.iinfo(np.uint32).max + 1],
+ }
+
+ for dtype, min_max in unsigned_dtype_min_max_plus.items():
+ series = pd.to_numeric(pd.Series(min_max), downcast = 'unsigned')
+ tm.assert_equal(series.dtype, dtype)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index fec56328c1721..b50bf9dc448bc 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -205,7 +205,7 @@ def to_numeric(arg, errors='raise', downcast=None):
if downcast in ('integer', 'signed'):
typecodes = np.typecodes['Integer']
- elif downcast == 'unsigned' and np.min(values) > 0:
+ elif downcast == 'unsigned' and np.min(values) >= 0:
typecodes = np.typecodes['UnsignedInteger']
elif downcast == 'float':
typecodes = np.typecodes['Float']
| - [ ] closes #14401
- [ ] tests added / passed
- [ ] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
Decided to restart. Sorry for the inconvenience. -> #14472
| https://api.github.com/repos/pandas-dev/pandas/pulls/14412 | 2016-10-13T04:25:29Z | 2016-10-21T23:55:53Z | null | 2016-10-22T08:49:42Z |
DOC: pydata/pandas -> pandas-dev/pandas | diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index 2b10cb88a3134..de9aa18937985 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -20,4 +20,4 @@ def setup(self):
self.cur_index = self.df.index
def time_setattr_dataframe_index(self):
- self.df.index = self.cur_index
\ No newline at end of file
+ self.df.index = self.cur_index
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 265ffbc7261ca..f68cf9399c546 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -49,4 +49,4 @@ def setup(self):
self.s = Series(([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')] * 1000))
def time_index_from_series_ctor(self):
- Index(self.s)
\ No newline at end of file
+ Index(self.s)
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 85f3c1628bd8b..6f40611e68531 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -1703,4 +1703,4 @@ def setup(self):
self.dict_list = [dict(zip(self.columns, row)) for row in self.frame.values]
def time_series_ctor_from_dict(self):
- Series(self.some_dict)
\ No newline at end of file
+ Series(self.some_dict)
diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/hdfstore_bench.py
index 7638cc2a0f8df..659fc4941da54 100644
--- a/asv_bench/benchmarks/hdfstore_bench.py
+++ b/asv_bench/benchmarks/hdfstore_bench.py
@@ -348,4 +348,4 @@ def remove(self, f):
try:
os.remove(self.f)
except:
- pass
\ No newline at end of file
+ pass
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index a0a1b560d36f3..2c94f9b2b1e8c 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -344,4 +344,4 @@ def setup(self):
self.mi = MultiIndex.from_product([self.level1, self.level2])
def time_multiindex_with_datetime_level_sliced(self):
- self.mi[:10].values
\ No newline at end of file
+ self.mi[:10].values
diff --git a/asv_bench/benchmarks/io_sql.py b/asv_bench/benchmarks/io_sql.py
index 9a6b21f9e067a..c583ac1768c90 100644
--- a/asv_bench/benchmarks/io_sql.py
+++ b/asv_bench/benchmarks/io_sql.py
@@ -212,4 +212,4 @@ def setup(self):
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_sql_write_sqlalchemy(self):
- self.df.to_sql('test1', self.engine, if_exists='replace')
\ No newline at end of file
+ self.df.to_sql('test1', self.engine, if_exists='replace')
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
index 0b0e73847aa96..4f6fd4a5a2df8 100644
--- a/asv_bench/benchmarks/panel_ctor.py
+++ b/asv_bench/benchmarks/panel_ctor.py
@@ -61,4 +61,4 @@ def setup(self):
self.data_frames[x] = self.df
def time_panel_from_dict_two_different_indexes(self):
- Panel.from_dict(self.data_frames)
\ No newline at end of file
+ Panel.from_dict(self.data_frames)
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
index 90118eaf6e407..0bd572db2211a 100644
--- a/asv_bench/benchmarks/panel_methods.py
+++ b/asv_bench/benchmarks/panel_methods.py
@@ -53,4 +53,4 @@ def setup(self):
self.panel = Panel(np.random.randn(100, len(self.index), 1000))
def time_panel_shift_minor(self):
- self.panel.shift(1, axis='minor')
\ No newline at end of file
+ self.panel.shift(1, axis='minor')
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index e9f33ebfce0bd..869ddd8d6fa49 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -45,4 +45,4 @@ def setup(self):
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_replacena(self):
- self.ts.replace(np.nan, 0.0, inplace=True)
\ No newline at end of file
+ self.ts.replace(np.nan, 0.0, inplace=True)
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 604fa5092a231..ab235e085986c 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -73,4 +73,4 @@ def setup(self):
break
def time_unstack_sparse_keyspace(self):
- self.idf.unstack()
\ No newline at end of file
+ self.idf.unstack()
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index daf5135e64c40..12fbb2478c2a5 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -258,4 +258,4 @@ def time_rolling_skew(self):
rolling_skew(self.arr, self.win)
def time_rolling_kurt(self):
- rolling_kurt(self.arr, self.win)
\ No newline at end of file
+ rolling_kurt(self.arr, self.win)
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index e4f91b1b9c0c6..d64606214ca6a 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -390,4 +390,4 @@ def time_strings_upper(self):
self.many.str.upper()
def make_series(self, letters, strlen, size):
- return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
\ No newline at end of file
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
diff --git a/doc/README.rst b/doc/README.rst
index a93ad32a4c8f8..a3733846d9ed1 100644
--- a/doc/README.rst
+++ b/doc/README.rst
@@ -155,9 +155,9 @@ Where to start?
---------------
There are a number of issues listed under `Docs
-<https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_
and `Good as first PR
-<https://github.com/pydata/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_
where you could start out.
Or maybe you have an idea of your own, by using pandas, looking for something
diff --git a/doc/_templates/autosummary/accessor_attribute.rst b/doc/_templates/autosummary/accessor_attribute.rst
index e38a9f22f9d99..a2f0eb5e068c4 100644
--- a/doc/_templates/autosummary/accessor_attribute.rst
+++ b/doc/_templates/autosummary/accessor_attribute.rst
@@ -3,4 +3,4 @@
.. currentmodule:: {{ module.split('.')[0] }}
-.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }}
\ No newline at end of file
+.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }}
diff --git a/doc/_templates/autosummary/accessor_method.rst b/doc/_templates/autosummary/accessor_method.rst
index 8175d8615ceb2..43dfc3b813120 100644
--- a/doc/_templates/autosummary/accessor_method.rst
+++ b/doc/_templates/autosummary/accessor_method.rst
@@ -3,4 +3,4 @@
.. currentmodule:: {{ module.split('.')[0] }}
-.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }}
\ No newline at end of file
+.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }}
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index f52f72b49dd31..090998570a358 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -973,7 +973,7 @@ are not numeric data (even in the case that ``.categories`` is numeric).
print("TypeError: " + str(e))
.. note::
- If such a function works, please file a bug at https://github.com/pydata/pandas!
+ If such a function works, please file a bug at https://github.com/pandas-dev/pandas!
dtype in apply
~~~~~~~~~~~~~~
diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst
index 85d432b546f21..7ec91d251f15d 100644
--- a/doc/source/comparison_with_sas.rst
+++ b/doc/source/comparison_with_sas.rst
@@ -116,7 +116,7 @@ Reading External Data
Like SAS, pandas provides utilities for reading in data from
many formats. The ``tips`` dataset, found within the pandas
-tests (`csv <https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv>`_)
+tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_)
will be used in many of the following examples.
SAS provides ``PROC IMPORT`` to read csv data into a data set.
@@ -131,7 +131,7 @@ The pandas method is :func:`read_csv`, which works similarly.
.. ipython:: python
- url = 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv'
+ url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
tips = pd.read_csv(url)
tips.head()
diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst
index 099a0e9469058..7962e0e69faa1 100644
--- a/doc/source/comparison_with_sql.rst
+++ b/doc/source/comparison_with_sql.rst
@@ -23,7 +23,7 @@ structure.
.. ipython:: python
- url = 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv'
+ url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
tips = pd.read_csv(url)
tips.head()
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fd3a2493a53e8..6ccd83c741f7a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -301,9 +301,9 @@
autosummary_generate = glob.glob("*.rst")
# extlinks alias
-extlinks = {'issue': ('https://github.com/pydata/pandas/issues/%s',
+extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
'GH'),
- 'wiki': ('https://github.com/pydata/pandas/wiki/%s',
+ 'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
'wiki ')}
ipython_exec_lines = [
@@ -468,10 +468,10 @@ def linkcode_resolve(domain, info):
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if '+' in pandas.__version__:
- return "http://github.com/pydata/pandas/blob/master/pandas/%s%s" % (
+ return "http://github.com/pandas-dev/pandas/blob/master/pandas/%s%s" % (
fn, linespec)
else:
- return "http://github.com/pydata/pandas/blob/v%s/pandas/%s%s" % (
+ return "http://github.com/pandas-dev/pandas/blob/v%s/pandas/%s%s" % (
pandas.__version__, fn, linespec)
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7f336abcaa6d7..3e500291db859 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -14,11 +14,11 @@ All contributions, bug reports, bug fixes, documentation improvements,
enhancements and ideas are welcome.
If you are simply looking to start working with the *pandas* codebase, navigate to the
-`GitHub "issues" tab <https://github.com/pydata/pandas/issues>`_ and start looking through
+`GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ and start looking through
interesting issues. There are a number of issues listed under `Docs
-<https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_
and `Difficulty Novice
-<https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_
+<https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_
where you could start out.
Or maybe through using *pandas* you have an idea of your own or are looking for something
@@ -27,7 +27,7 @@ about it!
Feel free to ask questions on the `mailing list
<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter
-<https://gitter.im/pydata/pandas>`_.
+<https://gitter.im/pandas-dev/pandas>`_.
Bug reports and enhancement requests
====================================
@@ -79,7 +79,7 @@ It can very quickly become overwhelming, but sticking to the guidelines below wi
straightforward and mostly trouble free. As always, if you are having difficulties please
feel free to ask for help.
-The code is hosted on `GitHub <https://www.github.com/pydata/pandas>`_. To
+The code is hosted on `GitHub <https://www.github.com/pandas-dev/pandas>`_. To
contribute you will need to sign up for a `free GitHub account
<https://github.com/signup/free>`_. We use `Git <http://git-scm.com/>`_ for
version control to allow many people to work together on the project.
@@ -103,12 +103,12 @@ Forking
-------
You will need your own fork to work on the code. Go to the `pandas project
-page <https://github.com/pydata/pandas>`_ and hit the ``Fork`` button. You will
+page <https://github.com/pandas-dev/pandas>`_ and hit the ``Fork`` button. You will
want to clone your fork to your machine::
git clone git@github.com:your-user-name/pandas.git pandas-yourname
cd pandas-yourname
- git remote add upstream git://github.com/pydata/pandas.git
+ git remote add upstream git://github.com/pandas-dev/pandas.git
This creates the directory `pandas-yourname` and connects your repository to
the upstream (main project) *pandas* repository.
@@ -467,7 +467,7 @@ and make these changes with::
pep8radius master --diff --in-place
Additional standards are outlined on the `code style wiki
-page <https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions>`_.
+page <https://github.com/pandas-dev/pandas/wiki/Code-Style-and-Conventions>`_.
Please try to maintain backward compatibility. *pandas* has lots of users with lots of
existing code, so don't break it if at all possible. If you think breakage is required,
@@ -501,7 +501,7 @@ All tests should go into the ``tests`` subdirectory of the specific package.
This folder contains many current examples of tests, and we suggest looking to these for
inspiration. If your test requires working with files or
network connectivity, there is more information on the `testing page
-<https://github.com/pydata/pandas/wiki/Testing>`_ of the wiki.
+<https://github.com/pandas-dev/pandas/wiki/Testing>`_ of the wiki.
The ``pandas.util.testing`` module has many special ``assert`` functions that
make it easier to make statements about whether Series or DataFrame objects are
@@ -639,7 +639,7 @@ on Travis-CI. The first step is to create a `service account
Integration tests for ``pandas.io.gbq`` are skipped in pull requests because
the credentials that are required for running Google BigQuery integration
tests are `encrypted <https://docs.travis-ci.com/user/encrypting-files/>`__
-on Travis-CI and are only accessible from the pydata/pandas repository. The
+on Travis-CI and are only accessible from the pandas-dev/pandas repository. The
credentials won't be available on forks of pandas. Here are the steps to run
gbq integration tests on a forked repository:
@@ -688,7 +688,7 @@ performance regressions.
You can run specific benchmarks using the ``-r`` flag, which takes a regular expression.
-See the `performance testing wiki <https://github.com/pydata/pandas/wiki/Performance-Testing>`_ for information
+See the `performance testing wiki <https://github.com/pandas-dev/pandas/wiki/Performance-Testing>`_ for information
on how to write a benchmark.
Documenting your code
@@ -712,8 +712,8 @@ directive is used. The sphinx syntax for that is:
This will put the text *New in version 0.17.0* wherever you put the sphinx
directive. This should also be put in the docstring when adding a new function
-or method (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
-or a new keyword argument (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
+or method (`example <https://github.com/pandas-dev/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
+or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
Contributing your changes to *pandas*
=====================================
@@ -806,8 +806,8 @@ like::
origin git@github.com:yourname/pandas.git (fetch)
origin git@github.com:yourname/pandas.git (push)
- upstream git://github.com/pydata/pandas.git (fetch)
- upstream git://github.com/pydata/pandas.git (push)
+ upstream git://github.com/pandas-dev/pandas.git (fetch)
+ upstream git://github.com/pandas-dev/pandas.git (push)
Now your code is on GitHub, but it is not yet a part of the *pandas* project. For that to
happen, a pull request needs to be submitted on GitHub.
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 38a816060e1bc..a4ba21d495790 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -200,7 +200,7 @@ The :ref:`indexing <indexing>` docs.
df[(df.AAA <= 6) & (df.index.isin([0,2,4]))]
`Use loc for label-oriented slicing and iloc positional slicing
-<https://github.com/pydata/pandas/issues/2904>`__
+<https://github.com/pandas-dev/pandas/issues/2904>`__
.. ipython:: python
@@ -410,7 +410,7 @@ Sorting
df.sort_values(by=('Labs', 'II'), ascending=False)
`Partial Selection, the need for sortedness;
-<https://github.com/pydata/pandas/issues/2995>`__
+<https://github.com/pandas-dev/pandas/issues/2995>`__
Levels
******
@@ -787,7 +787,7 @@ The :ref:`Resample <timeseries.resampling>` docs.
<http://stackoverflow.com/questions/14569223/timegrouper-pandas>`__
`Using TimeGrouper and another grouping to create subgroups, then apply a custom function
-<https://github.com/pydata/pandas/issues/3791>`__
+<https://github.com/pandas-dev/pandas/issues/3791>`__
`Resampling with custom periods
<http://stackoverflow.com/questions/15408156/resampling-with-custom-periods>`__
@@ -823,7 +823,7 @@ ignore_index is needed in pandas < v0.13, and depending on df construction
df = df1.append(df2,ignore_index=True); df
`Self Join of a DataFrame
-<https://github.com/pydata/pandas/issues/2996>`__
+<https://github.com/pandas-dev/pandas/issues/2996>`__
.. ipython:: python
@@ -936,7 +936,7 @@ using that handle to read.
<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
`Dealing with bad lines
-<http://github.com/pydata/pandas/issues/2886>`__
+<http://github.com/pandas-dev/pandas/issues/2886>`__
`Dealing with bad lines II
<http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/>`__
@@ -1075,7 +1075,7 @@ The :ref:`HDFStores <io.hdf5>` docs
<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
`Managing heterogeneous data using a linked multiple table hierarchy
-<http://github.com/pydata/pandas/issues/3032>`__
+<http://github.com/pandas-dev/pandas/issues/3032>`__
`Merging on-disk tables with millions of rows
<http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__
@@ -1216,7 +1216,7 @@ Timedeltas
The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
`Using timedeltas
-<http://github.com/pydata/pandas/pull/2899>`__
+<http://github.com/pandas-dev/pandas/pull/2899>`__
.. ipython:: python
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 17ebd1f163f4f..d42d1a9091421 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -143,7 +143,7 @@ both "column wise min/max and global min/max coloring."
API
-----
-`pandas-datareader <https://github.com/pydata/pandas-datareader>`__
+`pandas-datareader <https://github.com/pandas-dev/pandas-datareader>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.org/en/latest/>`_:
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 99d7486cde2d0..cfac5c257184d 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -391,7 +391,7 @@ This is because ``reindex_like`` silently inserts ``NaNs`` and the ``dtype``
changes accordingly. This can cause some issues when using ``numpy`` ``ufuncs``
such as ``numpy.logical_and``.
-See the `this old issue <https://github.com/pydata/pandas/issues/2388>`__ for a more
+See the `this old issue <https://github.com/pandas-dev/pandas/issues/2388>`__ for a more
detailed discussion.
Parsing Dates from Text Files
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 6295e6f6cbb68..f1b05d3579e5c 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -13,7 +13,7 @@ This is the recommended installation method for most users.
Instructions for installing from source,
`PyPI <http://pypi.python.org/pypi/pandas>`__, various Linux distributions, or a
-`development version <http://github.com/pydata/pandas>`__ are also provided.
+`development version <http://github.com/pandas-dev/pandas>`__ are also provided.
Python version support
----------------------
diff --git a/doc/source/io.rst b/doc/source/io.rst
index c07cfe4cd5574..1a8ccdf7b2d86 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2035,7 +2035,7 @@ You can even pass in an instance of ``StringIO`` if you so desire
that having so many network-accessing functions slows down the documentation
build. If you spot an error or an example that doesn't run, please do not
hesitate to report it over on `pandas GitHub issues page
- <http://www.github.com/pydata/pandas/issues>`__.
+ <http://www.github.com/pandas-dev/pandas/issues>`__.
Read a URL and match a table that contains specific text
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index b1addddc2121d..92caeec319169 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -81,7 +81,7 @@ Getting Support
---------------
The first stop for pandas issues and ideas is the `Github Issue Tracker
-<https://github.com/pydata/pandas/issues>`__. If you have a general question,
+<https://github.com/pandas-dev/pandas/issues>`__. If you have a general question,
pandas community experts can answer through `Stack Overflow
<http://stackoverflow.com/questions/tagged/pandas>`__.
@@ -103,7 +103,7 @@ training, and consulting for pandas.
pandas is only made possible by a group of people around the world like you
who have contributed new code, bug reports, fixes, comments and ideas. A
-complete list can be found `on Github <http://www.github.com/pydata/pandas/contributors>`__.
+complete list can be found `on Github <http://www.github.com/pandas-dev/pandas/contributors>`__.
Development Team
----------------
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst
index f3df1ebdf25cb..b487fbc883c72 100644
--- a/doc/source/r_interface.rst
+++ b/doc/source/r_interface.rst
@@ -71,7 +71,7 @@ The ``convert_to_r_matrix`` function can be replaced by the normal
Not all conversion functions in rpy2 are working exactly the same as the
current methods in pandas. If you experience problems or limitations in
comparison to the ones in pandas, please report this at the
- `issue tracker <https://github.com/pydata/pandas/issues>`_.
+ `issue tracker <https://github.com/pandas-dev/pandas/issues>`_.
See also the documentation of the `rpy2 <http://rpy2.bitbucket.org/>`__ project.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7e987fcff31b3..d210065f04459 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -20,7 +20,7 @@ Release Notes
*************
This is the list of changes to pandas between each release. For full details,
-see the commit logs at http://github.com/pydata/pandas
+see the commit logs at http://github.com/pandas-dev/pandas
**What is it**
@@ -33,7 +33,7 @@ analysis / manipulation tool available in any language.
**Where to get it**
-* Source code: http://github.com/pydata/pandas
+* Source code: http://github.com/pandas-dev/pandas
* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
* Documentation: http://pandas.pydata.org
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst
index 019aa82fed1aa..e2c713ac8519a 100644
--- a/doc/source/remote_data.rst
+++ b/doc/source/remote_data.rst
@@ -13,7 +13,7 @@ DataReader
The sub-package ``pandas.io.data`` is removed in favor of a separately
installable `pandas-datareader package
-<https://github.com/pydata/pandas-datareader>`_. This will allow the data
+<https://github.com/pandas-dev/pandas-datareader>`_. This will allow the data
modules to be independently updated to your pandas installation. The API for
``pandas-datareader v0.1.1`` is the same as in ``pandas v0.16.1``.
(:issue:`8961`)
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 6e05c3ff0457a..e3b186abe53fc 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -892,7 +892,7 @@ for Fourier series. By coloring these curves differently for each class
it is possible to visualize data clustering. Curves belonging to samples
of the same class will usually be closer together and form larger structures.
-**Note**: The "Iris" dataset is available `here <https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv>`__.
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__.
.. ipython:: python
@@ -1044,7 +1044,7 @@ forces acting on our sample are at an equilibrium) is where a dot representing
our sample will be drawn. Depending on which class that sample belongs it will
be colored differently.
-**Note**: The "Iris" dataset is available `here <https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv>`__.
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__.
.. ipython:: python
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 990018f2f7f3b..1b8930dcae0f1 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -392,7 +392,7 @@ def __reduce__(self): # optional, for pickle support
return type(self), args, None, None, list(self.items())
-# https://github.com/pydata/pandas/pull/9123
+# https://github.com/pandas-dev/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == 'little'
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 72fbc3906cafb..f480eae2dd04d 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -1693,11 +1693,11 @@ def test_result_types(self):
self.check_result_type(np.float64, np.float64)
def test_result_types2(self):
- # xref https://github.com/pydata/pandas/issues/12293
+ # xref https://github.com/pandas-dev/pandas/issues/12293
raise nose.SkipTest("unreliable tests on complex128")
# Did not test complex64 because DataFrame is converting it to
- # complex128. Due to https://github.com/pydata/pandas/issues/10952
+ # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952
self.check_result_type(np.complex128, np.complex128)
def test_undefined_func(self):
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index db48f2a46eaf3..9efaff6060909 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1681,7 +1681,7 @@ def __setitem__(self, key, value):
else:
# There is a bug in numpy, which does not accept a Series as a
# indexer
- # https://github.com/pydata/pandas/issues/6168
+ # https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
@@ -1690,7 +1690,7 @@ def __setitem__(self, key, value):
lindexer = self.categories.get_indexer(rvalue)
# FIXME: the following can be removed after GH7820 is fixed:
- # https://github.com/pydata/pandas/issues/7820
+ # https://github.com/pandas-dev/pandas/issues/7820
# float categories do currently return -1 for np.nan, even if np.nan is
# included in the index -> "repair" this here
if isnull(rvalue).any() and isnull(self.categories).any():
diff --git a/pandas/io/data.py b/pandas/io/data.py
index e76790a6ab98b..09c7aef0cde1a 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -1,6 +1,6 @@
raise ImportError(
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader). After installing the pandas-datareader package "
- "(https://github.com/pydata/pandas-datareader), you can change "
+ "(https://github.com/pandas-dev/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.")
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index d6f8660f20ef6..8038cc500f6cd 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -236,7 +236,7 @@ def get_user_account_credentials(self):
return credentials
def get_service_account_credentials(self):
- # Bug fix for https://github.com/pydata/pandas/issues/12572
+ # Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
diff --git a/pandas/io/tests/json/test_pandas.py b/pandas/io/tests/json/test_pandas.py
index 47bdd25572fc7..ffac5d5f4746e 100644
--- a/pandas/io/tests/json/test_pandas.py
+++ b/pandas/io/tests/json/test_pandas.py
@@ -767,7 +767,7 @@ def test_round_trip_exception_(self):
@network
def test_url(self):
- url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
+ url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
self.assertEqual(result[c].dtype, 'datetime64[ns]')
diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py
index 0b59b695e1dca..0219e16391be8 100644
--- a/pandas/io/tests/parser/common.py
+++ b/pandas/io/tests/parser/common.py
@@ -629,7 +629,7 @@ def test_read_csv_parse_simple_list(self):
@tm.network
def test_url(self):
# HTTP(S)
- url = ('https://raw.github.com/pydata/pandas/master/'
+ url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/io/tests/parser/data/salary.table.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
diff --git a/pandas/io/tests/parser/test_network.py b/pandas/io/tests/parser/test_network.py
index 8b8a6de36fc03..7e2f039853e2f 100644
--- a/pandas/io/tests/parser/test_network.py
+++ b/pandas/io/tests/parser/test_network.py
@@ -23,7 +23,7 @@ def setUp(self):
@tm.network
def test_url_gz(self):
- url = ('https://raw.github.com/pydata/pandas/'
+ url = ('https://raw.github.com/pandas-dev/pandas/'
'master/pandas/io/tests/parser/data/salary.table.gz')
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index d163b05aa01d4..998e71076b7c0 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -543,7 +543,7 @@ def test_read_xlrd_Book(self):
@tm.network
def test_read_from_http_url(self):
- url = ('https://raw.github.com/pydata/pandas/master/'
+ url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/io/tests/data/test1' + self.ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1')
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py
index 0ea4b5204e150..cca1580b84195 100644
--- a/pandas/io/tests/test_gbq.py
+++ b/pandas/io/tests/test_gbq.py
@@ -150,7 +150,7 @@ def _test_imports():
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
- # Bug fix for https://github.com/pydata/pandas/issues/12572
+ # Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
@@ -651,7 +651,7 @@ def test_download_dataset_larger_than_200k_rows(self):
self.assertEqual(len(df.drop_duplicates()), test_size)
def test_zero_rows(self):
- # Bug fix for https://github.com/pydata/pandas/issues/10273
+ # Bug fix for https://github.com/pandas-dev/pandas/issues/10273
df = gbq.read_gbq("SELECT title, id "
"FROM [publicdata:samples.wikipedia] "
"WHERE timestamp=-9999999",
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index cf61ad9a35935..91042775ba19d 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -544,7 +544,7 @@ def test_sparse_frame(self):
class TestCompression(TestPackers):
- """See https://github.com/pydata/pandas/pull/9783
+ """See https://github.com/pandas-dev/pandas/pull/9783
"""
def setUp(self):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 198a4017b5af7..af8989baabbc0 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -1610,7 +1610,7 @@ def test_double_precision(self):
def test_connectable_issue_example(self):
# This tests the example raised in issue
- # https://github.com/pydata/pandas/issues/10104
+ # https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
diff --git a/pandas/io/wb.py b/pandas/io/wb.py
index 5dc4d9ce1adc4..2183290c7e074 100644
--- a/pandas/io/wb.py
+++ b/pandas/io/wb.py
@@ -1,6 +1,6 @@
raise ImportError(
"The pandas.io.wb module is moved to a separate package "
"(pandas-datareader). After installing the pandas-datareader package "
- "(https://github.com/pydata/pandas-datareader), you can change "
+ "(https://github.com/pandas-dev/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.")
diff --git a/pandas/tests/formats/test_style.py b/pandas/tests/formats/test_style.py
index 3083750e582fc..2fec04b9c1aa3 100644
--- a/pandas/tests/formats/test_style.py
+++ b/pandas/tests/formats/test_style.py
@@ -144,7 +144,7 @@ def test_set_properties_subset(self):
self.assertEqual(result, expected)
def test_empty_index_name_doesnt_display(self):
- # https://github.com/pydata/pandas/pull/12090#issuecomment-180695902
+ # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.style._translate()
@@ -175,7 +175,7 @@ def test_empty_index_name_doesnt_display(self):
self.assertEqual(result['head'], expected)
def test_index_name(self):
- # https://github.com/pydata/pandas/issues/11655
+ # https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index('A').style._translate()
@@ -195,7 +195,7 @@ def test_index_name(self):
self.assertEqual(result['head'], expected)
def test_multiindex_name(self):
- # https://github.com/pydata/pandas/issues/11655
+ # https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index(['A', 'B']).style._translate()
@@ -217,7 +217,7 @@ def test_multiindex_name(self):
self.assertEqual(result['head'], expected)
def test_numeric_columns(self):
- # https://github.com/pydata/pandas/issues/12125
+ # https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = pd.DataFrame({0: [1, 2, 3]})
df.style._translate()
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 2cb62a60f885b..9ef2802cb950f 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -392,7 +392,7 @@ def test_boolean_selection(self):
def test_indexing_with_category(self):
- # https://github.com/pydata/pandas/issues/12564
+ # https://github.com/pandas-dev/pandas/issues/12564
# consistent result if comparing as Dataframe
cat = DataFrame({'A': ['foo', 'bar', 'baz']})
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 333792c5ffdb2..0916693ade2ce 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -100,7 +100,7 @@ def test_boxplot_return_type_none(self):
@slow
def test_boxplot_return_type_legacy(self):
- # API change in https://github.com/pydata/pandas/pull/7096
+ # API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(randn(6, 4),
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 4d0c1e9213b17..87cf89ebf0a9d 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -84,7 +84,7 @@ def test_plot(self):
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
- # present). see: https://github.com/pydata/pandas/issues/9737
+ # present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
@@ -927,7 +927,7 @@ def test_plot_scatter_with_c(self):
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
- # See https://github.com/pydata/pandas/issues/8852 for bug report
+ # See https://github.com/pandas-dev/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
@@ -2115,7 +2115,7 @@ def test_pie_df_nan(self):
self.assertEqual(result, expected)
# legend labels
# NaN's not included in legend with subplots
- # see https://github.com/pydata/pandas/issues/8390
+ # see https://github.com/pandas-dev/pandas/issues/8390
self.assertEqual([x.get_text() for x in
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i + 1:])
@@ -2336,9 +2336,9 @@ def _check_errorbar_color(containers, expected, has_err='has_xerr'):
@slow
def test_sharex_and_ax(self):
- # https://github.com/pydata/pandas/issues/9737 using gridspec, the axis
- # in fig.get_axis() are sorted differently than pandas expected them,
- # so make sure that only the right ones are removed
+ # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
+ # the axis in fig.get_axis() are sorted differently than pandas
+ # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
@@ -2388,9 +2388,9 @@ def _check(axes):
@slow
def test_sharey_and_ax(self):
- # https://github.com/pydata/pandas/issues/9737 using gridspec, the axis
- # in fig.get_axis() are sorted differently than pandas expected them,
- # so make sure that only the right ones are removed
+ # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
+ # the axis in fig.get_axis() are sorted differently than pandas
+ # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 8f2ab0ed28839..ed441f2f85572 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -273,7 +273,7 @@ def f():
self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
- # https://github.com/pydata/pandas/issues/10673
+ # https://github.com/pandas-dev/pandas/issues/10673
s = Series(date_range('20130101', periods=5, freq='D'))
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index f688ec2d43789..086946d05d7a6 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1412,7 +1412,7 @@ def tester(a, b):
# NotImplemented
# this is an alignment issue; these are equivalent
- # https://github.com/pydata/pandas/issues/5284
+ # https://github.com/pandas-dev/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 092e02ee261a0..f89f41abd0d35 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -595,7 +595,7 @@ def test_categorical_zeroes(self):
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
- # https://github.com/pydata/pandas/issues/9443#issuecomment-73719328
+ # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
pd.Series([True, True, False]).value_counts(dropna=True),
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index a494a0d53b123..f01fff035a3c5 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -191,7 +191,7 @@ def f():
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(is_integer_dtype(cat.categories))
- # https://github.com/pydata/pandas/issues/3678
+ # https://github.com/pandas-dev/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(is_integer_dtype(cat.categories))
@@ -618,7 +618,7 @@ def test_describe(self):
index=exp_index)
tm.assert_frame_equal(desc, expected)
- # https://github.com/pydata/pandas/issues/3678
+ # https://github.com/pandas-dev/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
@@ -1547,7 +1547,7 @@ def test_memory_usage(self):
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
- # https://github.com/pydata/pandas/issues/8420
+ # https://github.com/pandas-dev/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
@@ -1633,7 +1633,7 @@ def test_reflected_comparison_with_scalars(self):
np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
- # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
+ # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
@@ -3829,7 +3829,7 @@ def f():
self.assertRaises(TypeError, f)
- # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
+ # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
@@ -4303,14 +4303,14 @@ def test_cat_accessor_api(self):
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
- # https://github.com/pydata/pandas/issues/10673
+ # https://github.com/pandas-dev/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
- # https://github.com/pydata/pandas/issues/10661
+ # https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
@@ -4385,7 +4385,7 @@ def test_str_accessor_api_for_categorical(self):
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
- # https://github.com/pydata/pandas/issues/10661
+ # https://github.com/pandas-dev/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 62ad4c5aa4338..ea226851c9101 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -427,7 +427,7 @@ def f3(key):
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
- # See https://github.com/pydata/pandas/issues/8514
+ # See https://github.com/pandas-dev/pandas/issues/8514
original_value = 60
context_value = 10
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 01c1d48c6d5c0..02917ab18c29f 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -6443,7 +6443,7 @@ def test_transform_doesnt_clobber_ints(self):
def test_groupby_categorical_two_columns(self):
- # https://github.com/pydata/pandas/issues/8138
+ # https://github.com/pandas-dev/pandas/issues/8138
d = {'cat':
pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
ordered=True),
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 4019bbe20ea1a..9a3505c3421e0 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2604,7 +2604,7 @@ def test_cat_on_filtered_index(self):
self.assertEqual(str_multiple.loc[1], '2011 2 2')
def test_str_cat_raises_intuitive_error(self):
- # https://github.com/pydata/pandas/issues/11334
+ # https://github.com/pandas-dev/pandas/issues/11334
s = Series(['a', 'b', 'c', 'd'])
message = "Did you mean to supply a `sep` keyword?"
with tm.assertRaisesRegexp(ValueError, message):
@@ -2661,7 +2661,7 @@ def test_index_str_accessor_visibility(self):
idx.str
def test_str_accessor_no_new_attributes(self):
- # https://github.com/pydata/pandas/issues/10673
+ # https://github.com/pandas-dev/pandas/issues/10673
s = Series(list('aabbcde'))
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 7fd0b1044f9d7..d46dc4d355b4c 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -722,7 +722,7 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
- >>> df = read_csv('https://raw.github.com/pydata/pandas/master'
+ >>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270',
'#4ECDC4', '#C7F464'))
@@ -2773,7 +2773,7 @@ def plot_group(keys, values, ax):
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
- # https://github.com/pydata/pandas/pull/12216#issuecomment-241175580
+ # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index f1a209053445a..d02c403cb3c66 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -1281,7 +1281,7 @@ def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
- # See https://github.com/pydata/pandas/issues/8683
+ # See https://github.com/pandas-dev/pandas/issues/8683
first_tzinfo = first.tzinfo
first = first.tz_localize(None)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index b3da62c8d2db5..1735ac4e2efa5 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -4606,7 +4606,7 @@ def test_parse_time_string(self):
self.assertEqual(reso, reso_lower)
def test_parse_time_quarter_w_dash(self):
- # https://github.com/pydata/pandas/issue/9688
+ # https://github.com/pandas-dev/pandas/issue/9688
pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988'), ]
for dashed, normal in pairs:
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 204808dd510a0..9d3d27f3224b4 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -1678,7 +1678,7 @@ def test_resample_anchored_multiday(self):
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
- # See: https://github.com/pydata/pandas/issues/8683
+ # See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
'2014-10-14 23:06:23.206', periods=3, freq='400L'
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index a85a606075911..714a596406c03 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -903,7 +903,7 @@ def test_utc_with_system_utc(self):
def test_tz_convert_hour_overflow_dst(self):
# Regression test for:
- # https://github.com/pydata/pandas/issues/13306
+ # https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
@@ -943,7 +943,7 @@ def test_tz_convert_hour_overflow_dst(self):
def test_tz_convert_hour_overflow_dst_timestamps(self):
# Regression test for:
- # https://github.com/pydata/pandas/issues/13306
+ # https://github.com/pandas-dev/pandas/issues/13306
tz = self.tzstr('US/Eastern')
@@ -985,7 +985,7 @@ def test_tz_convert_hour_overflow_dst_timestamps(self):
def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
- # See https://github.com/pydata/pandas/issues/4496 for details.
+ # See https://github.com/pandas-dev/pandas/issues/4496 for details.
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
idx = date_range(datetime(2011, 3, 26, 23),
datetime(2011, 3, 27, 1), freq=freq)
diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py
index f00273bc75199..49273bacccf98 100755
--- a/scripts/find_undoc_args.py
+++ b/scripts/find_undoc_args.py
@@ -19,7 +19,7 @@
parser.add_argument('-m', '--module', metavar='MODULE', type=str,required=True,
help='name of package to import and examine',action='store')
parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,required=False,
- help='github project where the the code lives, e.g. "pydata/pandas"',
+ help='github project where the the code lives, e.g. "pandas-dev/pandas"',
default=None,action='store')
args = parser.parse_args()
diff --git a/scripts/gen_release_notes.py b/scripts/gen_release_notes.py
index 02ba4f57c189d..7e4ffca59a0ab 100644
--- a/scripts/gen_release_notes.py
+++ b/scripts/gen_release_notes.py
@@ -46,7 +46,7 @@ def get_issues():
def _get_page(page_number):
- gh_url = ('https://api.github.com/repos/pydata/pandas/issues?'
+ gh_url = ('https://api.github.com/repos/pandas-dev/pandas/issues?'
'milestone=*&state=closed&assignee=*&page=%d') % page_number
with urlopen(gh_url) as resp:
rs = resp.readlines()[0]
diff --git a/scripts/touchup_gh_issues.py b/scripts/touchup_gh_issues.py
index 96ee220f55a02..8aa6d426156f0 100755
--- a/scripts/touchup_gh_issues.py
+++ b/scripts/touchup_gh_issues.py
@@ -14,7 +14,7 @@
pat = "((?:\s*GH\s*)?)#(\d{3,4})([^_]|$)?"
rep_pat = r"\1GH\2_\3"
-anchor_pat = ".. _GH{id}: https://github.com/pydata/pandas/issues/{id}"
+anchor_pat = ".. _GH{id}: https://github.com/pandas-dev/pandas/issues/{id}"
section_pat = "^pandas\s[\d\.]+\s*$"
diff --git a/vb_suite/perf_HEAD.py b/vb_suite/perf_HEAD.py
index c14a1795f01e0..143d943b9eadf 100755
--- a/vb_suite/perf_HEAD.py
+++ b/vb_suite/perf_HEAD.py
@@ -192,7 +192,7 @@ def get_build_results(build):
return convert_json_to_df(r_url)
-def get_all_results(repo_id=53976): # travis pydata/pandas id
+def get_all_results(repo_id=53976): # travis pandas-dev/pandas id
"""Fetches the VBENCH results for all travis builds, and returns a list of result df
unsuccesful individual vbenches are dropped.
diff --git a/vb_suite/suite.py b/vb_suite/suite.py
index 70a6278c0852d..45053b6610896 100644
--- a/vb_suite/suite.py
+++ b/vb_suite/suite.py
@@ -67,7 +67,7 @@
TMP_DIR = config.get('setup', 'tmp_dir')
except:
REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
- REPO_URL = 'git@github.com:pydata/pandas.git'
+ REPO_URL = 'git@github.com:pandas-dev/pandas.git'
DB_PATH = os.path.join(REPO_PATH, 'vb_suite/benchmarks.db')
TMP_DIR = os.path.join(HOME, 'tmp/vb_pandas')
@@ -138,7 +138,7 @@ def generate_rst_files(benchmarks):
The ``.pandas_vb_common`` setup script can be found here_
-.. _here: https://github.com/pydata/pandas/tree/master/vb_suite
+.. _here: https://github.com/pandas-dev/pandas/tree/master/vb_suite
Produced on a machine with
| as main repo was updated
| https://api.github.com/repos/pandas-dev/pandas/pulls/14409 | 2016-10-12T23:46:34Z | 2016-10-13T19:59:21Z | 2016-10-13T19:59:21Z | 2016-10-13T19:59:31Z |
Convert readthedocs links for their .org -> .io migration for hosted projects | diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 352acee23df2d..cf604822d6eea 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -278,7 +278,7 @@ Please try to maintain backward compatibility. *pandas* has lots of users with l
Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore, it is worth getting in the habit of writing tests ahead of time so this is never an issue.
-Like many packages, *pandas* uses the [Nose testing system](http://nose.readthedocs.org/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
+Like many packages, *pandas* uses the [Nose testing system](https://nose.readthedocs.io/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
#### Writing tests
@@ -323,7 +323,7 @@ Performance matters and it is worth considering whether your code has introduced
>
> The asv benchmark suite was translated from the previous framework, vbench, so many stylistic issues are likely a result of automated transformation of the code.
-To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](http://asv.readthedocs.org/en/latest/installing.html).
+To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](https://asv.readthedocs.io/en/latest/installing.html).
To install asv:
@@ -360,7 +360,7 @@ This command is equivalent to:
This will launch every test only once, display stderr from the benchmarks, and use your local `python` that comes from your `$PATH`.
-Information on how to write a benchmark can be found in the [asv documentation](http://asv.readthedocs.org/en/latest/writing_benchmarks.html).
+Information on how to write a benchmark can be found in the [asv documentation](https://asv.readthedocs.io/en/latest/writing_benchmarks.html).
#### Running the vbench performance test suite (phasing out)
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fd3a2493a53e8..4f916c6ba5290 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -295,7 +295,7 @@
'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
- 'py': ('http://pylib.readthedocs.org/en/latest/', None)
+ 'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7f336abcaa6d7..446a40a7ec4b4 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -360,7 +360,7 @@ follow the Numpy Docstring Standard (see above), but you don't need to install
this because a local copy of numpydoc is included in the *pandas* source
code.
`nbconvert <https://nbconvert.readthedocs.io/en/latest/>`_ and
-`nbformat <http://nbformat.readthedocs.io/en/latest/>`_ are required to build
+`nbformat <https://nbformat.readthedocs.io/en/latest/>`_ are required to build
the Jupyter notebooks included in the documentation.
If you have a conda environment named ``pandas_dev``, you can install the extra
@@ -490,7 +490,7 @@ Adding tests is one of the most common requests after code is pushed to *pandas*
it is worth getting in the habit of writing tests ahead of time so this is never an issue.
Like many packages, *pandas* uses the `Nose testing system
-<http://nose.readthedocs.org/en/latest/index.html>`_ and the convenient
+<https://nose.readthedocs.io/en/latest/index.html>`_ and the convenient
extensions in `numpy.testing
<http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
@@ -569,7 +569,7 @@ supports both python2 and python3.
To use all features of asv, you will need either ``conda`` or
``virtualenv``. For more details please check the `asv installation
-webpage <http://asv.readthedocs.org/en/latest/installing.html>`_.
+webpage <https://asv.readthedocs.io/en/latest/installing.html>`_.
To install asv::
@@ -624,7 +624,7 @@ This will display stderr from the benchmarks, and use your local
``python`` that comes from your ``$PATH``.
Information on how to write a benchmark and how to use asv can be found in the
-`asv documentation <http://asv.readthedocs.org/en/latest/writing_benchmarks.html>`_.
+`asv documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
.. _contributing.gbq_integration_tests:
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 38a816060e1bc..27462a08b0011 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -877,7 +877,7 @@ The :ref:`Plotting <visualization>` docs.
<http://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot>`__
`Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter
-<http://pandas-xlsxwriter-charts.readthedocs.org/en/latest/introduction.html>`__
+<https://pandas-xlsxwriter-charts.readthedocs.io/>`__
`Boxplot for each quartile of a stratifying variable
<http://stackoverflow.com/questions/23232989/boxplot-stratified-by-column-in-python-pandas>`__
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 17ebd1f163f4f..087b265ee83f2 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -145,7 +145,7 @@ API
`pandas-datareader <https://github.com/pydata/pandas-datareader>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.org/en/latest/>`_:
+``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.io/en/latest/>`_:
The following data feeds are available:
@@ -170,7 +170,7 @@ PyDatastream is a Python interface to the
SOAP API to return indexed Pandas DataFrames or Panels with financial data.
This package requires valid credentials for this API (non free).
-`pandaSDMX <http://pandasdmx.readthedocs.org>`__
+`pandaSDMX <https://pandasdmx.readthedocs.io>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pandaSDMX is an extensible library to retrieve and acquire statistical data
and metadata disseminated in
@@ -215,7 +215,7 @@ dimensional arrays, rather than the tabular data for which pandas excels.
Out-of-core
-------------
-`Dask <https://dask.readthedocs.org/en/latest/>`__
+`Dask <https://dask.readthedocs.io/en/latest/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dask is a flexible parallel computing library for analytics. Dask
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 6295e6f6cbb68..73685e0be8e7e 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -189,7 +189,7 @@ pandas is equipped with an exhaustive set of unit tests covering about 97% of
the codebase as of this writing. To run it on your machine to verify that
everything is working (and you have all of the dependencies, soft and hard,
installed), make sure you have `nose
-<http://readthedocs.org/docs/nose/en/latest/>`__ and run:
+<https://nose.readthedocs.io/en/latest/>`__ and run:
::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index c07cfe4cd5574..811fca4344121 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2639,8 +2639,8 @@ config options <options>` ``io.excel.xlsx.writer`` and
``io.excel.xls.writer``. pandas will fall back on `openpyxl`_ for ``.xlsx``
files if `Xlsxwriter`_ is not available.
-.. _XlsxWriter: http://xlsxwriter.readthedocs.org
-.. _openpyxl: http://openpyxl.readthedocs.org/
+.. _XlsxWriter: https://xlsxwriter.readthedocs.io
+.. _openpyxl: https://openpyxl.readthedocs.io/
.. _xlwt: http://www.python-excel.org
To specify which writer you want to use, you can pass an engine keyword
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst
index f3df1ebdf25cb..bde97d88a0ee7 100644
--- a/doc/source/r_interface.rst
+++ b/doc/source/r_interface.rst
@@ -17,7 +17,7 @@ rpy2 / R interface
In v0.16.0, the ``pandas.rpy`` interface has been **deprecated and will be
removed in a future version**. Similar functionality can be accessed
- through the `rpy2 <http://rpy2.readthedocs.io/>`__ project.
+ through the `rpy2 <https://rpy2.readthedocs.io/>`__ project.
See the :ref:`updating <rpy.updating>` section for a guide to port your
code from the ``pandas.rpy`` to ``rpy2`` functions.
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst
index e92798ea17448..c25e734a046b2 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials.rst
@@ -138,7 +138,7 @@ Modern Pandas
Excel charts with pandas, vincent and xlsxwriter
------------------------------------------------
-- `Using Pandas and XlsxWriter to create Excel charts <http://pandas-xlsxwriter-charts.readthedocs.org/>`_
+- `Using Pandas and XlsxWriter to create Excel charts <https://pandas-xlsxwriter-charts.readthedocs.io/>`_
Various Tutorials
-----------------
diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt
index a91e0ab9e4961..181cd401c85d6 100644
--- a/doc/source/whatsnew/v0.14.0.txt
+++ b/doc/source/whatsnew/v0.14.0.txt
@@ -401,7 +401,7 @@ through SQLAlchemy (:issue:`2717`, :issue:`4163`, :issue:`5950`, :issue:`6292`).
All databases supported by SQLAlchemy can be used, such
as PostgreSQL, MySQL, Oracle, Microsoft SQL server (see documentation of
SQLAlchemy on `included dialects
-<http://sqlalchemy.readthedocs.org/en/latest/dialects/index.html>`_).
+<https://sqlalchemy.readthedocs.io/en/latest/dialects/index.html>`_).
The functionality of providing DBAPI connection objects will only be supported
for sqlite3 in the future. The ``'mysql'`` flavor is deprecated.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index fc13224d3fe6e..9cb299593076d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -141,7 +141,7 @@ as well as the ``.sum()`` operation.
Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performing multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
-.. _dask: https://dask.readthedocs.org/en/latest/
+.. _dask: https://dask.readthedocs.io/en/latest/
.. _QT: https://wiki.python.org/moin/PyQt
.. _whatsnew_0170.plot:
| As per [their blog post of the 27th April](https://blog.readthedocs.com/securing-subdomains/) ‘Securing subdomains’:
> Starting today, Read the Docs will start hosting projects from subdomains on the domain readthedocs.io, instead of on readthedocs.org. This change addresses some security concerns around site cookies while hosting user generated data on the same domain as our dashboard.
Test Plan: Manually visited all the links I’ve modified.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14406 | 2016-10-12T21:40:28Z | 2016-10-12T22:10:31Z | 2016-10-12T22:10:31Z | 2016-10-12T22:10:37Z |
BUG: Bug in localizing an ambiguous timezone when a boolean is passed | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..0f7edc7a99abc 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -35,7 +35,7 @@ Bug Fixes
-
+- Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`)
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index a85a606075911..c7e4f03fcd792 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -17,7 +17,8 @@
from pytz import NonExistentTimeError
import pandas.util.testing as tm
-from pandas.util.testing import assert_frame_equal, set_timezone
+from pandas.util.testing import (assert_frame_equal, assert_series_equal,
+ set_timezone)
from pandas.compat import lrange, zip
try:
@@ -535,6 +536,44 @@ def test_ambiguous_nat(self):
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
self.assert_numpy_array_equal(di_test.values, localized.values)
+ def test_ambiguous_bool(self):
+ # make sure that we are correctly accepting bool values as ambiguous
+
+ # gh-14402
+ t = Timestamp('2015-11-01 01:00:03')
+ expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
+ expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
+
+ def f():
+ t.tz_localize('US/Central')
+ self.assertRaises(pytz.AmbiguousTimeError, f)
+
+ result = t.tz_localize('US/Central', ambiguous=True)
+ self.assertEqual(result, expected0)
+
+ result = t.tz_localize('US/Central', ambiguous=False)
+ self.assertEqual(result, expected1)
+
+ s = Series([t])
+ expected0 = Series([expected0])
+ expected1 = Series([expected1])
+
+ def f():
+ s.dt.tz_localize('US/Central')
+ self.assertRaises(pytz.AmbiguousTimeError, f)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=True)
+ assert_series_equal(result, expected0)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=[True])
+ assert_series_equal(result, expected0)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=False)
+ assert_series_equal(result, expected1)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=[False])
+ assert_series_equal(result, expected1)
+
def test_nonexistent_raise_coerce(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
@@ -629,14 +668,14 @@ def test_localized_at_time_between_time(self):
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
def test_string_index_alias_tz_aware(self):
@@ -723,7 +762,7 @@ def test_frame_no_datetime64_dtype(self):
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
@@ -1324,7 +1363,7 @@ def test_append_aware(self):
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='US/Eastern')
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')
@@ -1336,7 +1375,7 @@ def test_append_aware(self):
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='UTC')
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
utc = rng1.tz
self.assertEqual(utc, ts_result.index.tz)
@@ -1352,7 +1391,7 @@ def test_append_aware(self):
exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'),
Timestamp('1/1/2011 02:00', tz='US/Central')])
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
def test_append_dst(self):
rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',
@@ -1368,7 +1407,7 @@ def test_append_dst(self):
'2016-08-01 02:00', '2016-08-01 03:00'],
tz='US/Eastern')
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
- tm.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
def test_append_aware_naive(self):
@@ -1429,7 +1468,7 @@ def test_arith_utc_convert(self):
expected = uts1 + uts2
self.assertEqual(result.index.tz, pytz.UTC)
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 9073ad0abd535..bab45595cd60f 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4155,6 +4155,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
"""
cdef:
ndarray[int64_t] trans, deltas, idx_shifted
+ ndarray ambiguous_array
Py_ssize_t i, idx, pos, ntrans, n = len(vals)
int64_t *tdata
int64_t v, left, right
@@ -4190,11 +4191,18 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
infer_dst = True
elif ambiguous == 'NaT':
fill = True
+ elif isinstance(ambiguous, bool):
+ is_dst = True
+ if ambiguous:
+ ambiguous_array = np.ones(len(vals), dtype=bool)
+ else:
+ ambiguous_array = np.zeros(len(vals), dtype=bool)
elif hasattr(ambiguous, '__iter__'):
is_dst = True
if len(ambiguous) != len(vals):
raise ValueError(
"Length of ambiguous bool-array must be the same size as vals")
+ ambiguous_array = np.asarray(ambiguous)
trans, deltas, typ = _get_dst_info(tz)
@@ -4286,7 +4294,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
if infer_dst and dst_hours[i] != NPY_NAT:
result[i] = dst_hours[i]
elif is_dst:
- if ambiguous[i]:
+ if ambiguous_array[i]:
result[i] = left
else:
result[i] = right
| closes #14402
```
In [1]: s = pd.Series([Timestamp('2015-11-01 01:00:03')])
In [2]: s
Out[2]:
0 2015-11-01 01:00:03
dtype: datetime64[ns]
In [3]: s.dt.tz_localize('US/Central', ambiguous=False)
Out[3]:
0 2015-11-01 01:00:03-06:00
dtype: datetime64[ns, US/Central]
In [4]: s.dt.tz_localize('US/Central', ambiguous=True)
Out[4]:
0 2015-11-01 01:00:03-05:00
dtype: datetime64[ns, US/Central]
In [5]: s.dt.tz_localize('US/Central')
AmbiguousTimeError: Cannot infer dst time from Timestamp('2015-11-01 01:00:03'), try using the 'ambiguous' argument
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14405 | 2016-10-12T21:37:07Z | 2016-10-13T10:14:19Z | null | 2016-10-13T10:14:19Z |
Bug: Made it so that 0 was included in uint8 | diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index fec56328c1721..b50bf9dc448bc 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -205,7 +205,7 @@ def to_numeric(arg, errors='raise', downcast=None):
if downcast in ('integer', 'signed'):
typecodes = np.typecodes['Integer']
- elif downcast == 'unsigned' and np.min(values) > 0:
+ elif downcast == 'unsigned' and np.min(values) >= 0:
typecodes = np.typecodes['UnsignedInteger']
elif downcast == 'float':
typecodes = np.typecodes['Float']
| - [ ] closes #14401
- [ ] tests added / passed
- [ ] passes git diff upstream/master | flake8 --diff
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14404 | 2016-10-12T21:17:48Z | 2016-10-13T04:23:00Z | null | 2016-10-13T04:23:00Z |
BUG: Dataframe constructor when given dict with None value | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..6dddebecd06e8 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -32,6 +32,7 @@ Bug Fixes
~~~~~~~~~
+- Bug in ``pd.DataFrame`` where constructor fails when given dict with ``None`` value (:issue:`14381`)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1c6b13885dd01..188204d83d985 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2915,8 +2915,8 @@ def create_from_value(value, index, dtype):
return subarr
- # scalar like
- if subarr.ndim == 0:
+ # scalar like, GH
+ if getattr(subarr, 'ndim', 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index d21db5ba52a45..e55ba3e161ed9 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -259,6 +259,14 @@ def test_constructor_dict(self):
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assert_index_equal(frame.index, Index([], dtype=np.int64))
+ # GH 14381
+ # Dict with None value
+ frame_none = DataFrame(dict(a=None), index=[0])
+ frame_none_list = DataFrame(dict(a=[None]), index=[0])
+ tm.assert_equal(frame_none.get_value(0, 'a'), None)
+ tm.assert_equal(frame_none_list.get_value(0, 'a'), None)
+ tm.assert_frame_equal(frame_none, frame_none_list)
+
# GH10856
# dict with scalar values should raise error, even if columns passed
with tm.assertRaises(ValueError):
| - [x] closes #14381
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14392 | 2016-10-11T01:38:12Z | 2016-10-31T20:53:51Z | 2016-10-31T20:53:51Z | 2016-10-31T20:54:03Z |
[BUG] handle } in line delimited json | diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py
index 3f80c4c0c6338..5419571c75b43 100644
--- a/asv_bench/benchmarks/packers.py
+++ b/asv_bench/benchmarks/packers.py
@@ -547,6 +547,31 @@ def remove(self, f):
pass
+class packers_write_json_lines(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.f = '__test__.msg'
+ self.N = 100000
+ self.C = 5
+ self.index = date_range('20000101', periods=self.N, freq='H')
+ self.df = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
+ self.remove(self.f)
+ self.df.index = np.arange(self.N)
+
+ def time_packers_write_json_lines(self):
+ self.df.to_json(self.f, orient="records", lines=True)
+
+ def teardown(self):
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
+
class packers_write_json_T(object):
goal_time = 0.2
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..843dc980d420c 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -45,3 +45,4 @@ Bug Fixes
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
+- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
diff --git a/pandas/io/json.py b/pandas/io/json.py
index e697351484f68..66a8e76c09a6f 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -607,14 +607,19 @@ def _convert_to_line_delimits(s):
s = s[1:-1]
num_open_brackets_seen = 0
commas_to_replace = []
+ in_quotes = False
for idx, char in enumerate(s): # iter through to find all
- if char == ',': # commas that should be \n
- if num_open_brackets_seen == 0:
+ if char == '"' and idx > 0 and s[idx - 1] != '\\':
+ in_quotes = ~in_quotes
+ elif char == ',': # commas that should be \n
+ if num_open_brackets_seen == 0 and not in_quotes:
commas_to_replace.append(idx)
elif char == '{':
- num_open_brackets_seen += 1
+ if not in_quotes:
+ num_open_brackets_seen += 1
elif char == '}':
- num_open_brackets_seen -= 1
+ if not in_quotes:
+ num_open_brackets_seen -= 1
s_arr = np.array(list(s)) # Turn to an array to set
s_arr[commas_to_replace] = '\n' # all commas at once.
s = ''.join(s_arr)
diff --git a/pandas/io/tests/json/test_pandas.py b/pandas/io/tests/json/test_pandas.py
index 47bdd25572fc7..65311b5160aa7 100644
--- a/pandas/io/tests/json/test_pandas.py
+++ b/pandas/io/tests/json/test_pandas.py
@@ -962,6 +962,12 @@ def test_to_jsonl(self):
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
self.assertEqual(result, expected)
+ df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
+ self.assertEqual(result, expected)
+ assert_frame_equal(pd.read_json(result, lines=True), df)
+
def test_latin_encoding(self):
if compat.PY2:
self.assertRaisesRegexp(
| - [x] closes #14390
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14391 | 2016-10-10T23:08:01Z | 2016-10-15T13:38:33Z | null | 2016-10-15T19:18:43Z |
BUG: Allow concat to take string axis names | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..01e9d2ff4ce7f 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -44,4 +44,5 @@ Bug Fixes
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
+- Bug in ``pd.concat`` where ``axis`` cannot take string parameters ``'rows'`` or ``'columns'`` (:issue:`14369`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index b7cd8a1c01224..2aa0b39051390 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -347,6 +347,42 @@ def test_concat_named_keys(self):
names=[None, None]))
assert_frame_equal(concatted_unnamed, expected_unnamed)
+ def test_concat_axis_parameter(self):
+ # GH 14369
+ df1 = pd.DataFrame({'A': [0.1, 0.2]}, index=range(2))
+ df2 = pd.DataFrame({'A': [0.3, 0.4]}, index=range(2))
+
+ expected_index = pd.DataFrame(
+ {'A': [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
+ concatted_index = pd.concat([df1, df2], axis='index')
+ assert_frame_equal(concatted_index, expected_index)
+
+ concatted_row = pd.concat([df1, df2], axis='rows')
+ assert_frame_equal(concatted_row, expected_index)
+
+ concatted_0 = pd.concat([df1, df2], axis=0)
+ assert_frame_equal(concatted_0, expected_index)
+
+ expected_columns = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=['A', 'A'])
+ concatted_columns = pd.concat([df1, df2], axis='columns')
+ assert_frame_equal(concatted_columns, expected_columns)
+
+ concatted_1 = pd.concat([df1, df2], axis=1)
+ assert_frame_equal(concatted_1, expected_columns)
+
+ series1 = pd.Series([0.1, 0.2])
+ series2 = pd.Series([0.3, 0.4])
+
+ expected_row_series = pd.Series(
+ [0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
+ concatted_row_series = pd.concat([series1, series2], axis='rows')
+ assert_series_equal(concatted_row_series, expected_row_series)
+
+ # A Series has no 'columns' axis
+ with assertRaisesRegexp(ValueError, 'No axis named columns'):
+ pd.concat([series1, series2], axis='columns')
+
class TestDataFrameCombineFirst(tm.TestCase, TestData):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index a8c43195f5552..798f268ee4549 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1283,7 +1283,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
- axis : {0, 1, ...}, default 0
+ axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
@@ -1411,6 +1411,10 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
sample = objs[0]
self.objs = objs
+ # Check for string axis parameter
+ if isinstance(axis, str):
+ axis = objs[0]._get_axis_number(axis)
+
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
| Continued in #14416
---
- [x] closes #14369
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
This uses [`_get_axis_number`](https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L327-L338) to convert a string `axis` parameter to an integer. This will allow the `concat` method to use any other aliases given to dataframes axes in the future while not disrupting other uses of `axis` in the `concat` method. If this pattern works well, it could be used for the other [merge functions](https://github.com/pydata/pandas/blob/master/pandas/tools/merge.py) also.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14389 | 2016-10-10T21:58:56Z | 2016-10-13T14:35:45Z | null | 2023-05-11T01:14:18Z |
DOC: Add details to DataFrame groupby transform | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 8484ccd69a983..cbe3588104439 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -580,9 +580,21 @@ Transformation
--------------
The ``transform`` method returns an object that is indexed the same (same size)
-as the one being grouped. Thus, the passed transform function should return a
-result that is the same size as the group chunk. For example, suppose we wished
-to standardize the data within each group:
+as the one being grouped. The transform function must:
+
+* Return a result that is either the same size as the group chunk or
+ broadcastable to the size of the group chunk (e.g., a scalar,
+ ``grouped.transform(lambda x: x.iloc[-1])``).
+* Operate column-by-column on the group chunk. The transform is applied to
+ the first group chunk using chunk.apply.
+* Not perform in-place operations on the group chunk. Group chunks should
+ be treated as immutable, and changes to a group chunk may produce unexpected
+ results. For example, when using ``fillna``, ``inplace`` must be ``False``
+ (``grouped.transform(lambda x: x.fillna(inplace=False))``).
+* (Optionally) operates on the entire group chunk. If this is supported, a
+ fast path is used starting from the *second* chunk.
+
+For example, suppose we wished to standardize the data within each group:
.. ipython:: python
@@ -620,6 +632,21 @@ We can also visually compare the original and transformed data sets.
@savefig groupby_transform_plot.png
compare.plot()
+Transformation functions that have lower dimension outputs are broadcast to
+match the shape of the input array.
+
+.. ipython:: python
+
+ data_range = lambda x: x.max() - x.min()
+ ts.groupby(key).transform(data_range)
+
+Alternatively the built-in methods can be could be used to produce the same
+outputs
+
+.. ipython:: python
+
+ ts.groupby(key).transform('max') - ts.groupby(key).transform('min')
+
Another common data transform is to replace missing data with the group mean.
.. ipython:: python
@@ -664,8 +691,9 @@ and that the transformed data contains no NAs.
.. note::
- Some functions when applied to a groupby object will automatically transform the input, returning
- an object of the same shape as the original. Passing ``as_index=False`` will not affect these transformation methods.
+ Some functions when applied to a groupby object will automatically transform
+ the input, returning an object of the same shape as the original. Passing
+ ``as_index=False`` will not affect these transformation methods.
For example: ``fillna, ffill, bfill, shift``.
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index ba2de295fa0a9..c52ddb8bf7016 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -3674,10 +3674,25 @@ def transform(self, func, *args, **kwargs):
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
+ The current implementation imposes three requirements on f:
+
+ * f must return a value that either has the same shape as the input
+ subframe or can be broadcast to the shape of the input subframe.
+ For example, f returns a scalar it will be broadcast to have the
+ same shape as the input subframe.
+ * f must support application column-by-column in the subframe. If f
+ also supports application to the entire subframe, then a fast path
+ is used starting from the second chunk.
+ * f must not mutate subframes. Mutation is not supported and may
+ produce unexpected results.
+
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
+ # Same shape
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
+ # Broadcastable
+ >>> grouped.transform(lambda x: x.max() - x.min())
"""
# optimized transforms
| - [X] closes #13543
- [X] tests added / passed
- [X] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
Add requirements for user function in groupby transform
closes #13543
[skip ci]
| https://api.github.com/repos/pandas-dev/pandas/pulls/14388 | 2016-10-10T17:21:22Z | 2017-03-25T18:40:39Z | null | 2018-04-22T21:12:07Z |
BUG: Fix issue with inserting duplicate columns in a dataframe (GH14291) | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..16a933b6ce8ee 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -45,3 +45,4 @@ Bug Fixes
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
+- Bug in ``DataFrame.insert`` where multiple calls with duplicate columns can fail (:issue:`14291`)
\ No newline at end of file
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1798a35168265..7d33f929352ae 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2487,7 +2487,7 @@ def _set_item(self, key, value):
# check if we are modifying a copy
# try to set first as we want an invalid
- # value exeption to occur first
+ # value exception to occur first
if len(self):
self._check_setitem_copy()
@@ -2506,7 +2506,7 @@ def insert(self, loc, column, value, allow_duplicates=False):
value : int, Series, or array-like
"""
self._ensure_valid_index(value)
- value = self._sanitize_column(column, value)
+ value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
@@ -2590,9 +2590,15 @@ def assign(self, **kwargs):
return data
- def _sanitize_column(self, key, value):
- # Need to make sure new columns (which go into the BlockManager as new
- # blocks) are always copied
+ def _sanitize_column(self, key, value, broadcast=True):
+ """
+ Ensures new columns (which go into the BlockManager as new blocks) are
+ always copied.
+
+ The "broadcast" parameter indicates whether all columns with the given
+ key should be returned. The default behavior is desirable when
+ calling this method prior to modifying existing values in a DataFrame.
+ """
def reindexer(value):
# reindex if necessary
@@ -2665,7 +2671,7 @@ def reindexer(value):
return value
# broadcast across multiple columns if necessary
- if key in self.columns and value.ndim == 1:
+ if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 8eeff045d1fac..0ccbf08daf41b 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -302,7 +302,12 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
- def _sanitize_column(self, key, value):
+ def _sanitize_column(self, key, value, broadcast=True):
+ """
+ The "broadcast" parameter was added to match the method signature of
+ DataFrame._sanitize_column. However, this method does not make use of
+ broadcasting.
+ """
sp_maker = lambda x, index=None: SparseArray(
x, index=index, fill_value=self._default_fill_value,
kind=self._default_kind)
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 5beab1565e538..7059ab969587d 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -163,6 +163,15 @@ def test_insert(self):
exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
assert_frame_equal(df, exp)
+ # GH 14291
+ df = DataFrame()
+ df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
+ df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
+ df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
+ exp = DataFrame([['a', 'a', 'a'], ['b', 'b', 'b'],
+ ['c', 'c', 'c']], columns=['A', 'A', 'A'])
+ assert_frame_equal(df, exp)
+
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
| - [x] closes #14291
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
I've been sitting on this simple fix because it seems a little kludgy. SparseDataFrame doesn't do anything parallel to broadcasting in its _sanitize_column method... should it?
| https://api.github.com/repos/pandas-dev/pandas/pulls/14384 | 2016-10-10T01:02:27Z | 2016-10-15T22:18:27Z | null | 2016-10-15T22:18:27Z |
ENH: feather support in the pandas IO api | diff --git a/appveyor.yml b/appveyor.yml
index 84c34b34626b9..a8e5218ab2c9f 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -80,6 +80,7 @@ install:
- cmd: conda config --set ssl_verify false
# add the pandas channel *before* defaults to have defaults take priority
+ - cmd: conda config --add channels conda-forge
- cmd: conda config --add channels pandas
- cmd: conda config --remove channels defaults
- cmd: conda config --add channels defaults
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index f35e216550a2d..542d22d9fa871 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -71,7 +71,8 @@ else
conda config --set always_yes true --set changeps1 false || exit 1
conda update -q conda
- # add the pandas channel *before* defaults to have defaults take priority
+ # add the pandas channel to take priority
+ # to add extra packages
echo "add channels"
conda config --add channels pandas || exit 1
conda config --remove channels defaults || exit 1
diff --git a/ci/requirements-2.7-64.run b/ci/requirements-2.7-64.run
index 94472dafd565d..f953682f52d45 100644
--- a/ci/requirements-2.7-64.run
+++ b/ci/requirements-2.7-64.run
@@ -3,7 +3,7 @@ pytz
numpy=1.10*
xlwt
numexpr
-pytables
+pytables==3.2.2
matplotlib
openpyxl
xlrd
diff --git a/ci/requirements-2.7.sh b/ci/requirements-2.7.sh
new file mode 100644
index 0000000000000..64d470e5c6e0e
--- /dev/null
+++ b/ci/requirements-2.7.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+source activate pandas
+
+echo "install 27"
+
+conda install -n pandas -c conda-forge feather-format
diff --git a/ci/requirements-3.5-64.run b/ci/requirements-3.5-64.run
index 96de21e3daa5e..905c2ff3625bd 100644
--- a/ci/requirements-3.5-64.run
+++ b/ci/requirements-3.5-64.run
@@ -1,11 +1,12 @@
python-dateutil
pytz
-numpy=1.10*
+numpy
openpyxl
xlsxwriter
xlrd
xlwt
scipy
+feather-format
numexpr
pytables
matplotlib
diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run
index 1d1cb38fd57a6..e15ca6079b4fe 100644
--- a/ci/requirements-3.5.run
+++ b/ci/requirements-3.5.run
@@ -18,6 +18,4 @@ pymysql
psycopg2
xarray
s3fs
-
-# incompat with conda ATM
-# beautiful-soup
+beautifulsoup4
diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5.sh
new file mode 100644
index 0000000000000..d0f0b81802dc6
--- /dev/null
+++ b/ci/requirements-3.5.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+source activate pandas
+
+echo "install 35"
+
+conda install -n pandas -c conda-forge feather-format
diff --git a/ci/requirements-3.5_OSX.run b/ci/requirements-3.5_OSX.run
index eceb2f9cdcebc..1d83474d10f2f 100644
--- a/ci/requirements-3.5_OSX.run
+++ b/ci/requirements-3.5_OSX.run
@@ -13,6 +13,4 @@ jinja2
bottleneck
xarray
s3fs
-
-# incompat with conda ATM
-# beautiful-soup
+beautifulsoup4
diff --git a/ci/requirements-3.5_OSX.sh b/ci/requirements-3.5_OSX.sh
new file mode 100644
index 0000000000000..cfbd2882a8a2d
--- /dev/null
+++ b/ci/requirements-3.5_OSX.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+source activate pandas
+
+echo "install 35_OSX"
+
+conda install -n pandas -c conda-forge feather-format
diff --git a/doc/source/api.rst b/doc/source/api.rst
index b8157929bd940..272dfe72eafe7 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -83,6 +83,14 @@ HDFStore: PyTables (HDF5)
HDFStore.get
HDFStore.select
+Feather
+~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ read_feather
+
SAS
~~~
@@ -1015,6 +1023,7 @@ Serialization / IO / Conversion
DataFrame.to_excel
DataFrame.to_json
DataFrame.to_html
+ DataFrame.to_feather
DataFrame.to_latex
DataFrame.to_stata
DataFrame.to_msgpack
diff --git a/doc/source/install.rst b/doc/source/install.rst
index f62342fa52e5c..4787b3356ee9f 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -247,6 +247,7 @@ Optional Dependencies
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended.
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended.
+* `Feather Format <https://github.com/wesm/feather>`__: necessary for feather-based storage, version 0.3.1 or higher.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are:
- `psycopg2 <http://initd.org/psycopg/>`__: for PostgreSQL
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 9d51d2599d668..259f9605d8313 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -34,6 +34,7 @@ object.
* :ref:`read_csv<io.read_csv_table>`
* :ref:`read_excel<io.excel_reader>`
* :ref:`read_hdf<io.hdf5>`
+ * :ref:`read_feather<io.feather>`
* :ref:`read_sql<io.sql>`
* :ref:`read_json<io.json_reader>`
* :ref:`read_msgpack<io.msgpack>` (experimental)
@@ -49,6 +50,7 @@ The corresponding ``writer`` functions are object methods that are accessed like
* :ref:`to_csv<io.store_in_csv>`
* :ref:`to_excel<io.excel_writer>`
* :ref:`to_hdf<io.hdf5>`
+ * :ref:`to_feather<io.feather>`
* :ref:`to_sql<io.sql>`
* :ref:`to_json<io.json_writer>`
* :ref:`to_msgpack<io.msgpack>` (experimental)
@@ -4152,6 +4154,68 @@ object). This cannot be changed after table creation.
os.remove('store.h5')
+.. _io.feather:
+
+Feather
+-------
+
+.. versionadded:: 0.20.0
+
+Feather provides binary columnar serialization for data frames. It is designed to make reading and writing data
+frames efficient, and to make sharing data across data analysis languages easy.
+
+Feather is designed to faithfully serialize and de-serialize DataFrames, supporting all of the pandas
+dtypes, including extension dtypes such as categorical and datetime with tz.
+
+Several caveats.
+
+- This is a newer library, and the format, though stable, is not guaranteed to be backward compatible
+ to the earlier versions.
+- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an
+ error if a non-default one is provided. You can simply ``.reset_index()`` in order to store the index.
+- Duplicate column names and non-string columns names are not supported
+- Non supported types include ``Period`` and actual python object types. These will raise a helpful error message
+ on an attempt at serialization.
+
+See the `Full Documentation <https://github.com/wesm/feather>`__
+
+.. ipython:: python
+
+ df = pd.DataFrame({'a': list('abc'),
+ 'b': list(range(1, 4)),
+ 'c': np.arange(3, 6).astype('u1'),
+ 'd': np.arange(4.0, 7.0, dtype='float64'),
+ 'e': [True, False, True],
+ 'f': pd.Categorical(list('abc')),
+ 'g': pd.date_range('20130101', periods=3),
+ 'h': pd.date_range('20130101', periods=3, tz='US/Eastern'),
+ 'i': pd.date_range('20130101', periods=3, freq='ns')})
+
+ df
+ df.dtypes
+
+Write to a feather file.
+
+.. ipython:: python
+
+ df.to_feather('example.fth)
+
+Read from a feather file.
+
+.. ipython:: python
+
+ result = pd.read_feather('example.fth')
+ result
+
+ # we preserve dtypes
+ result.dtypes
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('example.fth')
+
.. _io.sql:
SQL Queries
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 40bd8bc4154a6..0873e4b34b0b1 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -22,6 +22,9 @@ Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations
New features
~~~~~~~~~~~~
+- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
+
+
.. _whatsnew_0200.enhancements.dataio_dtype:
diff --git a/pandas/api/tests/test_api.py b/pandas/api/tests/test_api.py
index bc126447213ca..b13b4d7de60ca 100644
--- a/pandas/api/tests/test_api.py
+++ b/pandas/api/tests/test_api.py
@@ -95,7 +95,7 @@ class TestPDApi(Base, tm.TestCase):
'read_gbq', 'read_hdf', 'read_html', 'read_json',
'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
'read_sql_query', 'read_sql_table', 'read_stata',
- 'read_table']
+ 'read_table', 'read_feather']
# top-level to_* funcs
funcs_to = ['to_datetime', 'to_msgpack',
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ba1e08ecc482f..d12b8af35469b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1477,6 +1477,21 @@ def to_stata(self, fname, convert_dates=None, write_index=True,
variable_labels=variable_labels)
writer.write_file()
+ def to_feather(self, fname):
+ """
+ write out the binary feather-format for DataFrames
+
+ .. versionadded:: 0.20.0
+
+ Parameters
+ ----------
+ fname : str
+ string file path
+
+ """
+ from pandas.io.feather_format import to_feather
+ to_feather(self, fname)
+
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
diff --git a/pandas/io/api.py b/pandas/io/api.py
index 920ece9c4c3a8..0bd86c85b4b8b 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -12,6 +12,7 @@
from pandas.io.html import read_html
from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.sas.sasreader import read_sas
+from pandas.io.feather_format import read_feather
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
new file mode 100644
index 0000000000000..ac74ac4823613
--- /dev/null
+++ b/pandas/io/feather_format.py
@@ -0,0 +1,101 @@
+""" feather-format compat """
+
+from distutils.version import LooseVersion
+from pandas import DataFrame, RangeIndex, Int64Index
+from pandas.compat import range
+
+
+def _try_import():
+ # since pandas is a dependency of feather
+ # we need to import on first use
+
+ try:
+ import feather
+ except ImportError:
+
+ # give a nice error message
+ raise ImportError("the feather-format library is not installed\n"
+ "you can install via conda\n"
+ "conda install feather-format -c conda-forge\n"
+ "or via pip\n"
+ "pip install feather-format\n")
+
+ try:
+ feather.__version__ >= LooseVersion('0.3.1')
+ except AttributeError:
+ raise ImportError("the feather-format library must be >= "
+ "version 0.3.1\n"
+ "you can install via conda\n"
+ "conda install feather-format -c conda-forge"
+ "or via pip\n"
+ "pip install feather-format\n")
+
+ return feather
+
+
+def to_feather(df, path):
+ """
+ Write a DataFrame to the feather-format
+
+ Parameters
+ ----------
+ df : DataFrame
+ path : string
+ File path
+ """
+ if not isinstance(df, DataFrame):
+ raise ValueError("feather only support IO with DataFrames")
+
+ feather = _try_import()
+ valid_types = {'string', 'unicode'}
+
+ # validate index
+ # --------------
+
+ # validate that we have only a default index
+ # raise on anything else as we don't serialize the index
+
+ if not isinstance(df.index, Int64Index):
+ raise ValueError("feather does not serializing {} "
+ "for the index; you can .reset_index()"
+ "to make the index into column(s)".format(
+ type(df.index)))
+
+ if not df.index.equals(RangeIndex.from_range(range(len(df)))):
+ raise ValueError("feather does not serializing a non-default index "
+ "for the index; you can .reset_index()"
+ "to make the index into column(s)")
+
+ if df.index.name is not None:
+ raise ValueError("feather does not serialize index meta-data on a "
+ "default index")
+
+ # validate columns
+ # ----------------
+
+ # must have value column names (strings only)
+ if df.columns.inferred_type not in valid_types:
+ raise ValueError("feather must have string column names")
+
+ feather.write_dataframe(df, path)
+
+
+def read_feather(path):
+ """
+ Load a feather-format object from the file path
+
+ .. versionadded 0.20.0
+
+ Parameters
+ ----------
+ path : string
+ File path
+
+ Returns
+ -------
+ type of object stored in file
+
+ """
+
+ feather = _try_import()
+ return feather.read_dataframe(path)
diff --git a/pandas/io/tests/test_feather.py b/pandas/io/tests/test_feather.py
new file mode 100644
index 0000000000000..b8b85d7dbbece
--- /dev/null
+++ b/pandas/io/tests/test_feather.py
@@ -0,0 +1,123 @@
+""" test feather-format compat """
+
+import nose
+
+import numpy as np
+import pandas as pd
+
+from pandas.io.feather_format import to_feather, read_feather
+
+try:
+ import feather # noqa
+except ImportError:
+ raise nose.SkipTest('no feather-format installed')
+
+from feather import FeatherError
+import pandas.util.testing as tm
+from pandas.util.testing import assert_frame_equal, ensure_clean
+
+
+class TestFeather(tm.TestCase):
+ _multiprocess_can_split_ = True
+
+ def setUp(self):
+ pass
+
+ def check_error_on_write(self, df, exc):
+ # check that we are raising the exception
+ # on writing
+
+ def f():
+ with ensure_clean() as path:
+ to_feather(df, path)
+ self.assertRaises(exc, f)
+
+ def check_round_trip(self, df):
+
+ with ensure_clean() as path:
+ to_feather(df, path)
+ result = read_feather(path)
+ assert_frame_equal(result, df)
+
+ def test_error(self):
+
+ for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
+ np.array([1, 2, 3])]:
+ self.check_error_on_write(obj, ValueError)
+
+ def test_basic(self):
+
+ df = pd.DataFrame({'a': list('abc'),
+ 'b': list(range(1, 4)),
+ 'c': np.arange(3, 6).astype('u1'),
+ 'd': np.arange(4.0, 7.0, dtype='float64'),
+ 'e': [True, False, True],
+ 'f': pd.Categorical(list('abc')),
+ 'g': pd.date_range('20130101', periods=3),
+ 'h': pd.date_range('20130101', periods=3,
+ tz='US/Eastern'),
+ 'i': pd.date_range('20130101', periods=3,
+ freq='ns')})
+
+ self.check_round_trip(df)
+
+ def test_strided_data_issues(self):
+
+ # strided data issuehttps://github.com/wesm/feather/issues/97
+ df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('abc'))
+ self.check_error_on_write(df, FeatherError)
+
+ def test_duplicate_columns(self):
+
+ # https://github.com/wesm/feather/issues/53
+ # not currently able to handle duplicate columns
+ df = pd.DataFrame(np.arange(12).reshape(4, 3),
+ columns=list('aaa')).copy()
+ self.check_error_on_write(df, ValueError)
+
+ def test_stringify_columns(self):
+
+ df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
+ self.check_error_on_write(df, ValueError)
+
+ def test_unsupported(self):
+
+ # period
+ df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
+ self.check_error_on_write(df, ValueError)
+
+ # non-strings
+ df = pd.DataFrame({'a': ['a', 1, 2.0]})
+ self.check_error_on_write(df, ValueError)
+
+ def test_write_with_index(self):
+
+ df = pd.DataFrame({'A': [1, 2, 3]})
+ self.check_round_trip(df)
+
+ # non-default index
+ for index in [[2, 3, 4],
+ pd.date_range('20130101', periods=3),
+ list('abc'),
+ [1, 3, 4],
+ pd.MultiIndex.from_tuples([('a', 1), ('a', 2),
+ ('b', 1)]),
+ ]:
+
+ df.index = index
+ self.check_error_on_write(df, ValueError)
+
+ # index with meta-data
+ df.index = [0, 1, 2]
+ df.index.name = 'foo'
+ self.check_error_on_write(df, ValueError)
+
+ # column multi-index
+ df.index = [0, 1, 2]
+ df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]),
+ self.check_error_on_write(df, ValueError)
+
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index 657681d4c33ce..fac76be676398 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -80,6 +80,7 @@ def show_versions(as_json=False):
("bottleneck", lambda mod: mod.__version__),
("tables", lambda mod: mod.__version__),
("numexpr", lambda mod: mod.__version__),
+ ("feather", lambda mod: mod.version.version),
("matplotlib", lambda mod: mod.__version__),
("openpyxl", lambda mod: mod.__version__),
("xlrd", lambda mod: mod.__VERSION__),
| closes #13092
| https://api.github.com/repos/pandas-dev/pandas/pulls/14383 | 2016-10-10T00:34:48Z | 2016-12-26T22:49:17Z | null | 2016-12-29T12:06:01Z |
BUG: pivot_table may raise TypeError without values | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 5180b9a092f6c..00171ca9794f0 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -48,3 +48,6 @@ Bug Fixes
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
+
+- Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns``
+ is not scalar and ``values`` is not specified (:issue:`14380`)
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 94b464f6fca6c..9e064a1d1fc99 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -101,10 +101,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
else:
values_multi = False
values = [values]
- else:
- values = list(data.columns.drop(keys))
- if values_passed:
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
@@ -117,6 +114,15 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
if len(to_filter) < len(data.columns):
data = data[to_filter]
+ else:
+ values = data.columns
+ for key in keys:
+ try:
+ values = values.drop(key)
+ except (TypeError, ValueError):
+ pass
+ values = list(values)
+
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index 75c6db23b4bc7..5944fa1b34611 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -131,6 +131,39 @@ def test_pivot_dtypes(self):
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
+ def test_pivot_no_values(self):
+ # GH 14380
+ idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
+ '2011-01-01', '2011-01-02'])
+ df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
+ index=idx)
+ res = df.pivot_table(index=df.index.month, columns=df.index.day)
+
+ exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
+ exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
+ index=[1, 2], columns=exp_columns)
+ tm.assert_frame_equal(res, exp)
+
+ df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
+ 'dt': pd.date_range('2011-01-01', freq='D',
+ periods=5)},
+ index=idx)
+ res = df.pivot_table(index=df.index.month,
+ columns=pd.Grouper(key='dt', freq='M'))
+ exp_columns = pd.MultiIndex.from_tuples([('A',
+ pd.Timestamp('2011-01-31'))])
+ exp_columns.names = [None, 'dt']
+ exp = pd.DataFrame([3.25, 2.0],
+ index=[1, 2], columns=exp_columns)
+ tm.assert_frame_equal(res, exp)
+
+ res = df.pivot_table(index=pd.Grouper(freq='A'),
+ columns=pd.Grouper(key='dt', freq='M'))
+ exp = pd.DataFrame([3],
+ index=pd.DatetimeIndex(['2011-12-31']),
+ columns=exp_columns)
+ tm.assert_frame_equal(res, exp)
+
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
| - [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
`pivot_table` raises `TypeError` when `index` or `columns` is array-like and `values` is not specified.
```
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02', '2011-01-01', '2011-01-02']))
df.pivot_table(index=df.index.month, columns=df.index.day)
# TypeError: unhashable type: 'numpy.ndarray'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14380 | 2016-10-08T22:40:07Z | 2016-10-20T10:59:43Z | null | 2016-10-20T11:46:29Z |
BUG: Convert float freqstrs to ints at finer resolution | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index db5bd22393e64..545b4380d9b75 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -58,4 +58,4 @@ Bug Fixes
- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
- Bug in ``df.groupby`` where ``TypeError`` raised when ``pd.Grouper(key=...)`` is passed in a list (:issue:`14334`)
- Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns``
- is not scalar and ``values`` is not specified (:issue:`14380`)
\ No newline at end of file
+ is not scalar and ``values`` is not specified (:issue:`14380`)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index d0009efd2d994..5cc9d575521f3 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -52,6 +52,9 @@ Other enhancements
- ``pd.read_excel`` now preserves sheet order when using ``sheetname=None`` (:issue:`9930`)
+
+- Multiple offset aliases with decimal points are now supported (e.g. '0.5min' is parsed as '30s') (:issue:`8419`)
+
- New ``UnsortedIndexError`` (subclass of ``KeyError``) raised when indexing/slicing into an
unsorted MultiIndex (:issue:`11897`). This allows differentiation between errors due to lack
of sorting or an incorrect key. See :ref:`here <advanced.unsorted>`
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index 5565f25937394..2d92b9f192328 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -45,12 +45,12 @@ cdef bint PY2 = version_info[0] == 2
cdef int64_t NPY_NAT = util.get_nat()
-cdef int US_RESO = frequencies.US_RESO
-cdef int MS_RESO = frequencies.MS_RESO
-cdef int S_RESO = frequencies.S_RESO
-cdef int T_RESO = frequencies.T_RESO
-cdef int H_RESO = frequencies.H_RESO
-cdef int D_RESO = frequencies.D_RESO
+cdef int RESO_US = frequencies.RESO_US
+cdef int RESO_MS = frequencies.RESO_MS
+cdef int RESO_SEC = frequencies.RESO_SEC
+cdef int RESO_MIN = frequencies.RESO_MIN
+cdef int RESO_HR = frequencies.RESO_HR
+cdef int RESO_DAY = frequencies.RESO_DAY
cdef extern from "period_helper.h":
ctypedef struct date_info:
@@ -516,7 +516,7 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
pandas_datetimestruct dts
- int reso = D_RESO, curr_reso
+ int reso = RESO_DAY, curr_reso
if tz is not None:
tz = maybe_get_tz(tz)
@@ -535,20 +535,20 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None):
cdef inline int _reso_stamp(pandas_datetimestruct *dts):
if dts.us != 0:
if dts.us % 1000 == 0:
- return MS_RESO
- return US_RESO
+ return RESO_MS
+ return RESO_US
elif dts.sec != 0:
- return S_RESO
+ return RESO_SEC
elif dts.min != 0:
- return T_RESO
+ return RESO_MIN
elif dts.hour != 0:
- return H_RESO
- return D_RESO
+ return RESO_HR
+ return RESO_DAY
cdef _reso_local(ndarray[int64_t] stamps, object tz):
cdef:
Py_ssize_t n = len(stamps)
- int reso = D_RESO, curr_reso
+ int reso = RESO_DAY, curr_reso
ndarray[int64_t] trans, deltas, pos
pandas_datetimestruct dts
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index ac094c1f545f3..e0c602bf5a037 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -38,32 +38,55 @@ class FreqGroup(object):
FR_NS = 12000
-US_RESO = 0
-MS_RESO = 1
-S_RESO = 2
-T_RESO = 3
-H_RESO = 4
-D_RESO = 5
+RESO_NS = 0
+RESO_US = 1
+RESO_MS = 2
+RESO_SEC = 3
+RESO_MIN = 4
+RESO_HR = 5
+RESO_DAY = 6
class Resolution(object):
- # defined in period.pyx
- # note that these are different from freq codes
- RESO_US = US_RESO
- RESO_MS = MS_RESO
- RESO_SEC = S_RESO
- RESO_MIN = T_RESO
- RESO_HR = H_RESO
- RESO_DAY = D_RESO
+ RESO_US = RESO_US
+ RESO_MS = RESO_MS
+ RESO_SEC = RESO_SEC
+ RESO_MIN = RESO_MIN
+ RESO_HR = RESO_HR
+ RESO_DAY = RESO_DAY
_reso_str_map = {
+ RESO_NS: 'nanosecond',
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
- RESO_DAY: 'day'}
+ RESO_DAY: 'day'
+ }
+
+ # factor to multiply a value by to convert it to the next finer grained
+ # resolution
+ _reso_mult_map = {
+ RESO_NS: None,
+ RESO_US: 1000,
+ RESO_MS: 1000,
+ RESO_SEC: 1000,
+ RESO_MIN: 60,
+ RESO_HR: 60,
+ RESO_DAY: 24
+ }
+
+ _reso_str_bump_map = {
+ 'D': 'H',
+ 'H': 'T',
+ 'T': 'S',
+ 'S': 'L',
+ 'L': 'U',
+ 'U': 'N',
+ 'N': None
+ }
_str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)])
@@ -160,6 +183,47 @@ def get_reso_from_freq(cls, freq):
"""
return cls.get_reso(cls.get_str_from_freq(freq))
+ @classmethod
+ def get_stride_from_decimal(cls, value, freq):
+ """
+ Convert freq with decimal stride into a higher freq with integer stride
+
+ Parameters
+ ----------
+ value : integer or float
+ freq : string
+ Frequency string
+
+ Raises
+ ------
+ ValueError
+ If the float cannot be converted to an integer at any resolution.
+
+ Example
+ -------
+ >>> Resolution.get_stride_from_decimal(1.5, 'T')
+ (90, 'S')
+
+ >>> Resolution.get_stride_from_decimal(1.04, 'H')
+ (3744, 'S')
+
+ >>> Resolution.get_stride_from_decimal(1, 'D')
+ (1, 'D')
+ """
+
+ if np.isclose(value % 1, 0):
+ return int(value), freq
+ else:
+ start_reso = cls.get_reso_from_freq(freq)
+ if start_reso == 0:
+ raise ValueError(
+ "Could not convert to integer offset at any resolution"
+ )
+
+ next_value = cls._reso_mult_map[start_reso] * value
+ next_name = cls._reso_str_bump_map[freq]
+ return cls.get_stride_from_decimal(next_value, next_name)
+
def get_to_timestamp_base(base):
"""
@@ -472,12 +536,17 @@ def to_offset(freq):
splitted[2::4]):
if sep != '' and not sep.isspace():
raise ValueError('separator must be spaces')
- offset = get_offset(name)
+ prefix = _lite_rule_alias.get(name) or name
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
+ if prefix in Resolution._reso_str_bump_map.keys():
+ stride, name = Resolution.get_stride_from_decimal(
+ float(stride), prefix
+ )
stride = int(stride)
+ offset = get_offset(name)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
@@ -493,7 +562,9 @@ def to_offset(freq):
# hack to handle WOM-1MON
-opattern = re.compile(r'([\-]?\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)')
+opattern = re.compile(
+ r'([\-]?\d*|[\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)'
+)
def _base_and_stride(freqstr):
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index 5ba98f15aed8d..dfb7b26371d7a 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -39,6 +39,21 @@ def test_to_offset_multiple(self):
expected = offsets.Hour(3)
assert (result == expected)
+ freqstr = '2h 20.5min'
+ result = frequencies.to_offset(freqstr)
+ expected = offsets.Second(8430)
+ assert (result == expected)
+
+ freqstr = '1.5min'
+ result = frequencies.to_offset(freqstr)
+ expected = offsets.Second(90)
+ assert (result == expected)
+
+ freqstr = '0.5S'
+ result = frequencies.to_offset(freqstr)
+ expected = offsets.Milli(500)
+ assert (result == expected)
+
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
@@ -49,6 +64,16 @@ def test_to_offset_multiple(self):
expected = offsets.Milli(10075)
assert (result == expected)
+ freqstr = '1s0.25ms'
+ result = frequencies.to_offset(freqstr)
+ expected = offsets.Micro(1000250)
+ assert (result == expected)
+
+ freqstr = '1s0.25L'
+ result = frequencies.to_offset(freqstr)
+ expected = offsets.Micro(1000250)
+ assert (result == expected)
+
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
@@ -107,10 +132,8 @@ def test_to_offset_invalid(self):
frequencies.to_offset('-2-3U')
with tm.assertRaisesRegexp(ValueError, 'Invalid frequency: -2D:3H'):
frequencies.to_offset('-2D:3H')
-
- # ToDo: Must be fixed in #8419
- with tm.assertRaisesRegexp(ValueError, 'Invalid frequency: .5S'):
- frequencies.to_offset('.5S')
+ with tm.assertRaisesRegexp(ValueError, 'Invalid frequency: 1.5.0S'):
+ frequencies.to_offset('1.5.0S')
# split offsets with spaces are valid
assert frequencies.to_offset('2D 3H') == offsets.Hour(51)
@@ -379,6 +402,26 @@ def test_freq_to_reso(self):
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
+ def test_resolution_bumping(self):
+ # GH 14378
+ Reso = frequencies.Resolution
+
+ self.assertEqual(Reso.get_stride_from_decimal(1.5, 'T'), (90, 'S'))
+ self.assertEqual(Reso.get_stride_from_decimal(62.4, 'T'), (3744, 'S'))
+ self.assertEqual(Reso.get_stride_from_decimal(1.04, 'H'), (3744, 'S'))
+ self.assertEqual(Reso.get_stride_from_decimal(1, 'D'), (1, 'D'))
+ self.assertEqual(Reso.get_stride_from_decimal(0.342931, 'H'),
+ (1234551600, 'U'))
+ self.assertEqual(Reso.get_stride_from_decimal(1.2345, 'D'),
+ (106660800, 'L'))
+
+ with self.assertRaises(ValueError):
+ Reso.get_stride_from_decimal(0.5, 'N')
+
+ # too much precision in the input can prevent
+ with self.assertRaises(ValueError):
+ Reso.get_stride_from_decimal(0.3429324798798269273987982, 'H')
+
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index b45f867be65dd..58ec1561b2535 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -14,7 +14,7 @@
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.frequencies import (
get_freq,
- US_RESO, MS_RESO, S_RESO, H_RESO, D_RESO, T_RESO
+ RESO_US, RESO_MS, RESO_SEC, RESO_HR, RESO_DAY, RESO_MIN
)
import pandas.tseries.tools as tools
import pandas.tseries.offsets as offsets
@@ -1528,11 +1528,11 @@ def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
- [D_RESO, D_RESO,
- D_RESO, D_RESO,
- H_RESO, T_RESO,
- S_RESO, MS_RESO,
- US_RESO]):
+ [RESO_DAY, RESO_DAY,
+ RESO_DAY, RESO_DAY,
+ RESO_HR, RESO_MIN,
+ RESO_SEC, RESO_MS,
+ RESO_US]):
for tz in [None, 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Eastern']:
idx = date_range(start='2013-04-01', periods=30, freq=freq,
| - [x] closes #8419
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Passing `'0.5min'` as a frequency string should generate 30 second
intervals, rather than five minute intervals. By recursively increasing
resolution until one is found for which the frequency is an integer,
this commit ensures that that's the case for resolutions from days to
microsecond
| https://api.github.com/repos/pandas-dev/pandas/pulls/14378 | 2016-10-08T16:32:21Z | 2016-12-14T16:08:04Z | 2016-12-14T16:08:04Z | 2019-01-18T18:25:57Z |
API: add DataFrame.nunique() and DataFrameGroupBy.nunique() | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index adbe73aa5c5ef..9f491302a4d6f 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -433,6 +433,20 @@ def time_frame_from_records_generator_nrows(self):
+#-----------------------------------------------------------------------------
+# nunique
+
+class frame_nunique(object):
+
+ def setup(self):
+ self.data = np.random.randn(10000, 1000)
+ self.df = DataFrame(self.data)
+
+ def time_frame_nunique(self):
+ self.df.nunique()
+
+
+
#-----------------------------------------------------------------------------
# duplicated
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index ad58cd0fc6d70..fa68b122cc98d 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -251,6 +251,22 @@ def time_groupby_int_count(self):
self.df.groupby(['key1', 'key2']).count()
+#----------------------------------------------------------------------
+# nunique() speed
+
+class groupby_nunique(object):
+
+ def setup(self):
+ self.n = 10000
+ self.df = DataFrame({'key1': randint(0, 500, size=self.n),
+ 'key2': randint(0, 100, size=self.n),
+ 'ints': randint(0, 1000, size=self.n),
+ 'ints2': randint(0, 1000, size=self.n), })
+
+ def time_groupby_nunique(self):
+ self.df.groupby(['key1', 'key2']).nunique()
+
+
#----------------------------------------------------------------------
# group with different functions per column
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0873e4b34b0b1..a5b95dfc4f3f7 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -97,6 +97,9 @@ Other enhancements
^^^^^^^^^^^^^^^^^^
- ``Series.sort_index`` accepts parameters ``kind`` and ``na_position`` (:issue:`13589`, :issue:`14444`)
+- ``DataFrame`` has gained a ``nunique()`` method as short-cut for ``.apply(lambda x: x.nunique())`` (counting the distinct values over an axis) (:issue:`14336`).
+- New ``DataFrame.groupby().nunique()`` method as short-cut for ``.apply(lambda g: g.apply(lambda x: x.nunique()))`` (counting the distinct values for all columns within each group) (:issue:`14336`).
+
- ``pd.read_excel`` now preserves sheet order when using ``sheetname=None`` (:issue:`9930`)
- Multiple offset aliases with decimal points are now supported (e.g. '0.5min' is parsed as '30s') (:issue:`8419`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d12b8af35469b..b872f35277402 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4969,6 +4969,38 @@ def f(x):
return Series(result, index=labels)
+ def nunique(self, axis=0, dropna=True):
+ """
+ Return Series with number of distinct observations over requested
+ axis.
+
+ .. versionadded:: 0.20.0
+
+ Parameters
+ ----------
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ 0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ dropna : boolean, default True
+ Don't include NaN in the counts.
+
+ Returns
+ -------
+ nunique : Series
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
+ >>> df.nunique()
+ A 3
+ B 1
+
+ >>> df.nunique(axis=1)
+ 0 1
+ 1 2
+ 2 2
+ """
+ return self.apply(Series.nunique, axis=axis, dropna=dropna)
+
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7eba32b4932d0..ea361afdc3a60 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -3899,6 +3899,54 @@ def count(self):
return self._wrap_agged_blocks(data.items, list(blk))
+ def nunique(self, dropna=True):
+ """
+ Return DataFrame with number of distinct observations per group for
+ each column.
+
+ .. versionadded:: 0.20.0
+
+ Parameters
+ ----------
+ dropna : boolean, default True
+ Don't include NaN in the counts.
+
+ Returns
+ -------
+ nunique: DataFrame
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
+ ... 'ham', 'ham'],
+ ... 'value1': [1, 5, 5, 2, 5, 5],
+ ... 'value2': list('abbaxy')})
+ >>> df
+ id value1 value2
+ 0 spam 1 a
+ 1 egg 5 b
+ 2 egg 5 b
+ 3 spam 2 a
+ 4 ham 5 x
+ 5 ham 5 y
+
+ >>> df.groupby('id').nunique()
+ id value1 value2
+ id
+ egg 1 1 1
+ ham 1 1 2
+ spam 1 2 1
+
+ # check for rows with the same id but conflicting values
+ >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
+ id value1 value2
+ 0 spam 1 a
+ 3 spam 2 a
+ 4 ham 5 x
+ 5 ham 5 y
+ """
+ return self.apply(lambda g: g.apply(Series.nunique, dropna=dropna))
+
from pandas.tools.plotting import boxplot_frame_groupby # noqa
DataFrameGroupBy.boxplot = boxplot_frame_groupby
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index f6081e14d4081..4af26c3d0fdc0 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -16,6 +16,7 @@
MultiIndex, date_range, Timestamp)
import pandas as pd
import pandas.core.nanops as nanops
+import pandas.core.algorithms as algorithms
import pandas.formats.printing as printing
import pandas.util.testing as tm
@@ -410,6 +411,21 @@ def test_count(self):
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
+ def test_nunique(self):
+ f = lambda s: len(algorithms.unique1d(s.dropna()))
+ self._check_stat_op('nunique', f, has_skipna=False,
+ check_dtype=False, check_dates=True)
+
+ df = DataFrame({'A': [1, 1, 1],
+ 'B': [1, 2, 3],
+ 'C': [1, np.nan, 3]})
+ tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
+ tm.assert_series_equal(df.nunique(dropna=False),
+ Series({'A': 1, 'B': 3, 'C': 3}))
+ tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
+ tm.assert_series_equal(df.nunique(axis=1, dropna=False),
+ Series({0: 1, 1: 3, 2: 2}))
+
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index e87b5d04271e8..5cab941e74ce5 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2800,6 +2800,34 @@ def test_count_cross_type(self): # GH8169
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
+ def test_nunique(self):
+ df = DataFrame({
+ 'A': list('abbacc'),
+ 'B': list('abxacc'),
+ 'C': list('abbacx'),
+ })
+
+ expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
+ result = df.groupby('A', as_index=False).nunique()
+ tm.assert_frame_equal(result, expected)
+
+ # as_index
+ expected.index = list('abc')
+ expected.index.name = 'A'
+ result = df.groupby('A').nunique()
+ tm.assert_frame_equal(result, expected)
+
+ # with na
+ result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
+ tm.assert_frame_equal(result, expected)
+
+ # dropna
+ expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
+ index=list('abc'))
+ expected.index.name = 'A'
+ result = df.replace({'x': None}).groupby('A').nunique()
+ tm.assert_frame_equal(result, expected)
+
def test_non_cython_api(self):
# GH5610
@@ -5150,11 +5178,11 @@ def test_tab_completion(self):
'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max',
'mean', 'median', 'min', 'name', 'ngroups', 'nth', 'ohlc', 'plot',
'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count',
- 'head', 'irow', 'describe', 'cummax', 'quantile', 'rank',
- 'cumprod', 'tail', 'resample', 'cummin', 'fillna', 'cumsum',
- 'cumcount', 'all', 'shift', 'skew', 'bfill', 'ffill', 'take',
- 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith', 'cov',
- 'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',
+ 'nunique', 'head', 'irow', 'describe', 'cummax', 'quantile',
+ 'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna',
+ 'cumsum', 'cumcount', 'all', 'shift', 'skew', 'bfill', 'ffill',
+ 'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
+ 'cov', 'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',
'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding'])
self.assertEqual(results, expected)
| - [x] closes #14336
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14376 | 2016-10-07T19:09:05Z | 2017-01-23T13:41:26Z | null | 2017-01-23T20:45:41Z |
BUG: Fix linux Qt clipboard QApplication() creation | diff --git a/pandas/util/clipboard.py b/pandas/util/clipboard.py
index 02da0d5b8159f..deac1ebdfb62d 100644
--- a/pandas/util/clipboard.py
+++ b/pandas/util/clipboard.py
@@ -244,7 +244,7 @@ def _pasteKlipper():
copy = _copyGtk
elif qtBindingInstalled:
_functions = 'PyQt4 module' # for debugging
- app = QtGui.QApplication([])
+ app = QtGui.QApplication.instance() or QtGui.QApplication([])
cb = QtGui.QApplication.clipboard()
paste = _pasteQt
copy = _copyQt
| Fixes #14372
A Qt application cannot instantiate multiple `QApplication` instances,
so we create a new `QApplication` only when the global
`QApplication.instance()` is None.
Failing sample:
```
from PyQt4.QtGui import QApplication
myapp = QApplication([])
from pandas.util.clipboard import clipboard_get # <--- ERROR
File "prefix/lib/python2.7/site-packages/pandas/util/clipboard.py", line 164, in <module>
app = qt4.QtGui.QApplication([])
RuntimeError: A QApplication instance already exists.
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14371 | 2016-10-07T06:59:09Z | 2016-12-06T21:28:58Z | null | 2023-05-11T01:14:16Z |
DOC: add 0.19.1 whatsnew file | diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
index 77dc249aeb788..2a1f2cc47d48e 100644
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -18,6 +18,8 @@ What's New
These are new features and improvements of note in each release.
+.. include:: whatsnew/v0.19.1.txt
+
.. include:: whatsnew/v0.19.0.txt
.. include:: whatsnew/v0.18.1.txt
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
new file mode 100644
index 0000000000000..1c5f4915bb3a4
--- /dev/null
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -0,0 +1,32 @@
+.. _whatsnew_0191:
+
+v0.19.1 (????, 2016)
+---------------------
+
+This is a minor bug-fix release from 0.19.0 and includes a large number of
+bug fixes along with several new features, enhancements, and performance improvements.
+We recommend that all users upgrade to this version.
+
+Highlights include:
+
+
+.. contents:: What's new in v0.19.1
+ :local:
+ :backlinks: none
+
+
+.. _whatsnew_0191.performance:
+
+Performance Improvements
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+
+
+
+
+.. _whatsnew_0191.bug_fixes:
+
+Bug Fixes
+~~~~~~~~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/14366 | 2016-10-06T09:16:03Z | 2016-10-07T19:25:31Z | 2016-10-07T19:25:31Z | 2016-10-07T19:25:31Z | |
DOC: Remove old warning from dsintro.rst | diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 6063e3e8bce45..cc69367017aed 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -41,12 +41,6 @@ categories of functionality and methods in separate sections.
Series
------
-.. warning::
-
- In 0.13.0 ``Series`` has internally been refactored to no longer sub-class ``ndarray``
- but instead subclass ``NDFrame``, similarly to the rest of the pandas containers. This should be
- a transparent change with only very limited API implications (See the :ref:`Internal Refactoring<whatsnew_0130.refactoring>`)
-
:class:`Series` is a one-dimensional labeled array capable of holding any data
type (integers, strings, floating point numbers, Python objects, etc.). The axis
labels are collectively referred to as the **index**. The basic method to create a Series is to call:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
The warning is about something that have been fixed for almost 3 years. Every time a new user excited about pandas start reading the docs, they have to waste brain-cycles ignoring that big red warning bubble.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14365 | 2016-10-06T09:00:24Z | 2016-10-06T09:25:23Z | 2016-10-06T09:25:23Z | 2016-10-06T15:46:36Z |
BLD/CI: cython cache pxd files | diff --git a/ci/prep_cython_cache.sh b/ci/prep_cython_cache.sh
index 6f16dce2fb431..cadc356b641f9 100755
--- a/ci/prep_cython_cache.sh
+++ b/ci/prep_cython_cache.sh
@@ -3,8 +3,8 @@
ls "$HOME/.cache/"
PYX_CACHE_DIR="$HOME/.cache/pyxfiles"
-pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx"`
-pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx"`
+pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd"`
+pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx" -o -name "*.pxd"`
CACHE_File="$HOME/.cache/cython_files.tar"
diff --git a/ci/submit_cython_cache.sh b/ci/submit_cython_cache.sh
index 4f60df0ccb2d8..5c98c3df61736 100755
--- a/ci/submit_cython_cache.sh
+++ b/ci/submit_cython_cache.sh
@@ -2,7 +2,7 @@
CACHE_File="$HOME/.cache/cython_files.tar"
PYX_CACHE_DIR="$HOME/.cache/pyxfiles"
-pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx"`
+pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd"`
rm -rf $CACHE_File
rm -rf $PYX_CACHE_DIR
| Currently the cython cache on travis doesn't pick up change in `.pxd` files Most of this commit history is trial and error - but 479c311 shows this working
https://travis-ci.org/pydata/pandas/jobs/166041112
```
$ ci/prep_cython_cache.sh
cython_files.tar motd.legal-displayed pip pyxfiles
Cache available - checking pyx diff
util.pxd has changed:
--- /home/travis/build/pydata/pandas/pandas/src/util.pxd 2016-10-08 13:01:48.255250369 +0000 +++ /home/travis/.cache/pyxfiles/home/travis/build/pydata/pandas/pandas/src/util.pxd 2016-10-06 11:04:00.000000000 +0000 @@ -97,5 +97,6 @@ cdef inline bint _checknan(object val): return not cnp.PyArray_Check(val) and val != val + cdef inline bint is_period_object(object val): return getattr(val, '_typ', '_typ') == 'period'
In a PR
Rebuilding cythonized files
Use cache (Blank if not set) = true
Clear cache (1=YES) = 1
```
xref #14359
| https://api.github.com/repos/pandas-dev/pandas/pulls/14363 | 2016-10-06T01:22:24Z | 2016-10-12T09:15:49Z | 2016-10-12T09:15:49Z | 2016-11-30T01:01:34Z |
block mutation of read-only array in series | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 147ff8795eb00..150925cfa53eb 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -55,7 +55,9 @@ Bug Fixes
- Bug in ``pd.concat`` with dataframes heterogeneous in length and tuple ``keys`` (:issue:`14438`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
-- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
+- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single
+- index frame by a column and the index level (:issue`14327`)
- Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns``
is not scalar and ``values`` is not specified (:issue:`14380`)
+- Bug in ``Series.__setitem__` which allowed mutating read-only arrays (:issue:`14359`).
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index ef3407ffd5388..b09a1c2755a06 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -975,7 +975,9 @@ def astype_intsafe(ndarray[object] arr, new_dtype):
if is_datelike and checknull(v):
result[i] = NPY_NAT
else:
- util.set_value_at(result, i, v)
+ # we can use the unsafe version because we know `result` is mutable
+ # since it was created from `np.empty`
+ util.set_value_at_unsafe(result, i, v)
return result
@@ -986,7 +988,9 @@ cpdef ndarray[object] astype_unicode(ndarray arr):
ndarray[object] result = np.empty(n, dtype=object)
for i in range(n):
- util.set_value_at(result, i, unicode(arr[i]))
+ # we can use the unsafe version because we know `result` is mutable
+ # since it was created from `np.empty`
+ util.set_value_at_unsafe(result, i, unicode(arr[i]))
return result
@@ -997,7 +1001,9 @@ cpdef ndarray[object] astype_str(ndarray arr):
ndarray[object] result = np.empty(n, dtype=object)
for i in range(n):
- util.set_value_at(result, i, str(arr[i]))
+ # we can use the unsafe version because we know `result` is mutable
+ # since it was created from `np.empty`
+ util.set_value_at_unsafe(result, i, str(arr[i]))
return result
diff --git a/pandas/src/util.pxd b/pandas/src/util.pxd
index fdbfbf62af7d2..be8d0d4aa6302 100644
--- a/pandas/src/util.pxd
+++ b/pandas/src/util.pxd
@@ -70,7 +70,12 @@ cdef inline object get_value_at(ndarray arr, object loc):
return get_value_1d(arr, i)
-cdef inline set_value_at(ndarray arr, object loc, object value):
+cdef inline set_value_at_unsafe(ndarray arr, object loc, object value):
+ """Sets a value into the array without checking the writeable flag.
+
+ This should be used when setting values in a loop, check the writeable
+ flag above the loop and then eschew the check on each iteration.
+ """
cdef:
Py_ssize_t i, sz
if is_float_object(loc):
@@ -87,6 +92,14 @@ cdef inline set_value_at(ndarray arr, object loc, object value):
assign_value_1d(arr, i, value)
+cdef inline set_value_at(ndarray arr, object loc, object value):
+ """Sets a value into the array after checking that the array is mutable.
+ """
+ if not cnp.PyArray_ISWRITEABLE(arr):
+ raise ValueError('assignment destination is read-only')
+
+ set_value_at_unsafe(arr, loc, value)
+
cdef inline int is_contiguous(ndarray arr):
return cnp.PyArray_CHKFLAGS(arr, cnp.NPY_C_CONTIGUOUS)
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 7c16fd060b181..c44a7a898bb8d 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1947,6 +1947,40 @@ def test_multilevel_preserve_name(self):
self.assertEqual(result.name, s.name)
self.assertEqual(result2.name, s.name)
+ def test_setitem_scalar_into_readonly_backing_data(self):
+ # GH14359: test that you cannot mutate a read only buffer
+
+ array = np.zeros(5)
+ array.flags.writeable = False # make the array immutable
+ series = Series(array)
+
+ for n in range(len(series)):
+ with self.assertRaises(ValueError):
+ series[n] = 1
+
+ self.assertEqual(
+ array[n],
+ 0,
+ msg='even though the ValueError was raised, the underlying'
+ ' array was still mutated!',
+ )
+
+ def test_setitem_slice_into_readonly_backing_data(self):
+ # GH14359: test that you cannot mutate a read only buffer
+
+ array = np.zeros(5)
+ array.flags.writeable = False # make the array immutable
+ series = Series(array)
+
+ with self.assertRaises(ValueError):
+ series[1:3] = 1
+
+ self.assertTrue(
+ not array.any(),
+ msg='even though the ValueError was raised, the underlying'
+ ' array was still mutated!',
+ )
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/setup.py b/setup.py
index 846e2b7fa2d88..1070ee7aa78c9 100755
--- a/setup.py
+++ b/setup.py
@@ -476,7 +476,8 @@ def pxd(name):
'pandas/src/period_helper.c']},
index={'pyxfile': 'index',
'sources': ['pandas/src/datetime/np_datetime.c',
- 'pandas/src/datetime/np_datetime_strings.c']},
+ 'pandas/src/datetime/np_datetime_strings.c'],
+ 'pxdfiles': ['src/util']},
algos={'pyxfile': 'algos',
'pxdfiles': ['src/util'],
'depends': _pxi_dep['algos']},
| - [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
reproduction case:
``` python
In [1]: array = np.array([1, 2, 3])
In [2]: array.flags.writeable = False
In [3]: series = pd.Series(array)
In [4]: series[0] = -1
In [5]: series
Out[5]:
0 -1
1 2
2 3
dtype: int64
In [6]: array
Out[6]: array([-1, 2, 3])
```
In the case I was running into I was passing a `numpy.memmap` which is a subclass of `ndarray` but was over a read only memory segment so attempting the setitem was causing a segfault. The new behavior matches the numpy behavior:
``` python
In [4]: series[0] = -1
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-2ad0997d5cf2> in <module>()
----> 1 series[0] = -1
...
ValueError: assignment destination is read-only
In [5]: array[0] = -1
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-f67d1d011a13> in <module>()
----> 1 array[0] = -1
ValueError: assignment destination is read-only
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14359 | 2016-10-05T19:26:55Z | 2016-10-24T22:43:56Z | null | 2016-10-24T22:46:44Z |
PERF: period factorization | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index e12b00dd06b39..5f3671012e6d5 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -548,6 +548,32 @@ def time_groupby_sum(self):
self.df.groupby(['a'])['b'].sum()
+class groupby_period(object):
+ # GH 14338
+ goal_time = 0.2
+
+ def make_grouper(self, N):
+ return pd.period_range('1900-01-01', freq='D', periods=N)
+
+ def setup(self):
+ N = 10000
+ self.grouper = self.make_grouper(N)
+ self.df = pd.DataFrame(np.random.randn(N, 2))
+
+ def time_groupby_sum(self):
+ self.df.groupby(self.grouper).sum()
+
+
+class groupby_datetime(groupby_period):
+ def make_grouper(self, N):
+ return pd.date_range('1900-01-01', freq='D', periods=N)
+
+
+class groupby_datetimetz(groupby_period):
+ def make_grouper(self, N):
+ return pd.date_range('1900-01-01', freq='D', periods=N,
+ tz='US/Central')
+
#----------------------------------------------------------------------
# Series.value_counts
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..8843a7849c200 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -20,7 +20,7 @@ Highlights include:
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
-
+ - Fixed performance regression in factorization of ``Period`` data (:issue:`14338`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ee59d6552bb2f..8644d4568e44d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -285,18 +285,27 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
note: an array of Periods will ignore sort as it returns an always sorted
PeriodIndex
"""
- from pandas import Index, Series, DatetimeIndex
-
- vals = np.asarray(values)
-
- # localize to UTC
- is_datetimetz_type = is_datetimetz(values)
- if is_datetimetz_type:
- values = DatetimeIndex(values)
- vals = values.asi8
+ from pandas import Index, Series, DatetimeIndex, PeriodIndex
+
+ # handling two possibilities here
+ # - for a numpy datetimelike simply view as i8 then cast back
+ # - for an extension datetimelike view as i8 then
+ # reconstruct from boxed values to transfer metadata
+ dtype = None
+ if needs_i8_conversion(values):
+ if is_period_dtype(values):
+ values = PeriodIndex(values)
+ vals = values.asi8
+ elif is_datetimetz(values):
+ values = DatetimeIndex(values)
+ vals = values.asi8
+ else:
+ # numpy dtype
+ dtype = values.dtype
+ vals = values.view(np.int64)
+ else:
+ vals = np.asarray(values)
- is_datetime = is_datetime64_dtype(vals)
- is_timedelta = is_timedelta64_dtype(vals)
(hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
table = hash_klass(size_hint or len(vals))
@@ -311,13 +320,9 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
assume_unique=True)
- if is_datetimetz_type:
- # reset tz
- uniques = values._shallow_copy(uniques)
- elif is_datetime:
- uniques = uniques.astype('M8[ns]')
- elif is_timedelta:
- uniques = uniques.astype('m8[ns]')
+ if dtype is not None:
+ uniques = uniques.astype(dtype)
+
if isinstance(values, Index):
uniques = values._shallow_copy(uniques, name=None)
elif isinstance(values, Series):
| Merged in #14419
---
- [x] closes #14338
- [x] tests not needed / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
asv
```
before after ratio
[c41c6511] [96b364a4]
- 2.44s 46.28ms 0.02 groupby.groupby_period.time_groupby_sum
```
cc @sinhrks , @bmoscon
| https://api.github.com/repos/pandas-dev/pandas/pulls/14348 | 2016-10-05T02:15:35Z | 2016-10-13T20:13:34Z | null | 2023-05-11T01:14:14Z |
TST: #14345 fixes TestDatetimeIndexOps test_nat AssertionErrors on 32-bit | diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 8a86fcba32ecb..a6d58fa3e7ef3 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -867,7 +867,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([], dtype=np.int64))
+ np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
@@ -875,7 +875,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([1], dtype=np.int64))
+ np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
@@ -1717,7 +1717,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([], dtype=np.int64))
+ np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
@@ -1725,7 +1725,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([1], dtype=np.int64))
+ np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
@@ -2714,7 +2714,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([], dtype=np.int64))
+ np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
@@ -2722,7 +2722,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([1], dtype=np.int64))
+ np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
| - [ ] closes #14345
Changed tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.int64)
np.int64 to npintp
All tests now pass on 32bit linux install.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14347 | 2016-10-05T00:01:22Z | 2016-10-06T10:23:46Z | null | 2016-10-06T10:23:46Z |
BUG: GH14323 Union of differences from DatetimeIndex incorrect | diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 5082fc84982c6..c9b7be5ead649 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -1973,7 +1973,7 @@ def difference(self, other):
except TypeError:
pass
- return this._shallow_copy(the_diff, name=result_name)
+ return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
diff --git a/pandas/tests/indexes/test_datetimelike.py b/pandas/tests/indexes/test_datetimelike.py
index 7502a4ce26b04..b04e840ffc849 100644
--- a/pandas/tests/indexes/test_datetimelike.py
+++ b/pandas/tests/indexes/test_datetimelike.py
@@ -732,6 +732,31 @@ def test_fillna_datetime64(self):
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
+ def test_difference_of_union(self):
+ # GH14323: Test taking the union of differences of an Index.
+ # Difference of DatetimeIndex does not preserve frequency,
+ # so a differencing operation should not retain the freq field of the
+ # original index.
+ i = pd.date_range("20160920", "20160925", freq="D")
+
+ a = pd.date_range("20160921", "20160924", freq="D")
+ expected = pd.DatetimeIndex(["20160920", "20160925"], freq=None)
+ a_diff = i.difference(a)
+ tm.assert_index_equal(a_diff, expected)
+ tm.assert_attr_equal('freq', a_diff, expected)
+
+ b = pd.date_range("20160922", "20160925", freq="D")
+ b_diff = i.difference(b)
+ expected = pd.DatetimeIndex(["20160920", "20160921"], freq=None)
+ tm.assert_index_equal(b_diff, expected)
+ tm.assert_attr_equal('freq', b_diff, expected)
+
+ union_of_diff = a_diff.union(b_diff)
+ expected = pd.DatetimeIndex(["20160920", "20160921", "20160925"],
+ freq=None)
+ tm.assert_index_equal(union_of_diff, expected)
+ tm.assert_attr_equal('freq', union_of_diff, expected)
+
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
@@ -938,6 +963,30 @@ def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
+ def test_difference_of_union(self):
+ # GH14323: Test taking the union of differences of an Index.
+ # Difference of Period MUST preserve frequency, but the ability
+ # to union results must be preserved
+ i = pd.period_range("20160920", "20160925", freq="D")
+
+ a = pd.period_range("20160921", "20160924", freq="D")
+ expected = pd.PeriodIndex(["20160920", "20160925"], freq='D')
+ a_diff = i.difference(a)
+ tm.assert_index_equal(a_diff, expected)
+ tm.assert_attr_equal('freq', a_diff, expected)
+
+ b = pd.period_range("20160922", "20160925", freq="D")
+ b_diff = i.difference(b)
+ expected = pd.PeriodIndex(["20160920", "20160921"], freq='D')
+ tm.assert_index_equal(b_diff, expected)
+ tm.assert_attr_equal('freq', b_diff, expected)
+
+ union_of_diff = a_diff.union(b_diff)
+ expected = pd.PeriodIndex(["20160920", "20160921", "20160925"],
+ freq='D')
+ tm.assert_index_equal(union_of_diff, expected)
+ tm.assert_attr_equal('freq', union_of_diff, expected)
+
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
@@ -1149,3 +1198,28 @@ def test_fillna_timedelta(self):
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
+
+ def test_difference_of_union(self):
+ # GH14323: Test taking the union of differences of an Index.
+ # Difference of TimedeltaIndex does not preserve frequency,
+ # so a differencing operation should not retain the freq field of the
+ # original index.
+ i = pd.timedelta_range("0 days", "5 days", freq="D")
+
+ a = pd.timedelta_range("1 days", "4 days", freq="D")
+ expected = pd.TimedeltaIndex(["0 days", "5 days"], freq=None)
+ a_diff = i.difference(a)
+ tm.assert_index_equal(a_diff, expected)
+ tm.assert_attr_equal('freq', a_diff, expected)
+
+ b = pd.timedelta_range("2 days", "5 days", freq="D")
+ b_diff = i.difference(b)
+ expected = pd.TimedeltaIndex(["0 days", "1 days"], freq=None)
+ tm.assert_index_equal(b_diff, expected)
+ tm.assert_attr_equal('freq', b_diff, expected)
+
+ union_of_difference = a_diff.union(b_diff)
+ expected = pd.TimedeltaIndex(["0 days", "1 days", "5 days"],
+ freq=None)
+ tm.assert_index_equal(union_of_difference, expected)
+ tm.assert_attr_equal('freq', union_of_difference, expected)
| - [ ] closes #14323
- [ ] tests added / passed
- [ ] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
Sets freq to None when doing a difference operation on a DatetimeIndex
or TimedeltaIndex, rather than retaining the frequency (which can cause
problems with downstream operations). Frequency of PeriodIndex is
retained.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14346 | 2016-10-04T23:40:04Z | 2016-10-24T22:34:21Z | null | 2016-10-24T22:34:25Z |
DOC: Correct uniqueness of index for Series | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1c6b13885dd01..8a98f5cdf7e21 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -102,11 +102,11 @@ class Series(base.IndexOpsMixin, strings.StringAccessorMixin,
"""
One-dimensional ndarray with axis labels (including time series).
- Labels need not be unique but must be any hashable type. The object
+ Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
- missing data (currently represented as NaN)
+ missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
@@ -117,8 +117,8 @@ class Series(base.IndexOpsMixin, strings.StringAccessorMixin,
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like or Index (1d)
- Values must be unique and hashable, same length as data. Index
- object (or other iterable of same length as data) Will default to
+ Values must be hashable and have the same length as `data`.
+ Non-unique index values are allowed. Will default to
RangeIndex(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
| closes #7808
Just wanted to fix the docstring to reflect the fact that the index labels neither need to be unique ~~nor hashable~~.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14344 | 2016-10-04T05:54:18Z | 2016-11-25T10:05:30Z | 2016-11-25T10:05:30Z | 2016-11-30T02:38:51Z |
BUG: astype falsely converts inf to integer, patch for Numpy (GH14265) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index f534c67273560..8fdef39a3ae98 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -118,3 +118,5 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
+- Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`)
\ No newline at end of file
diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py
index 1c9b6119cf665..f210f70ad1940 100644
--- a/pandas/sparse/tests/test_array.py
+++ b/pandas/sparse/tests/test_array.py
@@ -361,7 +361,7 @@ def test_astype(self):
arr.astype('i8')
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
- msg = "Cannot convert NA to integer"
+ msg = 'Cannot convert non-finite values \(NA or inf\) to integer'
with tm.assertRaisesRegexp(ValueError, msg):
arr.astype('i8')
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 817770b9da610..61030c262a44b 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -353,9 +353,17 @@ def test_astype_with_view(self):
tf = self.frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
- def test_astype_cast_nan_int(self):
- df = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]})
- self.assertRaises(ValueError, df.astype, np.int64)
+ def test_astype_cast_nan_inf_int(self):
+ # GH14265, check nan and inf raise error when converting to int
+ types = [np.int32, np.int64]
+ values = [np.nan, np.inf]
+ msg = 'Cannot convert non-finite values \(NA or inf\) to integer'
+
+ for this_type in types:
+ for this_val in values:
+ df = DataFrame([this_val])
+ with tm.assertRaisesRegexp(ValueError, msg):
+ df.astype(this_type)
def test_astype_str(self):
# GH9757
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 9a406dfa10c35..3eafbaf912797 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -42,9 +42,17 @@ def test_dtype(self):
assert_series_equal(self.ts.get_ftype_counts(), Series(
1, ['float64:dense']))
- def test_astype_cast_nan_int(self):
- df = Series([1.0, 2.0, 3.0, np.nan])
- self.assertRaises(ValueError, df.astype, np.int64)
+ def test_astype_cast_nan_inf_int(self):
+ # GH14265, check nan and inf raise error when converting to int
+ types = [np.int32, np.int64]
+ values = [np.nan, np.inf]
+ msg = 'Cannot convert non-finite values \(NA or inf\) to integer'
+
+ for this_type in types:
+ for this_val in values:
+ s = Series([this_val])
+ with self.assertRaisesRegexp(ValueError, msg):
+ s.astype(this_type)
def test_astype_cast_object_int(self):
arr = Series(["car", "house", "tree", "1"])
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index a79862eb195b6..d4beab5655e5c 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -527,8 +527,10 @@ def _astype_nansafe(arr, dtype, copy=True):
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
- if np.isnan(arr).any():
- raise ValueError('Cannot convert NA to integer')
+ if not np.isfinite(arr).all():
+ raise ValueError('Cannot convert non-finite values (NA or inf) to '
+ 'integer')
+
elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
# work around NumPy brokenness, #1987
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
| - [x] closes #14265
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Bug in Numpy causes inf values to be falsely converted to integers. I added a ValueError exception similar to the exception for trying to convert NaN to an integer.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14343 | 2016-10-04T05:09:10Z | 2016-12-11T22:23:50Z | 2016-12-11T22:23:49Z | 2016-12-14T05:10:09Z |
Bug: Error when key-only Grouper is passed to groupby in a list (GH14334) | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index b2facd4e2d0ec..61d5a199acc3c 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -54,6 +54,7 @@ Bug Fixes
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`)
+- Bug in ``df.groupby`` where ``TypeError`` raised when ``pd.Grouper(key=...)`` is passed in a list (:issue:`14334`)
- Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns``
is not scalar and ``values`` is not specified (:issue:`14380`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 5223c0ac270f3..5e08f6c3368a6 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2208,7 +2208,10 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
index._get_grouper_for_level(self.grouper, level)
else:
- if isinstance(self.grouper, (list, tuple)):
+ if self.grouper is None and self.name is not None:
+ self.grouper = self.obj[self.name]
+
+ elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
@@ -2448,7 +2451,10 @@ def is_in_obj(gpr):
elif is_in_axis(gpr): # df.groupby('name')
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
-
+ elif isinstance(gpr, Grouper) and gpr.key is not None:
+ # Add key to exclusions
+ exclusions.append(gpr.key)
+ in_axis, name = False, None
else:
in_axis, name = False, None
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f3791ee1d5c91..89aaafe9b2c02 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -442,6 +442,36 @@ def test_grouper_creation_bug(self):
result = g.sum()
assert_frame_equal(result, expected)
+ # GH14334
+ # pd.Grouper(key=...) may be passed in a list
+ df = DataFrame({'A': [0, 0, 0, 1, 1, 1],
+ 'B': [1, 1, 2, 2, 3, 3],
+ 'C': [1, 2, 3, 4, 5, 6]})
+ # Group by single column
+ expected = df.groupby('A').sum()
+ g = df.groupby([pd.Grouper(key='A')])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
+ # Group by two columns
+ # using a combination of strings and Grouper objects
+ expected = df.groupby(['A', 'B']).sum()
+
+ # Group with two Grouper objects
+ g = df.groupby([pd.Grouper(key='A'), pd.Grouper(key='B')])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
+ # Group with a string and a Grouper object
+ g = df.groupby(['A', pd.Grouper(key='B')])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
+ # Group with a Grouper object and a string
+ g = df.groupby([pd.Grouper(key='A'), 'B'])
+ result = g.sum()
+ assert_frame_equal(result, expected)
+
# GH8866
s = Series(np.arange(8, dtype='int64'),
index=pd.MultiIndex.from_product(
| - [x] closes #14334
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14342 | 2016-10-04T00:22:28Z | 2016-10-25T10:51:36Z | null | 2016-10-26T12:12:19Z |
BUG: Frequency not set on empty series | diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 352acee23df2d..cf604822d6eea 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -278,7 +278,7 @@ Please try to maintain backward compatibility. *pandas* has lots of users with l
Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore, it is worth getting in the habit of writing tests ahead of time so this is never an issue.
-Like many packages, *pandas* uses the [Nose testing system](http://nose.readthedocs.org/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
+Like many packages, *pandas* uses the [Nose testing system](https://nose.readthedocs.io/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
#### Writing tests
@@ -323,7 +323,7 @@ Performance matters and it is worth considering whether your code has introduced
>
> The asv benchmark suite was translated from the previous framework, vbench, so many stylistic issues are likely a result of automated transformation of the code.
-To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](http://asv.readthedocs.org/en/latest/installing.html).
+To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](https://asv.readthedocs.io/en/latest/installing.html).
To install asv:
@@ -360,7 +360,7 @@ This command is equivalent to:
This will launch every test only once, display stderr from the benchmarks, and use your local `python` that comes from your `$PATH`.
-Information on how to write a benchmark can be found in the [asv documentation](http://asv.readthedocs.org/en/latest/writing_benchmarks.html).
+Information on how to write a benchmark can be found in the [asv documentation](https://asv.readthedocs.io/en/latest/writing_benchmarks.html).
#### Running the vbench performance test suite (phasing out)
diff --git a/ci/prep_cython_cache.sh b/ci/prep_cython_cache.sh
index 6f16dce2fb431..cadc356b641f9 100755
--- a/ci/prep_cython_cache.sh
+++ b/ci/prep_cython_cache.sh
@@ -3,8 +3,8 @@
ls "$HOME/.cache/"
PYX_CACHE_DIR="$HOME/.cache/pyxfiles"
-pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx"`
-pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx"`
+pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd"`
+pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx" -o -name "*.pxd"`
CACHE_File="$HOME/.cache/cython_files.tar"
diff --git a/ci/submit_cython_cache.sh b/ci/submit_cython_cache.sh
index 4f60df0ccb2d8..5c98c3df61736 100755
--- a/ci/submit_cython_cache.sh
+++ b/ci/submit_cython_cache.sh
@@ -2,7 +2,7 @@
CACHE_File="$HOME/.cache/cython_files.tar"
PYX_CACHE_DIR="$HOME/.cache/pyxfiles"
-pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx"`
+pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd"`
rm -rf $CACHE_File
rm -rf $PYX_CACHE_DIR
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 19318aad3d53d..e5aa6b577270a 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1794,18 +1794,18 @@ The following functions are available for one dimensional object arrays or scala
- :meth:`~pandas.to_datetime` (conversion to datetime objects)
- .. ipython:: python
+ .. ipython:: python
- import datetime
- m = ['2016-07-09', datetime.datetime(2016, 3, 2)]
- pd.to_datetime(m)
+ import datetime
+ m = ['2016-07-09', datetime.datetime(2016, 3, 2)]
+ pd.to_datetime(m)
- :meth:`~pandas.to_timedelta` (conversion to timedelta objects)
- .. ipython:: python
+ .. ipython:: python
- m = ['5us', pd.Timedelta('1day')]
- pd.to_timedelta(m)
+ m = ['5us', pd.Timedelta('1day')]
+ pd.to_timedelta(m)
To force a conversion, we can pass in an ``errors`` argument, which specifies how pandas should deal with elements
that cannot be converted to desired dtype or object. By default, ``errors='raise'``, meaning that any errors encountered
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fd3a2493a53e8..4f916c6ba5290 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -295,7 +295,7 @@
'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
- 'py': ('http://pylib.readthedocs.org/en/latest/', None)
+ 'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7f336abcaa6d7..446a40a7ec4b4 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -360,7 +360,7 @@ follow the Numpy Docstring Standard (see above), but you don't need to install
this because a local copy of numpydoc is included in the *pandas* source
code.
`nbconvert <https://nbconvert.readthedocs.io/en/latest/>`_ and
-`nbformat <http://nbformat.readthedocs.io/en/latest/>`_ are required to build
+`nbformat <https://nbformat.readthedocs.io/en/latest/>`_ are required to build
the Jupyter notebooks included in the documentation.
If you have a conda environment named ``pandas_dev``, you can install the extra
@@ -490,7 +490,7 @@ Adding tests is one of the most common requests after code is pushed to *pandas*
it is worth getting in the habit of writing tests ahead of time so this is never an issue.
Like many packages, *pandas* uses the `Nose testing system
-<http://nose.readthedocs.org/en/latest/index.html>`_ and the convenient
+<https://nose.readthedocs.io/en/latest/index.html>`_ and the convenient
extensions in `numpy.testing
<http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
@@ -569,7 +569,7 @@ supports both python2 and python3.
To use all features of asv, you will need either ``conda`` or
``virtualenv``. For more details please check the `asv installation
-webpage <http://asv.readthedocs.org/en/latest/installing.html>`_.
+webpage <https://asv.readthedocs.io/en/latest/installing.html>`_.
To install asv::
@@ -624,7 +624,7 @@ This will display stderr from the benchmarks, and use your local
``python`` that comes from your ``$PATH``.
Information on how to write a benchmark and how to use asv can be found in the
-`asv documentation <http://asv.readthedocs.org/en/latest/writing_benchmarks.html>`_.
+`asv documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
.. _contributing.gbq_integration_tests:
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 38a816060e1bc..27462a08b0011 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -877,7 +877,7 @@ The :ref:`Plotting <visualization>` docs.
<http://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot>`__
`Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter
-<http://pandas-xlsxwriter-charts.readthedocs.org/en/latest/introduction.html>`__
+<https://pandas-xlsxwriter-charts.readthedocs.io/>`__
`Boxplot for each quartile of a stratifying variable
<http://stackoverflow.com/questions/23232989/boxplot-stratified-by-column-in-python-pandas>`__
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 6063e3e8bce45..cc69367017aed 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -41,12 +41,6 @@ categories of functionality and methods in separate sections.
Series
------
-.. warning::
-
- In 0.13.0 ``Series`` has internally been refactored to no longer sub-class ``ndarray``
- but instead subclass ``NDFrame``, similarly to the rest of the pandas containers. This should be
- a transparent change with only very limited API implications (See the :ref:`Internal Refactoring<whatsnew_0130.refactoring>`)
-
:class:`Series` is a one-dimensional labeled array capable of holding any data
type (integers, strings, floating point numbers, Python objects, etc.). The axis
labels are collectively referred to as the **index**. The basic method to create a Series is to call:
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 17ebd1f163f4f..087b265ee83f2 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -145,7 +145,7 @@ API
`pandas-datareader <https://github.com/pydata/pandas-datareader>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.org/en/latest/>`_:
+``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.io/en/latest/>`_:
The following data feeds are available:
@@ -170,7 +170,7 @@ PyDatastream is a Python interface to the
SOAP API to return indexed Pandas DataFrames or Panels with financial data.
This package requires valid credentials for this API (non free).
-`pandaSDMX <http://pandasdmx.readthedocs.org>`__
+`pandaSDMX <https://pandasdmx.readthedocs.io>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pandaSDMX is an extensible library to retrieve and acquire statistical data
and metadata disseminated in
@@ -215,7 +215,7 @@ dimensional arrays, rather than the tabular data for which pandas excels.
Out-of-core
-------------
-`Dask <https://dask.readthedocs.org/en/latest/>`__
+`Dask <https://dask.readthedocs.io/en/latest/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dask is a flexible parallel computing library for analytics. Dask
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 6295e6f6cbb68..73685e0be8e7e 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -189,7 +189,7 @@ pandas is equipped with an exhaustive set of unit tests covering about 97% of
the codebase as of this writing. To run it on your machine to verify that
everything is working (and you have all of the dependencies, soft and hard,
installed), make sure you have `nose
-<http://readthedocs.org/docs/nose/en/latest/>`__ and run:
+<https://nose.readthedocs.io/en/latest/>`__ and run:
::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index d436fa52918d3..811fca4344121 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1481,7 +1481,7 @@ function takes a number of arguments. Only the first is required.
- ``encoding``: a string representing the encoding to use if the contents are
non-ASCII, for python versions prior to 3
- ``line_terminator``: Character sequence denoting line end (default '\\n')
- - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL)
+ - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL). Note that if you have set a `float_format` then floats are converted to strings and csv.QUOTE_NONNUMERIC will treat them as non-numeric
- ``quotechar``: Character used to quote fields (default '"')
- ``doublequote``: Control quoting of ``quotechar`` in fields (default True)
- ``escapechar``: Character used to escape ``sep`` and ``quotechar`` when
@@ -2639,8 +2639,8 @@ config options <options>` ``io.excel.xlsx.writer`` and
``io.excel.xls.writer``. pandas will fall back on `openpyxl`_ for ``.xlsx``
files if `Xlsxwriter`_ is not available.
-.. _XlsxWriter: http://xlsxwriter.readthedocs.org
-.. _openpyxl: http://openpyxl.readthedocs.org/
+.. _XlsxWriter: https://xlsxwriter.readthedocs.io
+.. _openpyxl: https://openpyxl.readthedocs.io/
.. _xlwt: http://www.python-excel.org
To specify which writer you want to use, you can pass an engine keyword
@@ -2775,6 +2775,7 @@ both on the writing (serialization), and reading (deserialization).
as an EXPERIMENTAL LIBRARY, the storage format may not be stable until a future release.
As a result of writing format changes and other issues:
+
+----------------------+------------------------+
| Packed with | Can be unpacked with |
+======================+========================+
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst
index f3df1ebdf25cb..bde97d88a0ee7 100644
--- a/doc/source/r_interface.rst
+++ b/doc/source/r_interface.rst
@@ -17,7 +17,7 @@ rpy2 / R interface
In v0.16.0, the ``pandas.rpy`` interface has been **deprecated and will be
removed in a future version**. Similar functionality can be accessed
- through the `rpy2 <http://rpy2.readthedocs.io/>`__ project.
+ through the `rpy2 <https://rpy2.readthedocs.io/>`__ project.
See the :ref:`updating <rpy.updating>` section for a guide to port your
code from the ``pandas.rpy`` to ``rpy2`` functions.
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst
index e92798ea17448..c25e734a046b2 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials.rst
@@ -138,7 +138,7 @@ Modern Pandas
Excel charts with pandas, vincent and xlsxwriter
------------------------------------------------
-- `Using Pandas and XlsxWriter to create Excel charts <http://pandas-xlsxwriter-charts.readthedocs.org/>`_
+- `Using Pandas and XlsxWriter to create Excel charts <https://pandas-xlsxwriter-charts.readthedocs.io/>`_
Various Tutorials
-----------------
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
index 77dc249aeb788..2a1f2cc47d48e 100644
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -18,6 +18,8 @@ What's New
These are new features and improvements of note in each release.
+.. include:: whatsnew/v0.19.1.txt
+
.. include:: whatsnew/v0.19.0.txt
.. include:: whatsnew/v0.18.1.txt
diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt
index a91e0ab9e4961..181cd401c85d6 100644
--- a/doc/source/whatsnew/v0.14.0.txt
+++ b/doc/source/whatsnew/v0.14.0.txt
@@ -401,7 +401,7 @@ through SQLAlchemy (:issue:`2717`, :issue:`4163`, :issue:`5950`, :issue:`6292`).
All databases supported by SQLAlchemy can be used, such
as PostgreSQL, MySQL, Oracle, Microsoft SQL server (see documentation of
SQLAlchemy on `included dialects
-<http://sqlalchemy.readthedocs.org/en/latest/dialects/index.html>`_).
+<https://sqlalchemy.readthedocs.io/en/latest/dialects/index.html>`_).
The functionality of providing DBAPI connection objects will only be supported
for sqlite3 in the future. The ``'mysql'`` flavor is deprecated.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index fc13224d3fe6e..9cb299593076d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -141,7 +141,7 @@ as well as the ``.sum()`` operation.
Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performing multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
-.. _dask: https://dask.readthedocs.org/en/latest/
+.. _dask: https://dask.readthedocs.io/en/latest/
.. _QT: https://wiki.python.org/moin/PyQt
.. _whatsnew_0170.plot:
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 60847469aa02c..8e7e95c071ea4 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1560,6 +1560,6 @@ Bug Fixes
- Bug in ``.to_string()`` when called with an integer ``line_width`` and ``index=False`` raises an UnboundLocalError exception because ``idx`` referenced before assignment.
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
-- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
+- ``PeriodIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if grouped dataframe contains empty bins (:issue:`13629`)
- Bug in ``Index.copy()`` where ``name`` parameter was ignored (:issue:`14302`)
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
new file mode 100644
index 0000000000000..daceb18a53457
--- /dev/null
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -0,0 +1,48 @@
+.. _whatsnew_0191:
+
+v0.19.1 (????, 2016)
+---------------------
+
+This is a minor bug-fix release from 0.19.0 and includes a large number of
+bug fixes along with several new features, enhancements, and performance improvements.
+We recommend that all users upgrade to this version.
+
+Highlights include:
+
+
+.. contents:: What's new in v0.19.1
+ :local:
+ :backlinks: none
+
+
+.. _whatsnew_0191.performance:
+
+Performance Improvements
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+
+
+
+
+.. _whatsnew_0191.bug_fixes:
+
+Bug Fixes
+~~~~~~~~~
+
+
+
+
+- Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`)
+
+
+
+
+
+
+
+
+- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
+- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
+- Bug in ``asfreq``, where frequency wasn't set for empty Series (:issue:`14320`)
\ No newline at end of file
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6fb0090dea114..1798a35168265 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1345,7 +1345,9 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
- defaults to csv.QUOTE_MINIMAL
+ defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
+ then floats are comverted to strings and thus csv.QUOTE_NONNUMERIC
+ will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 1ab5dbb737739..0c465da24a17e 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -116,12 +116,27 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
return result
- def _verify_integrity(self):
- """Raises ValueError if length of levels and labels don't match or any
- label would exceed level bounds"""
+ def _verify_integrity(self, labels=None, levels=None):
+ """
+
+ Parameters
+ ----------
+ labels : optional list
+ Labels to check for validity. Defaults to current labels.
+ levels : optional list
+ Levels to check for validity. Defaults to current levels.
+
+ Raises
+ ------
+ ValueError
+ * if length of levels and labels don't match or any label would
+ exceed level bounds
+ """
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
- labels, levels = self.labels, self.levels
+ labels = labels or self.labels
+ levels = levels or self.levels
+
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
@@ -162,6 +177,9 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
+ if verify_integrity:
+ self._verify_integrity(levels=new_levels)
+
names = self.names
self._levels = new_levels
if any(names):
@@ -170,9 +188,6 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
self._tuples = None
self._reset_cache()
- if verify_integrity:
- self._verify_integrity()
-
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
@@ -268,13 +283,13 @@ def _set_labels(self, labels, level=None, copy=False, validate=True,
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
+ if verify_integrity:
+ self._verify_integrity(labels=new_labels)
+
self._labels = new_labels
self._tuples = None
self._reset_cache()
- if verify_integrity:
- self._verify_integrity()
-
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index e5aaba26135e7..b7cd8a1c01224 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -4,21 +4,21 @@
from datetime import datetime
-from numpy import nan
import numpy as np
+from numpy import nan
-from pandas.compat import lrange
-from pandas import DataFrame, Series, Index, Timestamp
import pandas as pd
-from pandas.util.testing import (assert_series_equal,
- assert_frame_equal,
- assertRaisesRegexp)
-
-import pandas.util.testing as tm
+from pandas import DataFrame, Index, Series, Timestamp
+from pandas.compat import lrange
from pandas.tests.frame.common import TestData
+import pandas.util.testing as tm
+from pandas.util.testing import (assertRaisesRegexp,
+ assert_frame_equal,
+ assert_series_equal)
+
class TestDataFrameConcatCommon(tm.TestCase, TestData):
@@ -324,6 +324,29 @@ def test_join_multiindex_leftright(self):
assert_frame_equal(df2.join(df1, how='left'),
exp[['value2', 'value1']])
+ def test_concat_named_keys(self):
+ # GH 14252
+ df = pd.DataFrame({'foo': [1, 2], 'bar': [0.1, 0.2]})
+ index = Index(['a', 'b'], name='baz')
+ concatted_named_from_keys = pd.concat([df, df], keys=index)
+ expected_named = pd.DataFrame(
+ {'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]},
+ index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]),
+ names=['baz', None]))
+ assert_frame_equal(concatted_named_from_keys, expected_named)
+
+ index_no_name = Index(['a', 'b'], name=None)
+ concatted_named_from_names = pd.concat(
+ [df, df], keys=index_no_name, names=['baz'])
+ assert_frame_equal(concatted_named_from_names, expected_named)
+
+ concatted_unnamed = pd.concat([df, df], keys=index_no_name)
+ expected_unnamed = pd.DataFrame(
+ {'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]},
+ index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]),
+ names=[None, None]))
+ assert_frame_equal(concatted_unnamed, expected_unnamed)
+
class TestDataFrameCombineFirst(tm.TestCase, TestData):
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index 9758c2b9c805e..55fd169e26eb7 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -323,6 +323,13 @@ def test_asfreq_datetimeindex(self):
ts = df['A'].asfreq('B')
tm.assertIsInstance(ts.index, DatetimeIndex)
+ def test_asfreq_datetimeindex_empty_series(self):
+ # GH 14340
+ empty = Series(index=pd.DatetimeIndex([])).asfreq('H')
+ normal = Series(index=pd.DatetimeIndex(["2016-09-29 11:00"]),
+ data=[3]).asfreq('H')
+ self.assertEqual(empty.index.freq, normal.index.freq)
+
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index cd9ce0102ca1e..fdc5a2eaec812 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -149,14 +149,14 @@ def test_set_levels(self):
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
- def assert_matching(actual, expected):
+ def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
- exp = np.asarray(exp, dtype=np.object_)
- tm.assert_numpy_array_equal(act, exp)
+ exp = np.asarray(exp)
+ tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
@@ -204,6 +204,31 @@ def assert_matching(actual, expected):
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
+ # illegal level changing should not change levels
+ # GH 13754
+ original_index = self.index.copy()
+ for inplace in [True, False]:
+ with assertRaisesRegexp(ValueError, "^On"):
+ self.index.set_levels(['c'], level=0, inplace=inplace)
+ assert_matching(self.index.levels, original_index.levels,
+ check_dtype=True)
+
+ with assertRaisesRegexp(ValueError, "^On"):
+ self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
+ inplace=inplace)
+ assert_matching(self.index.labels, original_index.labels,
+ check_dtype=True)
+
+ with assertRaisesRegexp(TypeError, "^Levels"):
+ self.index.set_levels('c', level=0, inplace=inplace)
+ assert_matching(self.index.levels, original_index.levels,
+ check_dtype=True)
+
+ with assertRaisesRegexp(TypeError, "^Labels"):
+ self.index.set_labels(1, level=0, inplace=inplace)
+ assert_matching(self.index.labels, original_index.labels,
+ check_dtype=True)
+
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 8cdde8d92b28f..a8c43195f5552 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1369,7 +1369,8 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
- keys = clean_keys
+ name = getattr(keys, 'name', None)
+ keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
@@ -1454,7 +1455,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
self.axis = axis
self.join_axes = join_axes
self.keys = keys
- self.names = names
+ self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index f1a209053445a..7ce97f94ea5ef 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -1344,7 +1344,10 @@ def asfreq(obj, freq, method=None, how=None, normalize=False):
return new_obj
else:
if len(obj.index) == 0:
- return obj.copy()
+ new_index = obj.index._shallow_copy(freq=to_offset(freq))
+ new_obj = obj.copy()
+ new_obj.index = new_index
+ return new_obj
dti = date_range(obj.index[0], obj.index[-1], freq=freq)
dti.name = obj.index.name
rs = obj.reindex(dti, method=method)
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 8a86fcba32ecb..a6d58fa3e7ef3 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -867,7 +867,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([], dtype=np.int64))
+ np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
@@ -875,7 +875,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([1], dtype=np.int64))
+ np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
@@ -1717,7 +1717,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([], dtype=np.int64))
+ np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
@@ -1725,7 +1725,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([1], dtype=np.int64))
+ np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
@@ -2714,7 +2714,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([], dtype=np.int64))
+ np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
@@ -2722,7 +2722,7 @@ def test_nat(self):
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
- np.array([1], dtype=np.int64))
+ np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index a85a606075911..c7e4f03fcd792 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -17,7 +17,8 @@
from pytz import NonExistentTimeError
import pandas.util.testing as tm
-from pandas.util.testing import assert_frame_equal, set_timezone
+from pandas.util.testing import (assert_frame_equal, assert_series_equal,
+ set_timezone)
from pandas.compat import lrange, zip
try:
@@ -535,6 +536,44 @@ def test_ambiguous_nat(self):
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
self.assert_numpy_array_equal(di_test.values, localized.values)
+ def test_ambiguous_bool(self):
+ # make sure that we are correctly accepting bool values as ambiguous
+
+ # gh-14402
+ t = Timestamp('2015-11-01 01:00:03')
+ expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
+ expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
+
+ def f():
+ t.tz_localize('US/Central')
+ self.assertRaises(pytz.AmbiguousTimeError, f)
+
+ result = t.tz_localize('US/Central', ambiguous=True)
+ self.assertEqual(result, expected0)
+
+ result = t.tz_localize('US/Central', ambiguous=False)
+ self.assertEqual(result, expected1)
+
+ s = Series([t])
+ expected0 = Series([expected0])
+ expected1 = Series([expected1])
+
+ def f():
+ s.dt.tz_localize('US/Central')
+ self.assertRaises(pytz.AmbiguousTimeError, f)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=True)
+ assert_series_equal(result, expected0)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=[True])
+ assert_series_equal(result, expected0)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=False)
+ assert_series_equal(result, expected1)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=[False])
+ assert_series_equal(result, expected1)
+
def test_nonexistent_raise_coerce(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
@@ -629,14 +668,14 @@ def test_localized_at_time_between_time(self):
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
def test_string_index_alias_tz_aware(self):
@@ -723,7 +762,7 @@ def test_frame_no_datetime64_dtype(self):
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
@@ -1324,7 +1363,7 @@ def test_append_aware(self):
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='US/Eastern')
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')
@@ -1336,7 +1375,7 @@ def test_append_aware(self):
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='UTC')
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
utc = rng1.tz
self.assertEqual(utc, ts_result.index.tz)
@@ -1352,7 +1391,7 @@ def test_append_aware(self):
exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'),
Timestamp('1/1/2011 02:00', tz='US/Central')])
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
def test_append_dst(self):
rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',
@@ -1368,7 +1407,7 @@ def test_append_dst(self):
'2016-08-01 02:00', '2016-08-01 03:00'],
tz='US/Eastern')
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
- tm.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
def test_append_aware_naive(self):
@@ -1429,7 +1468,7 @@ def test_arith_utc_convert(self):
expected = uts1 + uts2
self.assertEqual(result.index.tz, pytz.UTC)
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 9073ad0abd535..bab45595cd60f 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4155,6 +4155,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
"""
cdef:
ndarray[int64_t] trans, deltas, idx_shifted
+ ndarray ambiguous_array
Py_ssize_t i, idx, pos, ntrans, n = len(vals)
int64_t *tdata
int64_t v, left, right
@@ -4190,11 +4191,18 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
infer_dst = True
elif ambiguous == 'NaT':
fill = True
+ elif isinstance(ambiguous, bool):
+ is_dst = True
+ if ambiguous:
+ ambiguous_array = np.ones(len(vals), dtype=bool)
+ else:
+ ambiguous_array = np.zeros(len(vals), dtype=bool)
elif hasattr(ambiguous, '__iter__'):
is_dst = True
if len(ambiguous) != len(vals):
raise ValueError(
"Length of ambiguous bool-array must be the same size as vals")
+ ambiguous_array = np.asarray(ambiguous)
trans, deltas, typ = _get_dst_info(tz)
@@ -4286,7 +4294,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
if infer_dst and dst_hours[i] != NPY_NAT:
result[i] = dst_hours[i]
elif is_dst:
- if ambiguous[i]:
+ if ambiguous_array[i]:
result[i] = left
else:
result[i] = right
| - [x] closes #14320
- [x] tests added / passed
- [ ] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14340 | 2016-10-03T22:57:17Z | 2016-10-19T23:26:55Z | null | 2023-05-11T01:14:14Z |
Bug: Grouping by index and column fails on DataFrame with single index (GH14327) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0354a8046e873..a556d8707a21d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -79,3 +79,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index (:issue`14327`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 3c376e3188eac..5223c0ac270f3 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2201,36 +2201,12 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
- inds = index.labels[level]
- level_index = index.levels[level]
-
if self.name is None:
self.name = index.names[level]
- # XXX complete hack
-
- if grouper is not None:
- level_values = index.levels[level].take(inds)
- self.grouper = level_values.map(self.grouper)
- else:
- # all levels may not be observed
- labels, uniques = algos.factorize(inds, sort=True)
-
- if len(uniques) > 0 and uniques[0] == -1:
- # handle NAs
- mask = inds != -1
- ok_labels, uniques = algos.factorize(inds[mask], sort=True)
-
- labels = np.empty(len(inds), dtype=inds.dtype)
- labels[mask] = ok_labels
- labels[~mask] = -1
-
- if len(uniques) < len(level_index):
- level_index = level_index.take(uniques)
+ self.grouper, self._labels, self._group_index = \
+ index._get_grouper_for_level(self.grouper, level)
- self._labels = labels
- self._group_index = level_index
- self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 557b9b2b17e95..aea50d490a9c9 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -432,6 +432,35 @@ def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
+ def _get_grouper_for_level(self, group_mapper, level):
+ """
+ Get index grouper corresponding to an index level
+
+ Parameters
+ ----------
+ group_mapper: Group mapping function or None
+ Function mapping index values to groups
+ level : int
+ Index level (Only used by MultiIndex override)
+
+ Returns
+ -------
+ grouper : Index
+ Index of values to group on
+ labels : None
+ Array of locations in level_index
+ (Only returned by MultiIndex override)
+ level_index : None
+ Index of unique values for level
+ (Only returned by MultiIndex override)
+ """
+ if group_mapper is None:
+ grouper = self
+ else:
+ grouper = self.map(group_mapper)
+
+ return grouper, None, None
+
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 1ab5dbb737739..d43034b0ebdd6 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -524,6 +524,55 @@ def _format_native_types(self, na_rep='nan', **kwargs):
return mi.values
+ def _get_grouper_for_level(self, group_mapper, level):
+ """
+ Get index grouper corresponding to an index level
+
+ Parameters
+ ----------
+ group_mapper: Group mapping function or None
+ Function mapping index values to groups
+ level : int
+ Index level
+
+ Returns
+ -------
+ grouper : Index
+ Index of values to group on
+ labels : ndarray of int or None
+ Array of locations in level_index
+ level_index : Index or None
+ Index of unique values for level
+ """
+ inds = self.labels[level]
+ level_index = self.levels[level]
+
+ if group_mapper is not None:
+ # Handle group mapping function and return
+ level_values = self.levels[level].take(inds)
+ grouper = level_values.map(group_mapper)
+ return grouper, None, None
+
+ labels, uniques = algos.factorize(inds, sort=True)
+
+ if len(uniques) > 0 and uniques[0] == -1:
+ # Handle NAs
+ mask = inds != -1
+ ok_labels, uniques = algos.factorize(inds[mask],
+ sort=True)
+
+ labels = np.empty(len(inds), dtype=inds.dtype)
+ labels[mask] = ok_labels
+ labels[~mask] = -1
+
+ if len(uniques) < len(level_index):
+ # Remove unobserved levels from level_index
+ level_index = level_index.take(uniques)
+
+ grouper = level_index.take(labels)
+
+ return grouper, labels, level_index
+
@property
def _constructor(self):
return MultiIndex.from_tuples
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 01c1d48c6d5c0..e787af5b7c322 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -458,6 +458,39 @@ def test_grouper_creation_bug(self):
expected = s.groupby(level='one').sum()
assert_series_equal(result, expected)
+ def test_grouper_column_and_index(self):
+ # GH 14327
+
+ # Grouping a multi-index frame by a column and an index level should
+ # be equivalent to resetting the index and grouping by two columns
+ idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 3),
+ ('b', 1), ('b', 2), ('b', 3)])
+ idx.names = ['outer', 'inner']
+ df_multi = pd.DataFrame({"A": np.arange(6),
+ 'B': ['one', 'one', 'two',
+ 'two', 'one', 'one']},
+ index=idx)
+ result = df_multi.groupby(['B', pd.Grouper(level='inner')]).mean()
+ expected = df_multi.reset_index().groupby(['B', 'inner']).mean()
+ assert_frame_equal(result, expected)
+
+ # Test the reverse grouping order
+ result = df_multi.groupby([pd.Grouper(level='inner'), 'B']).mean()
+ expected = df_multi.reset_index().groupby(['inner', 'B']).mean()
+ assert_frame_equal(result, expected)
+
+ # Grouping a single-index frame by a column and the index should
+ # be equivalent to resetting the index and grouping by two columns
+ df_single = df_multi.reset_index('outer')
+ result = df_single.groupby(['B', pd.Grouper(level='inner')]).mean()
+ expected = df_single.reset_index().groupby(['B', 'inner']).mean()
+ assert_frame_equal(result, expected)
+
+ # Test the reverse grouping order
+ result = df_single.groupby([pd.Grouper(level='inner'), 'B']).mean()
+ expected = df_single.reset_index().groupby(['inner', 'B']).mean()
+ assert_frame_equal(result, expected)
+
def test_grouper_getting_correct_binner(self):
# GH 10063
| - [x] closes #14327
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Don't know if this is too late for 0.19.0 but I went ahead and added the whatsnew entry there for now.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14333 | 2016-10-02T01:31:15Z | 2016-10-14T23:35:59Z | null | 2016-10-14T23:41:45Z |
DOC: fix some sphinx build issues | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index b1795cb37200c..f52f72b49dd31 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -757,12 +757,18 @@ Use ``.astype`` or ``union_categoricals`` to get ``category`` result.
Following table summarizes the results of ``Categoricals`` related concatenations.
-| arg1 | arg2 | result |
-|---------|-------------------------------------------|---------|
-| category | category (identical categories) | category |
-| category | category (different categories, both not ordered) | object (dtype is inferred) |
++----------+--------------------------------------------------------+----------------------------+
+| arg1 | arg2 | result |
++==========+========================================================+============================+
+| category | category (identical categories) | category |
++----------+--------------------------------------------------------+----------------------------+
+| category | category (different categories, both not ordered) | object (dtype is inferred) |
++----------+--------------------------------------------------------+----------------------------+
| category | category (different categories, either one is ordered) | object (dtype is inferred) |
-| category | not category | object (dtype is inferred) |
++----------+--------------------------------------------------------+----------------------------+
+| category | not category | object (dtype is inferred) |
++----------+--------------------------------------------------------+----------------------------+
+
Getting Data In/Out
-------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4aa1ac4a47090..697438df87d4f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3999,7 +3999,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False):
converted : type of caller
To learn more about the frequency strings, please see `this link
-<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
+ <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
from pandas.tseries.resample import asfreq
return asfreq(self, freq, method=method, how=how, normalize=normalize)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 557b9b2b17e95..5082fc84982c6 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -1994,7 +1994,7 @@ def symmetric_difference(self, other, result_name=None):
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
- dropped.
+ dropped.
Examples
--------
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index ff6c0b85a1e5c..f68750e242f1f 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -196,6 +196,9 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,
name : object
Name to be stored in the index
+ Notes
+ -----
+
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index f1e199adeebfc..c1b0936edaff9 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -112,6 +112,9 @@ class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index):
name : object
Name to be stored in the index
+ Notes
+ -----
+
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
| The table here: http://pandas-docs.github.io/pandas-docs-travis/categorical.html#concatenation is apparently not building well. Rst ...
| https://api.github.com/repos/pandas-dev/pandas/pulls/14332 | 2016-10-01T21:33:08Z | 2016-10-02T08:58:10Z | 2016-10-02T08:58:10Z | 2016-10-02T08:58:11Z |
TST: fix period tests for numpy 1.9.3 (GH14183) | diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 62cfcf7f1360e..e314081eac373 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -23,7 +23,8 @@
from pandas.compat.numpy import np_datetime64_compat
from pandas import (Series, DataFrame,
- _np_version_under1p9, _np_version_under1p12)
+ _np_version_under1p9, _np_version_under1p10,
+ _np_version_under1p12)
from pandas import tslib
import pandas.util.testing as tm
@@ -4177,7 +4178,7 @@ def test_pi_ops_errors(self):
with tm.assertRaises(TypeError):
np.add(obj, ng)
- if _np_version_under1p9:
+ if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
@@ -4186,7 +4187,7 @@ def test_pi_ops_errors(self):
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
- if _np_version_under1p9:
+ if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
@@ -4293,7 +4294,7 @@ def test_pi_sub_period(self):
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
- if _np_version_under1p9:
+ if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
| Partly addresses #14183
| https://api.github.com/repos/pandas-dev/pandas/pulls/14331 | 2016-10-01T20:27:12Z | 2016-10-02T08:45:53Z | 2016-10-02T08:45:53Z | 2016-10-02T08:45:54Z |
BUG: mixed freq timeseries plotting with shared axes (GH13341) | diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt
index 49c8330490ed1..52bcc7d054629 100644
--- a/doc/source/whatsnew/v0.19.2.txt
+++ b/doc/source/whatsnew/v0.19.2.txt
@@ -38,7 +38,8 @@ Bug Fixes
- Bug in ``pd.cut`` with negative values and a single bin (:issue:`14652`)
- Bug in ``pd.to_numeric`` where a 0 was not unsigned on a ``downcast='unsigned'`` argument (:issue:`14401`)
-
+- Bug in plotting regular and irregular timeseries using shared axes
+ (``sharex=True`` or ``ax.twinx()``) (:issue:`13341`, :issue:`14322`).
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 0f7bc02e24915..f07aadba175f2 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -778,6 +778,41 @@ def test_mixed_freq_irreg_period(self):
irreg.plot()
ps.plot()
+ def test_mixed_freq_shared_ax(self):
+
+ # GH13341, using sharex=True
+ idx1 = date_range('2015-01-01', periods=3, freq='M')
+ idx2 = idx1[:1].union(idx1[2:])
+ s1 = Series(range(len(idx1)), idx1)
+ s2 = Series(range(len(idx2)), idx2)
+
+ fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
+ s1.plot(ax=ax1)
+ s2.plot(ax=ax2)
+
+ self.assertEqual(ax1.freq, 'M')
+ self.assertEqual(ax2.freq, 'M')
+ self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
+ ax2.lines[0].get_xydata()[0, 0])
+
+ # using twinx
+ fig, ax1 = self.plt.subplots()
+ ax2 = ax1.twinx()
+ s1.plot(ax=ax1)
+ s2.plot(ax=ax2)
+
+ self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
+ ax2.lines[0].get_xydata()[0, 0])
+
+ # TODO (GH14330, GH14322)
+ # plotting the irregular first does not yet work
+ # fig, ax1 = plt.subplots()
+ # ax2 = ax1.twinx()
+ # s2.plot(ax=ax1)
+ # s1.plot(ax=ax2)
+ # self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
+ # ax2.lines[0].get_xydata()[0, 0])
+
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py
index fe64af67af0ed..89aecf2acc07e 100644
--- a/pandas/tseries/plotting.py
+++ b/pandas/tseries/plotting.py
@@ -162,18 +162,37 @@ def _decorate_axes(ax, freq, kwargs):
ax.date_axis_info = None
-def _get_freq(ax, series):
- # get frequency from data
- freq = getattr(series.index, 'freq', None)
- if freq is None:
- freq = getattr(series.index, 'inferred_freq', None)
-
+def _get_ax_freq(ax):
+ """
+ Get the freq attribute of the ax object if set.
+ Also checks shared axes (eg when using secondary yaxis, sharex=True
+ or twinx)
+ """
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
+ # check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
+ if ax_freq is None:
+ # check if a shared ax (sharex/twinx) has already freq set
+ shared_axes = ax.get_shared_x_axes().get_siblings(ax)
+ if len(shared_axes) > 1:
+ for shared_ax in shared_axes:
+ ax_freq = getattr(shared_ax, 'freq', None)
+ if ax_freq is not None:
+ break
+ return ax_freq
+
+
+def _get_freq(ax, series):
+ # get frequency from data
+ freq = getattr(series.index, 'freq', None)
+ if freq is None:
+ freq = getattr(series.index, 'inferred_freq', None)
+
+ ax_freq = _get_ax_freq(ax)
# use axes freq if no data freq
if freq is None:
@@ -191,7 +210,7 @@ def _get_freq(ax, series):
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
- ax_freq = getattr(ax, 'freq', None)
+ ax_freq = _get_ax_freq(ax)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
@@ -244,7 +263,7 @@ def _maybe_convert_index(ax, data):
freq = freq.rule_code
if freq is None:
- freq = getattr(ax, 'freq', None)
+ freq = _get_ax_freq(ax)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
| Closes #13341, partly closes #14322 (that example still does not work when first plotting the irregular series)
cc @sinhrks @TomAugspurger
| https://api.github.com/repos/pandas-dev/pandas/pulls/14330 | 2016-10-01T20:05:31Z | 2016-11-26T09:13:05Z | 2016-11-26T09:13:05Z | 2016-11-26T09:13:05Z |
to_latex encoding issue | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1cc689528caaa..6fb0090dea114 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1635,7 +1635,8 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
- Default encoding is ascii in Python 2 and utf-8 in Python 3
+ A string representing the encoding to use in the output file,
+ defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
diff --git a/pandas/formats/format.py b/pandas/formats/format.py
index e5089983ac8f7..7706666142a64 100644
--- a/pandas/formats/format.py
+++ b/pandas/formats/format.py
@@ -654,6 +654,9 @@ def to_latex(self, column_format=None, longtable=False, encoding=None):
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable)
+ if encoding is None:
+ encoding = 'ascii' if compat.PY2 else 'utf-8'
+
if hasattr(self.buf, 'write'):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py
index 58e9b30e7f624..3bbfd621d2342 100644
--- a/pandas/tests/formats/test_format.py
+++ b/pandas/tests/formats/test_format.py
@@ -2823,7 +2823,7 @@ def test_to_latex_filename(self):
if compat.PY3: # python3: pandas default encoding is utf-8
with tm.ensure_clean('test.tex') as path:
df.to_latex(path)
- with codecs.open(path, 'r') as f:
+ with codecs.open(path, 'r', encoding='utf-8') as f:
self.assertEqual(df.to_latex(), f.read())
else:
# python2 default encoding is ascii, so an error should be raised
| - [x] closes #14275
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14329 | 2016-10-01T17:34:21Z | 2016-10-02T11:56:59Z | 2016-10-02T11:56:58Z | 2016-10-02T11:57:09Z |
Remove NotImplementedError for parse_dates keyword in read_excel | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d436fa52918d3..11a699b6a183d 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2503,6 +2503,20 @@ indices to be parsed.
read_excel('path_to_file.xls', 'Sheet1', parse_cols=[0, 2, 3])
+
+Parsing Dates
++++++++++++++
+
+Datetime-like values are normally automatically converted to the appropriate
+dtype when reading the excel file. But if you have a column of strings that
+*look* like dates (but are not actually formatted as dates in excel), you can
+use the `parse_dates` keyword to parse those strings to datetimes:
+
+.. code-block:: python
+
+ read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings'])
+
+
Cell Converters
+++++++++++++++
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index f4110cba68c31..276757f5e7d78 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -516,6 +516,7 @@ Other enhancements
- The ``pd.read_json`` and ``DataFrame.to_json`` has gained support for reading and writing json lines with ``lines`` option see :ref:`Line delimited json <io.jsonl>` (:issue:`9180`)
- :func:``read_excel`` now supports the true_values and false_values keyword arguments (:issue:`13347`)
- ``groupby()`` will now accept a scalar and a single-element list for specifying ``level`` on a non-``MultiIndex`` grouper. (:issue:`13907`)
+- Re-enable the ``parse_dates`` keyword of ``read_excel`` to parse string columns as dates (:issue:`14326`)
.. _whatsnew_0190.api:
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 6662d106ad85d..ac1d0fce4b51a 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -335,13 +335,10 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, names=None,
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
- if parse_dates:
- raise NotImplementedError("parse_dates keyword of read_excel "
- "is not implemented")
- if date_parser is not None:
- raise NotImplementedError("date_parser keyword of read_excel "
- "is not implemented")
+ if parse_dates is True and not index_col:
+ warn("The 'parse_dates=True' keyword of read_excel was provided"
+ " without an 'index_col' keyword value.")
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index d163b05aa01d4..611b1abe57d31 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -887,17 +887,27 @@ def test_read_excel_chunksize(self):
chunksize=100)
def test_read_excel_parse_dates(self):
- # GH 11544
- with tm.assertRaises(NotImplementedError):
- pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
- parse_dates=True)
+ # GH 11544, 12051
- def test_read_excel_date_parser(self):
- # GH 11544
- with tm.assertRaises(NotImplementedError):
- dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
- pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
- date_parser=dateparse)
+ df = DataFrame(
+ {'col': [1, 2, 3],
+ 'date_strings': pd.date_range('2012-01-01', periods=3)})
+ df2 = df.copy()
+ df2['date_strings'] = df2['date_strings'].dt.strftime('%m/%d/%Y')
+
+ with ensure_clean(self.ext) as pth:
+ df2.to_excel(pth)
+
+ res = read_excel(pth)
+ tm.assert_frame_equal(df2, res)
+
+ res = read_excel(pth, parse_dates=['date_strings'])
+ tm.assert_frame_equal(df, res)
+
+ dateparser = lambda x: pd.datetime.strptime(x, '%m/%d/%Y')
+ res = read_excel(pth, parse_dates=['date_strings'],
+ date_parser=dateparser)
+ tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self):
# GH 4903
@@ -1339,8 +1349,7 @@ def test_to_excel_multiindex(self):
# round trip
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
- df = read_excel(reader, 'test1', index_col=[0, 1],
- parse_dates=False)
+ df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
@@ -1381,8 +1390,7 @@ def test_to_excel_multiindex_cols(self):
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
df = read_excel(reader, 'test1', header=header,
- index_col=[0, 1],
- parse_dates=False)
+ index_col=[0, 1])
if not self.merge_cells:
fm = frame.columns.format(sparsify=False,
adjoin=False, names=False)
| Rebase and update of PR https://github.com/pydata/pandas/pull/12051
| https://api.github.com/repos/pandas-dev/pandas/pulls/14326 | 2016-09-30T09:18:07Z | 2017-03-27T18:42:00Z | null | 2022-06-15T22:23:44Z |
BUG: Patch Checked Add Method | diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 352acee23df2d..cf604822d6eea 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -278,7 +278,7 @@ Please try to maintain backward compatibility. *pandas* has lots of users with l
Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore, it is worth getting in the habit of writing tests ahead of time so this is never an issue.
-Like many packages, *pandas* uses the [Nose testing system](http://nose.readthedocs.org/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
+Like many packages, *pandas* uses the [Nose testing system](https://nose.readthedocs.io/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
#### Writing tests
@@ -323,7 +323,7 @@ Performance matters and it is worth considering whether your code has introduced
>
> The asv benchmark suite was translated from the previous framework, vbench, so many stylistic issues are likely a result of automated transformation of the code.
-To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](http://asv.readthedocs.org/en/latest/installing.html).
+To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](https://asv.readthedocs.io/en/latest/installing.html).
To install asv:
@@ -360,7 +360,7 @@ This command is equivalent to:
This will launch every test only once, display stderr from the benchmarks, and use your local `python` that comes from your `$PATH`.
-Information on how to write a benchmark can be found in the [asv documentation](http://asv.readthedocs.org/en/latest/writing_benchmarks.html).
+Information on how to write a benchmark can be found in the [asv documentation](https://asv.readthedocs.io/en/latest/writing_benchmarks.html).
#### Running the vbench performance test suite (phasing out)
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index 6eac7b4831f0f..9807639143ddb 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -15,6 +15,14 @@ def setup(self):
self.int = pd.Int64Index(np.arange(N).repeat(5))
self.float = pd.Float64Index(np.random.randn(N).repeat(5))
+ # Convenience naming.
+ self.checked_add = pd.core.nanops._checked_add_with_arr
+
+ self.arr = np.arange(1000000)
+ self.arrpos = np.arange(1000000)
+ self.arrneg = np.arange(-1000000, 0)
+ self.arrmixed = np.array([1, -1]).repeat(500000)
+
def time_int_factorize(self):
self.int.factorize()
@@ -29,3 +37,21 @@ def time_int_duplicated(self):
def time_float_duplicated(self):
self.float.duplicated()
+
+ def time_add_overflow_pos_scalar(self):
+ self.checked_add(self.arr, 1)
+
+ def time_add_overflow_neg_scalar(self):
+ self.checked_add(self.arr, -1)
+
+ def time_add_overflow_zero_scalar(self):
+ self.checked_add(self.arr, 0)
+
+ def time_add_overflow_pos_arr(self):
+ self.checked_add(self.arr, self.arrpos)
+
+ def time_add_overflow_neg_arr(self):
+ self.checked_add(self.arr, self.arrneg)
+
+ def time_add_overflow_mixed_arr(self):
+ self.checked_add(self.arr, self.arrmixed)
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index 2b10cb88a3134..de9aa18937985 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -20,4 +20,4 @@ def setup(self):
self.cur_index = self.df.index
def time_setattr_dataframe_index(self):
- self.df.index = self.cur_index
\ No newline at end of file
+ self.df.index = self.cur_index
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 265ffbc7261ca..f68cf9399c546 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -49,4 +49,4 @@ def setup(self):
self.s = Series(([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')] * 1000))
def time_index_from_series_ctor(self):
- Index(self.s)
\ No newline at end of file
+ Index(self.s)
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 85f3c1628bd8b..6f40611e68531 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -1703,4 +1703,4 @@ def setup(self):
self.dict_list = [dict(zip(self.columns, row)) for row in self.frame.values]
def time_series_ctor_from_dict(self):
- Series(self.some_dict)
\ No newline at end of file
+ Series(self.some_dict)
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index e12b00dd06b39..5f3671012e6d5 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -548,6 +548,32 @@ def time_groupby_sum(self):
self.df.groupby(['a'])['b'].sum()
+class groupby_period(object):
+ # GH 14338
+ goal_time = 0.2
+
+ def make_grouper(self, N):
+ return pd.period_range('1900-01-01', freq='D', periods=N)
+
+ def setup(self):
+ N = 10000
+ self.grouper = self.make_grouper(N)
+ self.df = pd.DataFrame(np.random.randn(N, 2))
+
+ def time_groupby_sum(self):
+ self.df.groupby(self.grouper).sum()
+
+
+class groupby_datetime(groupby_period):
+ def make_grouper(self, N):
+ return pd.date_range('1900-01-01', freq='D', periods=N)
+
+
+class groupby_datetimetz(groupby_period):
+ def make_grouper(self, N):
+ return pd.date_range('1900-01-01', freq='D', periods=N,
+ tz='US/Central')
+
#----------------------------------------------------------------------
# Series.value_counts
diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/hdfstore_bench.py
index 7638cc2a0f8df..659fc4941da54 100644
--- a/asv_bench/benchmarks/hdfstore_bench.py
+++ b/asv_bench/benchmarks/hdfstore_bench.py
@@ -348,4 +348,4 @@ def remove(self, f):
try:
os.remove(self.f)
except:
- pass
\ No newline at end of file
+ pass
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index a0a1b560d36f3..2c94f9b2b1e8c 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -344,4 +344,4 @@ def setup(self):
self.mi = MultiIndex.from_product([self.level1, self.level2])
def time_multiindex_with_datetime_level_sliced(self):
- self.mi[:10].values
\ No newline at end of file
+ self.mi[:10].values
diff --git a/asv_bench/benchmarks/io_sql.py b/asv_bench/benchmarks/io_sql.py
index 9a6b21f9e067a..c583ac1768c90 100644
--- a/asv_bench/benchmarks/io_sql.py
+++ b/asv_bench/benchmarks/io_sql.py
@@ -212,4 +212,4 @@ def setup(self):
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_sql_write_sqlalchemy(self):
- self.df.to_sql('test1', self.engine, if_exists='replace')
\ No newline at end of file
+ self.df.to_sql('test1', self.engine, if_exists='replace')
diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py
index 3f80c4c0c6338..5419571c75b43 100644
--- a/asv_bench/benchmarks/packers.py
+++ b/asv_bench/benchmarks/packers.py
@@ -547,6 +547,31 @@ def remove(self, f):
pass
+class packers_write_json_lines(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.f = '__test__.msg'
+ self.N = 100000
+ self.C = 5
+ self.index = date_range('20000101', periods=self.N, freq='H')
+ self.df = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
+ self.remove(self.f)
+ self.df.index = np.arange(self.N)
+
+ def time_packers_write_json_lines(self):
+ self.df.to_json(self.f, orient="records", lines=True)
+
+ def teardown(self):
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
+
class packers_write_json_T(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
index 0b0e73847aa96..4f6fd4a5a2df8 100644
--- a/asv_bench/benchmarks/panel_ctor.py
+++ b/asv_bench/benchmarks/panel_ctor.py
@@ -61,4 +61,4 @@ def setup(self):
self.data_frames[x] = self.df
def time_panel_from_dict_two_different_indexes(self):
- Panel.from_dict(self.data_frames)
\ No newline at end of file
+ Panel.from_dict(self.data_frames)
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
index 90118eaf6e407..0bd572db2211a 100644
--- a/asv_bench/benchmarks/panel_methods.py
+++ b/asv_bench/benchmarks/panel_methods.py
@@ -53,4 +53,4 @@ def setup(self):
self.panel = Panel(np.random.randn(100, len(self.index), 1000))
def time_panel_shift_minor(self):
- self.panel.shift(1, axis='minor')
\ No newline at end of file
+ self.panel.shift(1, axis='minor')
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index e9f33ebfce0bd..869ddd8d6fa49 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -45,4 +45,4 @@ def setup(self):
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_replacena(self):
- self.ts.replace(np.nan, 0.0, inplace=True)
\ No newline at end of file
+ self.ts.replace(np.nan, 0.0, inplace=True)
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 604fa5092a231..ab235e085986c 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -73,4 +73,4 @@ def setup(self):
break
def time_unstack_sparse_keyspace(self):
- self.idf.unstack()
\ No newline at end of file
+ self.idf.unstack()
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index daf5135e64c40..12fbb2478c2a5 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -258,4 +258,4 @@ def time_rolling_skew(self):
rolling_skew(self.arr, self.win)
def time_rolling_kurt(self):
- rolling_kurt(self.arr, self.win)
\ No newline at end of file
+ rolling_kurt(self.arr, self.win)
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index e4f91b1b9c0c6..d64606214ca6a 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -390,4 +390,4 @@ def time_strings_upper(self):
self.many.str.upper()
def make_series(self, letters, strlen, size):
- return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
\ No newline at end of file
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py
index 9719fd87dfb2e..8470525dd01fa 100644
--- a/asv_bench/benchmarks/timedelta.py
+++ b/asv_bench/benchmarks/timedelta.py
@@ -1,5 +1,5 @@
from .pandas_vb_common import *
-from pandas import to_timedelta
+from pandas import to_timedelta, Timestamp
class timedelta_convert_int(object):
@@ -47,3 +47,14 @@ def time_timedelta_convert_coerce(self):
def time_timedelta_convert_ignore(self):
to_timedelta(self.arr, errors='ignore')
+
+
+class timedelta_add_overflow(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.td = to_timedelta(np.arange(1000000))
+ self.ts = Timestamp('2000')
+
+ def test_add_td_ts(self):
+ self.td + self.ts
diff --git a/ci/prep_cython_cache.sh b/ci/prep_cython_cache.sh
index 6f16dce2fb431..cadc356b641f9 100755
--- a/ci/prep_cython_cache.sh
+++ b/ci/prep_cython_cache.sh
@@ -3,8 +3,8 @@
ls "$HOME/.cache/"
PYX_CACHE_DIR="$HOME/.cache/pyxfiles"
-pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx"`
-pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx"`
+pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd"`
+pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx" -o -name "*.pxd"`
CACHE_File="$HOME/.cache/cython_files.tar"
diff --git a/ci/submit_cython_cache.sh b/ci/submit_cython_cache.sh
index 4f60df0ccb2d8..5c98c3df61736 100755
--- a/ci/submit_cython_cache.sh
+++ b/ci/submit_cython_cache.sh
@@ -2,7 +2,7 @@
CACHE_File="$HOME/.cache/cython_files.tar"
PYX_CACHE_DIR="$HOME/.cache/pyxfiles"
-pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx"`
+pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd"`
rm -rf $CACHE_File
rm -rf $PYX_CACHE_DIR
diff --git a/doc/README.rst b/doc/README.rst
index a93ad32a4c8f8..a3733846d9ed1 100644
--- a/doc/README.rst
+++ b/doc/README.rst
@@ -155,9 +155,9 @@ Where to start?
---------------
There are a number of issues listed under `Docs
-<https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_
and `Good as first PR
-<https://github.com/pydata/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_
where you could start out.
Or maybe you have an idea of your own, by using pandas, looking for something
diff --git a/doc/_templates/autosummary/accessor_attribute.rst b/doc/_templates/autosummary/accessor_attribute.rst
index e38a9f22f9d99..a2f0eb5e068c4 100644
--- a/doc/_templates/autosummary/accessor_attribute.rst
+++ b/doc/_templates/autosummary/accessor_attribute.rst
@@ -3,4 +3,4 @@
.. currentmodule:: {{ module.split('.')[0] }}
-.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }}
\ No newline at end of file
+.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }}
diff --git a/doc/_templates/autosummary/accessor_method.rst b/doc/_templates/autosummary/accessor_method.rst
index 8175d8615ceb2..43dfc3b813120 100644
--- a/doc/_templates/autosummary/accessor_method.rst
+++ b/doc/_templates/autosummary/accessor_method.rst
@@ -3,4 +3,4 @@
.. currentmodule:: {{ module.split('.')[0] }}
-.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }}
\ No newline at end of file
+.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }}
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 19318aad3d53d..e5aa6b577270a 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1794,18 +1794,18 @@ The following functions are available for one dimensional object arrays or scala
- :meth:`~pandas.to_datetime` (conversion to datetime objects)
- .. ipython:: python
+ .. ipython:: python
- import datetime
- m = ['2016-07-09', datetime.datetime(2016, 3, 2)]
- pd.to_datetime(m)
+ import datetime
+ m = ['2016-07-09', datetime.datetime(2016, 3, 2)]
+ pd.to_datetime(m)
- :meth:`~pandas.to_timedelta` (conversion to timedelta objects)
- .. ipython:: python
+ .. ipython:: python
- m = ['5us', pd.Timedelta('1day')]
- pd.to_timedelta(m)
+ m = ['5us', pd.Timedelta('1day')]
+ pd.to_timedelta(m)
To force a conversion, we can pass in an ``errors`` argument, which specifies how pandas should deal with elements
that cannot be converted to desired dtype or object. By default, ``errors='raise'``, meaning that any errors encountered
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index f52f72b49dd31..090998570a358 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -973,7 +973,7 @@ are not numeric data (even in the case that ``.categories`` is numeric).
print("TypeError: " + str(e))
.. note::
- If such a function works, please file a bug at https://github.com/pydata/pandas!
+ If such a function works, please file a bug at https://github.com/pandas-dev/pandas!
dtype in apply
~~~~~~~~~~~~~~
diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst
index 85d432b546f21..7ec91d251f15d 100644
--- a/doc/source/comparison_with_sas.rst
+++ b/doc/source/comparison_with_sas.rst
@@ -116,7 +116,7 @@ Reading External Data
Like SAS, pandas provides utilities for reading in data from
many formats. The ``tips`` dataset, found within the pandas
-tests (`csv <https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv>`_)
+tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_)
will be used in many of the following examples.
SAS provides ``PROC IMPORT`` to read csv data into a data set.
@@ -131,7 +131,7 @@ The pandas method is :func:`read_csv`, which works similarly.
.. ipython:: python
- url = 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv'
+ url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
tips = pd.read_csv(url)
tips.head()
diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst
index 099a0e9469058..7962e0e69faa1 100644
--- a/doc/source/comparison_with_sql.rst
+++ b/doc/source/comparison_with_sql.rst
@@ -23,7 +23,7 @@ structure.
.. ipython:: python
- url = 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv'
+ url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
tips = pd.read_csv(url)
tips.head()
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fd3a2493a53e8..4f679f3f728bf 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -295,15 +295,15 @@
'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
- 'py': ('http://pylib.readthedocs.org/en/latest/', None)
+ 'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
# extlinks alias
-extlinks = {'issue': ('https://github.com/pydata/pandas/issues/%s',
+extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
'GH'),
- 'wiki': ('https://github.com/pydata/pandas/wiki/%s',
+ 'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
'wiki ')}
ipython_exec_lines = [
@@ -468,10 +468,10 @@ def linkcode_resolve(domain, info):
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if '+' in pandas.__version__:
- return "http://github.com/pydata/pandas/blob/master/pandas/%s%s" % (
+ return "http://github.com/pandas-dev/pandas/blob/master/pandas/%s%s" % (
fn, linespec)
else:
- return "http://github.com/pydata/pandas/blob/v%s/pandas/%s%s" % (
+ return "http://github.com/pandas-dev/pandas/blob/v%s/pandas/%s%s" % (
pandas.__version__, fn, linespec)
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7f336abcaa6d7..a8a47a9d979c0 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -14,11 +14,11 @@ All contributions, bug reports, bug fixes, documentation improvements,
enhancements and ideas are welcome.
If you are simply looking to start working with the *pandas* codebase, navigate to the
-`GitHub "issues" tab <https://github.com/pydata/pandas/issues>`_ and start looking through
+`GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ and start looking through
interesting issues. There are a number of issues listed under `Docs
-<https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_
and `Difficulty Novice
-<https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_
+<https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_
where you could start out.
Or maybe through using *pandas* you have an idea of your own or are looking for something
@@ -27,7 +27,7 @@ about it!
Feel free to ask questions on the `mailing list
<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter
-<https://gitter.im/pydata/pandas>`_.
+<https://gitter.im/pandas-dev/pandas>`_.
Bug reports and enhancement requests
====================================
@@ -79,7 +79,7 @@ It can very quickly become overwhelming, but sticking to the guidelines below wi
straightforward and mostly trouble free. As always, if you are having difficulties please
feel free to ask for help.
-The code is hosted on `GitHub <https://www.github.com/pydata/pandas>`_. To
+The code is hosted on `GitHub <https://www.github.com/pandas-dev/pandas>`_. To
contribute you will need to sign up for a `free GitHub account
<https://github.com/signup/free>`_. We use `Git <http://git-scm.com/>`_ for
version control to allow many people to work together on the project.
@@ -103,12 +103,12 @@ Forking
-------
You will need your own fork to work on the code. Go to the `pandas project
-page <https://github.com/pydata/pandas>`_ and hit the ``Fork`` button. You will
+page <https://github.com/pandas-dev/pandas>`_ and hit the ``Fork`` button. You will
want to clone your fork to your machine::
git clone git@github.com:your-user-name/pandas.git pandas-yourname
cd pandas-yourname
- git remote add upstream git://github.com/pydata/pandas.git
+ git remote add upstream git://github.com/pandas-dev/pandas.git
This creates the directory `pandas-yourname` and connects your repository to
the upstream (main project) *pandas* repository.
@@ -360,7 +360,7 @@ follow the Numpy Docstring Standard (see above), but you don't need to install
this because a local copy of numpydoc is included in the *pandas* source
code.
`nbconvert <https://nbconvert.readthedocs.io/en/latest/>`_ and
-`nbformat <http://nbformat.readthedocs.io/en/latest/>`_ are required to build
+`nbformat <https://nbformat.readthedocs.io/en/latest/>`_ are required to build
the Jupyter notebooks included in the documentation.
If you have a conda environment named ``pandas_dev``, you can install the extra
@@ -467,7 +467,7 @@ and make these changes with::
pep8radius master --diff --in-place
Additional standards are outlined on the `code style wiki
-page <https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions>`_.
+page <https://github.com/pandas-dev/pandas/wiki/Code-Style-and-Conventions>`_.
Please try to maintain backward compatibility. *pandas* has lots of users with lots of
existing code, so don't break it if at all possible. If you think breakage is required,
@@ -490,7 +490,7 @@ Adding tests is one of the most common requests after code is pushed to *pandas*
it is worth getting in the habit of writing tests ahead of time so this is never an issue.
Like many packages, *pandas* uses the `Nose testing system
-<http://nose.readthedocs.org/en/latest/index.html>`_ and the convenient
+<https://nose.readthedocs.io/en/latest/index.html>`_ and the convenient
extensions in `numpy.testing
<http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
@@ -501,7 +501,7 @@ All tests should go into the ``tests`` subdirectory of the specific package.
This folder contains many current examples of tests, and we suggest looking to these for
inspiration. If your test requires working with files or
network connectivity, there is more information on the `testing page
-<https://github.com/pydata/pandas/wiki/Testing>`_ of the wiki.
+<https://github.com/pandas-dev/pandas/wiki/Testing>`_ of the wiki.
The ``pandas.util.testing`` module has many special ``assert`` functions that
make it easier to make statements about whether Series or DataFrame objects are
@@ -569,7 +569,7 @@ supports both python2 and python3.
To use all features of asv, you will need either ``conda`` or
``virtualenv``. For more details please check the `asv installation
-webpage <http://asv.readthedocs.org/en/latest/installing.html>`_.
+webpage <https://asv.readthedocs.io/en/latest/installing.html>`_.
To install asv::
@@ -624,7 +624,7 @@ This will display stderr from the benchmarks, and use your local
``python`` that comes from your ``$PATH``.
Information on how to write a benchmark and how to use asv can be found in the
-`asv documentation <http://asv.readthedocs.org/en/latest/writing_benchmarks.html>`_.
+`asv documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
.. _contributing.gbq_integration_tests:
@@ -639,7 +639,7 @@ on Travis-CI. The first step is to create a `service account
Integration tests for ``pandas.io.gbq`` are skipped in pull requests because
the credentials that are required for running Google BigQuery integration
tests are `encrypted <https://docs.travis-ci.com/user/encrypting-files/>`__
-on Travis-CI and are only accessible from the pydata/pandas repository. The
+on Travis-CI and are only accessible from the pandas-dev/pandas repository. The
credentials won't be available on forks of pandas. Here are the steps to run
gbq integration tests on a forked repository:
@@ -688,7 +688,7 @@ performance regressions.
You can run specific benchmarks using the ``-r`` flag, which takes a regular expression.
-See the `performance testing wiki <https://github.com/pydata/pandas/wiki/Performance-Testing>`_ for information
+See the `performance testing wiki <https://github.com/pandas-dev/pandas/wiki/Performance-Testing>`_ for information
on how to write a benchmark.
Documenting your code
@@ -712,8 +712,8 @@ directive is used. The sphinx syntax for that is:
This will put the text *New in version 0.17.0* wherever you put the sphinx
directive. This should also be put in the docstring when adding a new function
-or method (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
-or a new keyword argument (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
+or method (`example <https://github.com/pandas-dev/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
+or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
Contributing your changes to *pandas*
=====================================
@@ -806,8 +806,8 @@ like::
origin git@github.com:yourname/pandas.git (fetch)
origin git@github.com:yourname/pandas.git (push)
- upstream git://github.com/pydata/pandas.git (fetch)
- upstream git://github.com/pydata/pandas.git (push)
+ upstream git://github.com/pandas-dev/pandas.git (fetch)
+ upstream git://github.com/pandas-dev/pandas.git (push)
Now your code is on GitHub, but it is not yet a part of the *pandas* project. For that to
happen, a pull request needs to be submitted on GitHub.
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 38a816060e1bc..3e84d15caf50b 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -200,7 +200,7 @@ The :ref:`indexing <indexing>` docs.
df[(df.AAA <= 6) & (df.index.isin([0,2,4]))]
`Use loc for label-oriented slicing and iloc positional slicing
-<https://github.com/pydata/pandas/issues/2904>`__
+<https://github.com/pandas-dev/pandas/issues/2904>`__
.. ipython:: python
@@ -410,7 +410,7 @@ Sorting
df.sort_values(by=('Labs', 'II'), ascending=False)
`Partial Selection, the need for sortedness;
-<https://github.com/pydata/pandas/issues/2995>`__
+<https://github.com/pandas-dev/pandas/issues/2995>`__
Levels
******
@@ -787,7 +787,7 @@ The :ref:`Resample <timeseries.resampling>` docs.
<http://stackoverflow.com/questions/14569223/timegrouper-pandas>`__
`Using TimeGrouper and another grouping to create subgroups, then apply a custom function
-<https://github.com/pydata/pandas/issues/3791>`__
+<https://github.com/pandas-dev/pandas/issues/3791>`__
`Resampling with custom periods
<http://stackoverflow.com/questions/15408156/resampling-with-custom-periods>`__
@@ -823,7 +823,7 @@ ignore_index is needed in pandas < v0.13, and depending on df construction
df = df1.append(df2,ignore_index=True); df
`Self Join of a DataFrame
-<https://github.com/pydata/pandas/issues/2996>`__
+<https://github.com/pandas-dev/pandas/issues/2996>`__
.. ipython:: python
@@ -877,7 +877,7 @@ The :ref:`Plotting <visualization>` docs.
<http://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot>`__
`Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter
-<http://pandas-xlsxwriter-charts.readthedocs.org/en/latest/introduction.html>`__
+<https://pandas-xlsxwriter-charts.readthedocs.io/>`__
`Boxplot for each quartile of a stratifying variable
<http://stackoverflow.com/questions/23232989/boxplot-stratified-by-column-in-python-pandas>`__
@@ -936,7 +936,7 @@ using that handle to read.
<http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
`Dealing with bad lines
-<http://github.com/pydata/pandas/issues/2886>`__
+<http://github.com/pandas-dev/pandas/issues/2886>`__
`Dealing with bad lines II
<http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/>`__
@@ -1075,7 +1075,7 @@ The :ref:`HDFStores <io.hdf5>` docs
<http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
`Managing heterogeneous data using a linked multiple table hierarchy
-<http://github.com/pydata/pandas/issues/3032>`__
+<http://github.com/pandas-dev/pandas/issues/3032>`__
`Merging on-disk tables with millions of rows
<http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__
@@ -1216,7 +1216,7 @@ Timedeltas
The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
`Using timedeltas
-<http://github.com/pydata/pandas/pull/2899>`__
+<http://github.com/pandas-dev/pandas/pull/2899>`__
.. ipython:: python
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 17ebd1f163f4f..a37b1e89c7cc3 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -143,9 +143,9 @@ both "column wise min/max and global min/max coloring."
API
-----
-`pandas-datareader <https://github.com/pydata/pandas-datareader>`__
+`pandas-datareader <https://github.com/pandas-dev/pandas-datareader>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.org/en/latest/>`_:
+``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.io/en/latest/>`_:
The following data feeds are available:
@@ -170,7 +170,7 @@ PyDatastream is a Python interface to the
SOAP API to return indexed Pandas DataFrames or Panels with financial data.
This package requires valid credentials for this API (non free).
-`pandaSDMX <http://pandasdmx.readthedocs.org>`__
+`pandaSDMX <https://pandasdmx.readthedocs.io>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pandaSDMX is an extensible library to retrieve and acquire statistical data
and metadata disseminated in
@@ -215,7 +215,7 @@ dimensional arrays, rather than the tabular data for which pandas excels.
Out-of-core
-------------
-`Dask <https://dask.readthedocs.org/en/latest/>`__
+`Dask <https://dask.readthedocs.io/en/latest/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dask is a flexible parallel computing library for analytics. Dask
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 99d7486cde2d0..cfac5c257184d 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -391,7 +391,7 @@ This is because ``reindex_like`` silently inserts ``NaNs`` and the ``dtype``
changes accordingly. This can cause some issues when using ``numpy`` ``ufuncs``
such as ``numpy.logical_and``.
-See the `this old issue <https://github.com/pydata/pandas/issues/2388>`__ for a more
+See the `this old issue <https://github.com/pandas-dev/pandas/issues/2388>`__ for a more
detailed discussion.
Parsing Dates from Text Files
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 6295e6f6cbb68..923c22aa9048f 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -13,7 +13,7 @@ This is the recommended installation method for most users.
Instructions for installing from source,
`PyPI <http://pypi.python.org/pypi/pandas>`__, various Linux distributions, or a
-`development version <http://github.com/pydata/pandas>`__ are also provided.
+`development version <http://github.com/pandas-dev/pandas>`__ are also provided.
Python version support
----------------------
@@ -189,7 +189,7 @@ pandas is equipped with an exhaustive set of unit tests covering about 97% of
the codebase as of this writing. To run it on your machine to verify that
everything is working (and you have all of the dependencies, soft and hard,
installed), make sure you have `nose
-<http://readthedocs.org/docs/nose/en/latest/>`__ and run:
+<https://nose.readthedocs.io/en/latest/>`__ and run:
::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index c07cfe4cd5574..ae71587c8b46b 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2035,7 +2035,7 @@ You can even pass in an instance of ``StringIO`` if you so desire
that having so many network-accessing functions slows down the documentation
build. If you spot an error or an example that doesn't run, please do not
hesitate to report it over on `pandas GitHub issues page
- <http://www.github.com/pydata/pandas/issues>`__.
+ <http://www.github.com/pandas-dev/pandas/issues>`__.
Read a URL and match a table that contains specific text
@@ -2639,8 +2639,8 @@ config options <options>` ``io.excel.xlsx.writer`` and
``io.excel.xls.writer``. pandas will fall back on `openpyxl`_ for ``.xlsx``
files if `Xlsxwriter`_ is not available.
-.. _XlsxWriter: http://xlsxwriter.readthedocs.org
-.. _openpyxl: http://openpyxl.readthedocs.org/
+.. _XlsxWriter: https://xlsxwriter.readthedocs.io
+.. _openpyxl: https://openpyxl.readthedocs.io/
.. _xlwt: http://www.python-excel.org
To specify which writer you want to use, you can pass an engine keyword
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index b1addddc2121d..92caeec319169 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -81,7 +81,7 @@ Getting Support
---------------
The first stop for pandas issues and ideas is the `Github Issue Tracker
-<https://github.com/pydata/pandas/issues>`__. If you have a general question,
+<https://github.com/pandas-dev/pandas/issues>`__. If you have a general question,
pandas community experts can answer through `Stack Overflow
<http://stackoverflow.com/questions/tagged/pandas>`__.
@@ -103,7 +103,7 @@ training, and consulting for pandas.
pandas is only made possible by a group of people around the world like you
who have contributed new code, bug reports, fixes, comments and ideas. A
-complete list can be found `on Github <http://www.github.com/pydata/pandas/contributors>`__.
+complete list can be found `on Github <http://www.github.com/pandas-dev/pandas/contributors>`__.
Development Team
----------------
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst
index f3df1ebdf25cb..f2a8668bbda91 100644
--- a/doc/source/r_interface.rst
+++ b/doc/source/r_interface.rst
@@ -17,7 +17,7 @@ rpy2 / R interface
In v0.16.0, the ``pandas.rpy`` interface has been **deprecated and will be
removed in a future version**. Similar functionality can be accessed
- through the `rpy2 <http://rpy2.readthedocs.io/>`__ project.
+ through the `rpy2 <https://rpy2.readthedocs.io/>`__ project.
See the :ref:`updating <rpy.updating>` section for a guide to port your
code from the ``pandas.rpy`` to ``rpy2`` functions.
@@ -71,7 +71,7 @@ The ``convert_to_r_matrix`` function can be replaced by the normal
Not all conversion functions in rpy2 are working exactly the same as the
current methods in pandas. If you experience problems or limitations in
comparison to the ones in pandas, please report this at the
- `issue tracker <https://github.com/pydata/pandas/issues>`_.
+ `issue tracker <https://github.com/pandas-dev/pandas/issues>`_.
See also the documentation of the `rpy2 <http://rpy2.bitbucket.org/>`__ project.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7e987fcff31b3..d210065f04459 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -20,7 +20,7 @@ Release Notes
*************
This is the list of changes to pandas between each release. For full details,
-see the commit logs at http://github.com/pydata/pandas
+see the commit logs at http://github.com/pandas-dev/pandas
**What is it**
@@ -33,7 +33,7 @@ analysis / manipulation tool available in any language.
**Where to get it**
-* Source code: http://github.com/pydata/pandas
+* Source code: http://github.com/pandas-dev/pandas
* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
* Documentation: http://pandas.pydata.org
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst
index 019aa82fed1aa..e2c713ac8519a 100644
--- a/doc/source/remote_data.rst
+++ b/doc/source/remote_data.rst
@@ -13,7 +13,7 @@ DataReader
The sub-package ``pandas.io.data`` is removed in favor of a separately
installable `pandas-datareader package
-<https://github.com/pydata/pandas-datareader>`_. This will allow the data
+<https://github.com/pandas-dev/pandas-datareader>`_. This will allow the data
modules to be independently updated to your pandas installation. The API for
``pandas-datareader v0.1.1`` is the same as in ``pandas v0.16.1``.
(:issue:`8961`)
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst
index e92798ea17448..c25e734a046b2 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials.rst
@@ -138,7 +138,7 @@ Modern Pandas
Excel charts with pandas, vincent and xlsxwriter
------------------------------------------------
-- `Using Pandas and XlsxWriter to create Excel charts <http://pandas-xlsxwriter-charts.readthedocs.org/>`_
+- `Using Pandas and XlsxWriter to create Excel charts <https://pandas-xlsxwriter-charts.readthedocs.io/>`_
Various Tutorials
-----------------
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 6e05c3ff0457a..e3b186abe53fc 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -892,7 +892,7 @@ for Fourier series. By coloring these curves differently for each class
it is possible to visualize data clustering. Curves belonging to samples
of the same class will usually be closer together and form larger structures.
-**Note**: The "Iris" dataset is available `here <https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv>`__.
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__.
.. ipython:: python
@@ -1044,7 +1044,7 @@ forces acting on our sample are at an equilibrium) is where a dot representing
our sample will be drawn. Depending on which class that sample belongs it will
be colored differently.
-**Note**: The "Iris" dataset is available `here <https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv>`__.
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt
index a91e0ab9e4961..181cd401c85d6 100644
--- a/doc/source/whatsnew/v0.14.0.txt
+++ b/doc/source/whatsnew/v0.14.0.txt
@@ -401,7 +401,7 @@ through SQLAlchemy (:issue:`2717`, :issue:`4163`, :issue:`5950`, :issue:`6292`).
All databases supported by SQLAlchemy can be used, such
as PostgreSQL, MySQL, Oracle, Microsoft SQL server (see documentation of
SQLAlchemy on `included dialects
-<http://sqlalchemy.readthedocs.org/en/latest/dialects/index.html>`_).
+<https://sqlalchemy.readthedocs.io/en/latest/dialects/index.html>`_).
The functionality of providing DBAPI connection objects will only be supported
for sqlite3 in the future. The ``'mysql'`` flavor is deprecated.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index fc13224d3fe6e..9cb299593076d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -141,7 +141,7 @@ as well as the ``.sum()`` operation.
Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performing multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
-.. _dask: https://dask.readthedocs.org/en/latest/
+.. _dask: https://dask.readthedocs.io/en/latest/
.. _QT: https://wiki.python.org/moin/PyQt
.. _whatsnew_0170.plot:
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 3edb8c1fa9071..5180b9a092f6c 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -20,8 +20,8 @@ Highlights include:
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
-
-
+- Fixed performance regression in factorization of ``Period`` data (:issue:`14338`)
+- Improved performance in ``.to_json()`` when ``lines=True`` (:issue:`14408`)
@@ -34,7 +34,7 @@ Bug Fixes
-
+- Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`)
@@ -44,4 +44,7 @@ Bug Fixes
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
+- Bug in ``pd.concat`` where ``axis`` cannot take string parameters ``'rows'`` or ``'columns'`` (:issue:`14369`)
- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
+- Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`)
+- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue:`14327`)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0354a8046e873..7fa9991138fba 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -62,6 +62,7 @@ Deprecations
Removal of prior version deprecations/changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- ``pd.to_datetime`` and ``pd.to_timedelta`` have dropped the ``coerce`` parameter in favor of ``errors`` (:issue:`13602`)
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 990018f2f7f3b..1b8930dcae0f1 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -392,7 +392,7 @@ def __reduce__(self): # optional, for pickle support
return type(self), args, None, None, list(self.items())
-# https://github.com/pydata/pandas/pull/9123
+# https://github.com/pandas-dev/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == 'little'
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 72fbc3906cafb..f480eae2dd04d 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -1693,11 +1693,11 @@ def test_result_types(self):
self.check_result_type(np.float64, np.float64)
def test_result_types2(self):
- # xref https://github.com/pydata/pandas/issues/12293
+ # xref https://github.com/pandas-dev/pandas/issues/12293
raise nose.SkipTest("unreliable tests on complex128")
# Did not test complex64 because DataFrame is converting it to
- # complex128. Due to https://github.com/pydata/pandas/issues/10952
+ # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952
self.check_result_type(np.complex128, np.complex128)
def test_undefined_func(self):
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ee59d6552bb2f..8644d4568e44d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -285,18 +285,27 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
note: an array of Periods will ignore sort as it returns an always sorted
PeriodIndex
"""
- from pandas import Index, Series, DatetimeIndex
-
- vals = np.asarray(values)
-
- # localize to UTC
- is_datetimetz_type = is_datetimetz(values)
- if is_datetimetz_type:
- values = DatetimeIndex(values)
- vals = values.asi8
+ from pandas import Index, Series, DatetimeIndex, PeriodIndex
+
+ # handling two possibilities here
+ # - for a numpy datetimelike simply view as i8 then cast back
+ # - for an extension datetimelike view as i8 then
+ # reconstruct from boxed values to transfer metadata
+ dtype = None
+ if needs_i8_conversion(values):
+ if is_period_dtype(values):
+ values = PeriodIndex(values)
+ vals = values.asi8
+ elif is_datetimetz(values):
+ values = DatetimeIndex(values)
+ vals = values.asi8
+ else:
+ # numpy dtype
+ dtype = values.dtype
+ vals = values.view(np.int64)
+ else:
+ vals = np.asarray(values)
- is_datetime = is_datetime64_dtype(vals)
- is_timedelta = is_timedelta64_dtype(vals)
(hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
table = hash_klass(size_hint or len(vals))
@@ -311,13 +320,9 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
assume_unique=True)
- if is_datetimetz_type:
- # reset tz
- uniques = values._shallow_copy(uniques)
- elif is_datetime:
- uniques = uniques.astype('M8[ns]')
- elif is_timedelta:
- uniques = uniques.astype('m8[ns]')
+ if dtype is not None:
+ uniques = uniques.astype(dtype)
+
if isinstance(values, Index):
uniques = values._shallow_copy(uniques, name=None)
elif isinstance(values, Series):
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index db48f2a46eaf3..9efaff6060909 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1681,7 +1681,7 @@ def __setitem__(self, key, value):
else:
# There is a bug in numpy, which does not accept a Series as a
# indexer
- # https://github.com/pydata/pandas/issues/6168
+ # https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
@@ -1690,7 +1690,7 @@ def __setitem__(self, key, value):
lindexer = self.categories.get_indexer(rvalue)
# FIXME: the following can be removed after GH7820 is fixed:
- # https://github.com/pydata/pandas/issues/7820
+ # https://github.com/pandas-dev/pandas/issues/7820
# float categories do currently return -1 for np.nan, even if np.nan is
# included in the index -> "repair" this here
if isnull(rvalue).any() and isnull(self.categories).any():
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 3c376e3188eac..5223c0ac270f3 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2201,36 +2201,12 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
- inds = index.labels[level]
- level_index = index.levels[level]
-
if self.name is None:
self.name = index.names[level]
- # XXX complete hack
-
- if grouper is not None:
- level_values = index.levels[level].take(inds)
- self.grouper = level_values.map(self.grouper)
- else:
- # all levels may not be observed
- labels, uniques = algos.factorize(inds, sort=True)
-
- if len(uniques) > 0 and uniques[0] == -1:
- # handle NAs
- mask = inds != -1
- ok_labels, uniques = algos.factorize(inds[mask], sort=True)
-
- labels = np.empty(len(inds), dtype=inds.dtype)
- labels[mask] = ok_labels
- labels[~mask] = -1
-
- if len(uniques) < len(level_index):
- level_index = level_index.take(uniques)
+ self.grouper, self._labels, self._group_index = \
+ index._get_grouper_for_level(self.grouper, level)
- self._labels = labels
- self._group_index = level_index
- self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 564586eec5a8e..d7d68ad536be5 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -11,6 +11,7 @@
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
+from pandas.compat.numpy import _np_version_under1p10
from pandas.types.common import (_ensure_int64, _ensure_object,
_ensure_float64, _get_dtype,
is_float, is_scalar,
@@ -829,9 +830,37 @@ def _checked_add_with_arr(arr, b):
Raises
------
- OverflowError if any x + y exceeds the maximum int64 value.
+ OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
- if (np.iinfo(np.int64).max - b < arr).any():
- raise OverflowError("Python int too large to "
- "convert to C long")
+ # For performance reasons, we broadcast 'b' to the new array 'b2'
+ # so that it has the same size as 'arr'.
+ if _np_version_under1p10:
+ if lib.isscalar(b):
+ b2 = np.empty(arr.shape)
+ b2.fill(b)
+ else:
+ b2 = b
+ else:
+ b2 = np.broadcast_to(b, arr.shape)
+
+ # gh-14324: For each element in 'arr' and its corresponding element
+ # in 'b2', we check the sign of the element in 'b2'. If it is positive,
+ # we then check whether its sum with the element in 'arr' exceeds
+ # np.iinfo(np.int64).max. If so, we have an overflow error. If it
+ # it is negative, we then check whether its sum with the element in
+ # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
+ # error as well.
+ mask1 = b2 > 0
+ mask2 = b2 < 0
+
+ if not mask1.any():
+ to_raise = (np.iinfo(np.int64).min - b2 > arr).any()
+ elif not mask2.any():
+ to_raise = (np.iinfo(np.int64).max - b2 < arr).any()
+ else:
+ to_raise = ((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]).any() or
+ (np.iinfo(np.int64).min - b2[mask2] > arr[mask2]).any())
+
+ if to_raise:
+ raise OverflowError("Overflow in int64 addition")
return arr + b
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 5082fc84982c6..1c24a0db34b2b 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -432,6 +432,36 @@ def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
+ _index_shared_docs['_get_grouper_for_level'] = """
+ Get index grouper corresponding to an index level
+
+ Parameters
+ ----------
+ mapper: Group mapping function or None
+ Function mapping index values to groups
+ level : int or None
+ Index level
+
+ Returns
+ -------
+ grouper : Index
+ Index of values to group on
+ labels : ndarray of int or None
+ Array of locations in level_index
+ uniques : Index or None
+ Index of unique values for level
+ """
+
+ @Appender(_index_shared_docs['_get_grouper_for_level'])
+ def _get_grouper_for_level(self, mapper, level=None):
+ assert level is None or level == 0
+ if mapper is None:
+ grouper = self
+ else:
+ grouper = self.map(mapper)
+
+ return grouper, None, None
+
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 0c465da24a17e..a9f452db69659 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -539,6 +539,37 @@ def _format_native_types(self, na_rep='nan', **kwargs):
return mi.values
+ @Appender(_index_shared_docs['_get_grouper_for_level'])
+ def _get_grouper_for_level(self, mapper, level):
+ indexer = self.labels[level]
+ level_index = self.levels[level]
+
+ if mapper is not None:
+ # Handle group mapping function and return
+ level_values = self.levels[level].take(indexer)
+ grouper = level_values.map(mapper)
+ return grouper, None, None
+
+ labels, uniques = algos.factorize(indexer, sort=True)
+
+ if len(uniques) > 0 and uniques[0] == -1:
+ # Handle NAs
+ mask = indexer != -1
+ ok_labels, uniques = algos.factorize(indexer[mask],
+ sort=True)
+
+ labels = np.empty(len(indexer), dtype=indexer.dtype)
+ labels[mask] = ok_labels
+ labels[~mask] = -1
+
+ if len(uniques) < len(level_index):
+ # Remove unobserved levels from level_index
+ level_index = level_index.take(uniques)
+
+ grouper = level_index.take(labels)
+
+ return grouper, labels, level_index
+
@property
def _constructor(self):
return MultiIndex.from_tuples
diff --git a/pandas/io/data.py b/pandas/io/data.py
index e76790a6ab98b..09c7aef0cde1a 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -1,6 +1,6 @@
raise ImportError(
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader). After installing the pandas-datareader package "
- "(https://github.com/pydata/pandas-datareader), you can change "
+ "(https://github.com/pandas-dev/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.")
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index d6f8660f20ef6..8038cc500f6cd 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -236,7 +236,7 @@ def get_user_account_credentials(self):
return credentials
def get_service_account_credentials(self):
- # Bug fix for https://github.com/pydata/pandas/issues/12572
+ # Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
diff --git a/pandas/io/json.py b/pandas/io/json.py
index e697351484f68..1e258101a5d86 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -605,20 +605,9 @@ def _convert_to_line_delimits(s):
if not s[0] == '[' and s[-1] == ']':
return s
s = s[1:-1]
- num_open_brackets_seen = 0
- commas_to_replace = []
- for idx, char in enumerate(s): # iter through to find all
- if char == ',': # commas that should be \n
- if num_open_brackets_seen == 0:
- commas_to_replace.append(idx)
- elif char == '{':
- num_open_brackets_seen += 1
- elif char == '}':
- num_open_brackets_seen -= 1
- s_arr = np.array(list(s)) # Turn to an array to set
- s_arr[commas_to_replace] = '\n' # all commas at once.
- s = ''.join(s_arr)
- return s
+
+ from pandas.lib import convert_json_to_lines
+ return convert_json_to_lines(s)
def nested_to_record(ds, prefix="", level=0):
diff --git a/pandas/io/tests/json/test_pandas.py b/pandas/io/tests/json/test_pandas.py
index 47bdd25572fc7..117ac2324d0e0 100644
--- a/pandas/io/tests/json/test_pandas.py
+++ b/pandas/io/tests/json/test_pandas.py
@@ -767,7 +767,7 @@ def test_round_trip_exception_(self):
@network
def test_url(self):
- url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
+ url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
self.assertEqual(result[c].dtype, 'datetime64[ns]')
@@ -962,6 +962,12 @@ def test_to_jsonl(self):
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
self.assertEqual(result, expected)
+ df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
+ self.assertEqual(result, expected)
+ assert_frame_equal(pd.read_json(result, lines=True), df)
+
def test_latin_encoding(self):
if compat.PY2:
self.assertRaisesRegexp(
diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py
index 0b59b695e1dca..0219e16391be8 100644
--- a/pandas/io/tests/parser/common.py
+++ b/pandas/io/tests/parser/common.py
@@ -629,7 +629,7 @@ def test_read_csv_parse_simple_list(self):
@tm.network
def test_url(self):
# HTTP(S)
- url = ('https://raw.github.com/pydata/pandas/master/'
+ url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/io/tests/parser/data/salary.table.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
diff --git a/pandas/io/tests/parser/test_network.py b/pandas/io/tests/parser/test_network.py
index 8b8a6de36fc03..7e2f039853e2f 100644
--- a/pandas/io/tests/parser/test_network.py
+++ b/pandas/io/tests/parser/test_network.py
@@ -23,7 +23,7 @@ def setUp(self):
@tm.network
def test_url_gz(self):
- url = ('https://raw.github.com/pydata/pandas/'
+ url = ('https://raw.github.com/pandas-dev/pandas/'
'master/pandas/io/tests/parser/data/salary.table.gz')
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index d163b05aa01d4..998e71076b7c0 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -543,7 +543,7 @@ def test_read_xlrd_Book(self):
@tm.network
def test_read_from_http_url(self):
- url = ('https://raw.github.com/pydata/pandas/master/'
+ url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/io/tests/data/test1' + self.ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1')
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py
index 0ea4b5204e150..cca1580b84195 100644
--- a/pandas/io/tests/test_gbq.py
+++ b/pandas/io/tests/test_gbq.py
@@ -150,7 +150,7 @@ def _test_imports():
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
- # Bug fix for https://github.com/pydata/pandas/issues/12572
+ # Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
@@ -651,7 +651,7 @@ def test_download_dataset_larger_than_200k_rows(self):
self.assertEqual(len(df.drop_duplicates()), test_size)
def test_zero_rows(self):
- # Bug fix for https://github.com/pydata/pandas/issues/10273
+ # Bug fix for https://github.com/pandas-dev/pandas/issues/10273
df = gbq.read_gbq("SELECT title, id "
"FROM [publicdata:samples.wikipedia] "
"WHERE timestamp=-9999999",
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index cf61ad9a35935..91042775ba19d 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -544,7 +544,7 @@ def test_sparse_frame(self):
class TestCompression(TestPackers):
- """See https://github.com/pydata/pandas/pull/9783
+ """See https://github.com/pandas-dev/pandas/pull/9783
"""
def setUp(self):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 198a4017b5af7..af8989baabbc0 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -1610,7 +1610,7 @@ def test_double_precision(self):
def test_connectable_issue_example(self):
# This tests the example raised in issue
- # https://github.com/pydata/pandas/issues/10104
+ # https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
diff --git a/pandas/io/wb.py b/pandas/io/wb.py
index 5dc4d9ce1adc4..2183290c7e074 100644
--- a/pandas/io/wb.py
+++ b/pandas/io/wb.py
@@ -1,6 +1,6 @@
raise ImportError(
"The pandas.io.wb module is moved to a separate package "
"(pandas-datareader). After installing the pandas-datareader package "
- "(https://github.com/pydata/pandas-datareader), you can change "
+ "(https://github.com/pandas-dev/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.")
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index e7672de5c835e..b56a02b245d69 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -1087,6 +1087,44 @@ def string_array_replace_from_nan_rep(
return arr
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def convert_json_to_lines(object arr):
+ """
+ replace comma separated json with line feeds, paying special attention
+ to quotes & brackets
+ """
+ cdef:
+ Py_ssize_t i = 0, num_open_brackets_seen = 0, in_quotes = 0, length
+ ndarray[uint8_t] narr
+        unsigned char v, comma, left_bracket, right_bracket, newline
+
+ newline = ord('\n')
+ comma = ord(',')
+ left_bracket = ord('{')
+ right_bracket = ord('}')
+ quote = ord('"')
+ backslash = ord('\\')
+
+ narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy()
+ length = narr.shape[0]
+ for i in range(length):
+ v = narr[i]
+ if v == quote and i > 0 and narr[i - 1] != backslash:
+ in_quotes = ~in_quotes
+ if v == comma: # commas that should be \n
+ if num_open_brackets_seen == 0 and not in_quotes:
+ narr[i] = newline
+ elif v == left_bracket:
+ if not in_quotes:
+ num_open_brackets_seen += 1
+ elif v == right_bracket:
+ if not in_quotes:
+ num_open_brackets_seen -= 1
+
+ return narr.tostring().decode('utf-8')
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
def write_csv_rows(list data, ndarray data_index,
diff --git a/pandas/tests/formats/test_style.py b/pandas/tests/formats/test_style.py
index 3083750e582fc..2fec04b9c1aa3 100644
--- a/pandas/tests/formats/test_style.py
+++ b/pandas/tests/formats/test_style.py
@@ -144,7 +144,7 @@ def test_set_properties_subset(self):
self.assertEqual(result, expected)
def test_empty_index_name_doesnt_display(self):
- # https://github.com/pydata/pandas/pull/12090#issuecomment-180695902
+ # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.style._translate()
@@ -175,7 +175,7 @@ def test_empty_index_name_doesnt_display(self):
self.assertEqual(result['head'], expected)
def test_index_name(self):
- # https://github.com/pydata/pandas/issues/11655
+ # https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index('A').style._translate()
@@ -195,7 +195,7 @@ def test_index_name(self):
self.assertEqual(result['head'], expected)
def test_multiindex_name(self):
- # https://github.com/pydata/pandas/issues/11655
+ # https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index(['A', 'B']).style._translate()
@@ -217,7 +217,7 @@ def test_multiindex_name(self):
self.assertEqual(result['head'], expected)
def test_numeric_columns(self):
- # https://github.com/pydata/pandas/issues/12125
+ # https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = pd.DataFrame({0: [1, 2, 3]})
df.style._translate()
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index b7cd8a1c01224..81aa694577fb5 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -347,6 +347,65 @@ def test_concat_named_keys(self):
names=[None, None]))
assert_frame_equal(concatted_unnamed, expected_unnamed)
+ def test_concat_axis_parameter(self):
+ # GH 14369
+ df1 = pd.DataFrame({'A': [0.1, 0.2]}, index=range(2))
+ df2 = pd.DataFrame({'A': [0.3, 0.4]}, index=range(2))
+
+ # Index/row/0 DataFrame
+ expected_index = pd.DataFrame(
+ {'A': [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
+
+ concatted_index = pd.concat([df1, df2], axis='index')
+ assert_frame_equal(concatted_index, expected_index)
+
+ concatted_row = pd.concat([df1, df2], axis='rows')
+ assert_frame_equal(concatted_row, expected_index)
+
+ concatted_0 = pd.concat([df1, df2], axis=0)
+ assert_frame_equal(concatted_0, expected_index)
+
+ # Columns/1 DataFrame
+ expected_columns = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=['A', 'A'])
+
+ concatted_columns = pd.concat([df1, df2], axis='columns')
+ assert_frame_equal(concatted_columns, expected_columns)
+
+ concatted_1 = pd.concat([df1, df2], axis=1)
+ assert_frame_equal(concatted_1, expected_columns)
+
+ series1 = pd.Series([0.1, 0.2])
+ series2 = pd.Series([0.3, 0.4])
+
+ # Index/row/0 Series
+ expected_index_series = pd.Series(
+ [0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
+
+ concatted_index_series = pd.concat([series1, series2], axis='index')
+ assert_series_equal(concatted_index_series, expected_index_series)
+
+ concatted_row_series = pd.concat([series1, series2], axis='rows')
+ assert_series_equal(concatted_row_series, expected_index_series)
+
+ concatted_0_series = pd.concat([series1, series2], axis=0)
+ assert_series_equal(concatted_0_series, expected_index_series)
+
+ # Columns/1 Series
+ expected_columns_series = pd.DataFrame(
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1])
+
+ concatted_columns_series = pd.concat(
+ [series1, series2], axis='columns')
+ assert_frame_equal(concatted_columns_series, expected_columns_series)
+
+ concatted_1_series = pd.concat([series1, series2], axis=1)
+ assert_frame_equal(concatted_1_series, expected_columns_series)
+
+ # Testing ValueError
+ with assertRaisesRegexp(ValueError, 'No axis named'):
+ pd.concat([series1, series2], axis='something')
+
class TestDataFrameCombineFirst(tm.TestCase, TestData):
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 2cb62a60f885b..9ef2802cb950f 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -392,7 +392,7 @@ def test_boolean_selection(self):
def test_indexing_with_category(self):
- # https://github.com/pydata/pandas/issues/12564
+ # https://github.com/pandas-dev/pandas/issues/12564
# consistent result if comparing as Dataframe
cat = DataFrame({'A': ['foo', 'bar', 'baz']})
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 333792c5ffdb2..0916693ade2ce 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -100,7 +100,7 @@ def test_boxplot_return_type_none(self):
@slow
def test_boxplot_return_type_legacy(self):
- # API change in https://github.com/pydata/pandas/pull/7096
+ # API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(randn(6, 4),
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 4d0c1e9213b17..87cf89ebf0a9d 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -84,7 +84,7 @@ def test_plot(self):
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
- # present). see: https://github.com/pydata/pandas/issues/9737
+ # present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
@@ -927,7 +927,7 @@ def test_plot_scatter_with_c(self):
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
- # See https://github.com/pydata/pandas/issues/8852 for bug report
+ # See https://github.com/pandas-dev/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
@@ -2115,7 +2115,7 @@ def test_pie_df_nan(self):
self.assertEqual(result, expected)
# legend labels
# NaN's not included in legend with subplots
- # see https://github.com/pydata/pandas/issues/8390
+ # see https://github.com/pandas-dev/pandas/issues/8390
self.assertEqual([x.get_text() for x in
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i + 1:])
@@ -2336,9 +2336,9 @@ def _check_errorbar_color(containers, expected, has_err='has_xerr'):
@slow
def test_sharex_and_ax(self):
- # https://github.com/pydata/pandas/issues/9737 using gridspec, the axis
- # in fig.get_axis() are sorted differently than pandas expected them,
- # so make sure that only the right ones are removed
+ # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
+ # the axis in fig.get_axis() are sorted differently than pandas
+ # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
@@ -2388,9 +2388,9 @@ def _check(axes):
@slow
def test_sharey_and_ax(self):
- # https://github.com/pydata/pandas/issues/9737 using gridspec, the axis
- # in fig.get_axis() are sorted differently than pandas expected them,
- # so make sure that only the right ones are removed
+ # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
+ # the axis in fig.get_axis() are sorted differently than pandas
+ # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 8f2ab0ed28839..ed441f2f85572 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -273,7 +273,7 @@ def f():
self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
- # https://github.com/pydata/pandas/issues/10673
+ # https://github.com/pandas-dev/pandas/issues/10673
s = Series(date_range('20130101', periods=5, freq='D'))
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index f688ec2d43789..086946d05d7a6 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1412,7 +1412,7 @@ def tester(a, b):
# NotImplemented
# this is an alignment issue; these are equivalent
- # https://github.com/pydata/pandas/issues/5284
+ # https://github.com/pandas-dev/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 092e02ee261a0..f89f41abd0d35 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -595,7 +595,7 @@ def test_categorical_zeroes(self):
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
- # https://github.com/pydata/pandas/issues/9443#issuecomment-73719328
+ # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
pd.Series([True, True, False]).value_counts(dropna=True),
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index a494a0d53b123..f01fff035a3c5 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -191,7 +191,7 @@ def f():
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(is_integer_dtype(cat.categories))
- # https://github.com/pydata/pandas/issues/3678
+ # https://github.com/pandas-dev/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(is_integer_dtype(cat.categories))
@@ -618,7 +618,7 @@ def test_describe(self):
index=exp_index)
tm.assert_frame_equal(desc, expected)
- # https://github.com/pydata/pandas/issues/3678
+ # https://github.com/pandas-dev/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
@@ -1547,7 +1547,7 @@ def test_memory_usage(self):
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
- # https://github.com/pydata/pandas/issues/8420
+ # https://github.com/pandas-dev/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
@@ -1633,7 +1633,7 @@ def test_reflected_comparison_with_scalars(self):
np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
- # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
+ # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
@@ -3829,7 +3829,7 @@ def f():
self.assertRaises(TypeError, f)
- # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
+ # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
@@ -4303,14 +4303,14 @@ def test_cat_accessor_api(self):
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
- # https://github.com/pydata/pandas/issues/10673
+ # https://github.com/pandas-dev/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
- # https://github.com/pydata/pandas/issues/10661
+ # https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
@@ -4385,7 +4385,7 @@ def test_str_accessor_api_for_categorical(self):
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
- # https://github.com/pydata/pandas/issues/10661
+ # https://github.com/pandas-dev/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 62ad4c5aa4338..ea226851c9101 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -427,7 +427,7 @@ def f3(key):
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
- # See https://github.com/pydata/pandas/issues/8514
+ # See https://github.com/pandas-dev/pandas/issues/8514
original_value = 60
context_value = 10
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 01c1d48c6d5c0..f3791ee1d5c91 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -458,6 +458,39 @@ def test_grouper_creation_bug(self):
expected = s.groupby(level='one').sum()
assert_series_equal(result, expected)
+ def test_grouper_column_and_index(self):
+ # GH 14327
+
+ # Grouping a multi-index frame by a column and an index level should
+ # be equivalent to resetting the index and grouping by two columns
+ idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 3),
+ ('b', 1), ('b', 2), ('b', 3)])
+ idx.names = ['outer', 'inner']
+ df_multi = pd.DataFrame({"A": np.arange(6),
+ 'B': ['one', 'one', 'two',
+ 'two', 'one', 'one']},
+ index=idx)
+ result = df_multi.groupby(['B', pd.Grouper(level='inner')]).mean()
+ expected = df_multi.reset_index().groupby(['B', 'inner']).mean()
+ assert_frame_equal(result, expected)
+
+ # Test the reverse grouping order
+ result = df_multi.groupby([pd.Grouper(level='inner'), 'B']).mean()
+ expected = df_multi.reset_index().groupby(['inner', 'B']).mean()
+ assert_frame_equal(result, expected)
+
+ # Grouping a single-index frame by a column and the index should
+ # be equivalent to resetting the index and grouping by two columns
+ df_single = df_multi.reset_index('outer')
+ result = df_single.groupby(['B', pd.Grouper(level='inner')]).mean()
+ expected = df_single.reset_index().groupby(['B', 'inner']).mean()
+ assert_frame_equal(result, expected)
+
+ # Test the reverse grouping order
+ result = df_single.groupby([pd.Grouper(level='inner'), 'B']).mean()
+ expected = df_single.reset_index().groupby(['inner', 'B']).mean()
+ assert_frame_equal(result, expected)
+
def test_grouper_getting_correct_binner(self):
# GH 10063
@@ -6443,7 +6476,7 @@ def test_transform_doesnt_clobber_ints(self):
def test_groupby_categorical_two_columns(self):
- # https://github.com/pydata/pandas/issues/8138
+ # https://github.com/pandas-dev/pandas/issues/8138
d = {'cat':
pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
ordered=True),
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index f00fdd196abea..be634228b1b6e 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1004,13 +1004,20 @@ def prng(self):
def test_int64_add_overflow():
# see gh-14068
- msg = "too (big|large) to convert"
+ msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
+ n = np.iinfo(np.int64).min
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, m]), m)
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, m]), np.array([m, m]))
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([n, n]), n)
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([n, n]), np.array([n, n]))
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assertRaisesRegexp(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
nanops._checked_add_with_arr(np.array([m, m]),
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 4019bbe20ea1a..9a3505c3421e0 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2604,7 +2604,7 @@ def test_cat_on_filtered_index(self):
self.assertEqual(str_multiple.loc[1], '2011 2 2')
def test_str_cat_raises_intuitive_error(self):
- # https://github.com/pydata/pandas/issues/11334
+ # https://github.com/pandas-dev/pandas/issues/11334
s = Series(['a', 'b', 'c', 'd'])
message = "Did you mean to supply a `sep` keyword?"
with tm.assertRaisesRegexp(ValueError, message):
@@ -2661,7 +2661,7 @@ def test_index_str_accessor_visibility(self):
idx.str
def test_str_accessor_no_new_attributes(self):
- # https://github.com/pydata/pandas/issues/10673
+ # https://github.com/pandas-dev/pandas/issues/10673
s = Series(list('aabbcde'))
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index a8c43195f5552..ce7f8908d7506 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1283,7 +1283,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
- axis : {0, 1, ...}, default 0
+ axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
@@ -1411,6 +1411,12 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
sample = objs[0]
self.objs = objs
+ # Standardize axis parameter to int
+ if isinstance(sample, Series):
+ axis = DataFrame()._get_axis_number(axis)
+ else:
+ axis = sample._get_axis_number(axis)
+
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 7fd0b1044f9d7..d46dc4d355b4c 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -722,7 +722,7 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
- >>> df = read_csv('https://raw.github.com/pydata/pandas/master'
+ >>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270',
'#4ECDC4', '#C7F464'))
@@ -2773,7 +2773,7 @@ def plot_group(keys, values, ax):
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
- # https://github.com/pydata/pandas/pull/12216#issuecomment-241175580
+ # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index f1a209053445a..d02c403cb3c66 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -1281,7 +1281,7 @@ def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
- # See https://github.com/pydata/pandas/issues/8683
+ # See https://github.com/pandas-dev/pandas/issues/8683
first_tzinfo = first.tzinfo
first = first.tz_localize(None)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index b3da62c8d2db5..1735ac4e2efa5 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -4606,7 +4606,7 @@ def test_parse_time_string(self):
self.assertEqual(reso, reso_lower)
def test_parse_time_quarter_w_dash(self):
- # https://github.com/pydata/pandas/issue/9688
+ # https://github.com/pandas-dev/pandas/issue/9688
pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988'), ]
for dashed, normal in pairs:
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 204808dd510a0..9d3d27f3224b4 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -1678,7 +1678,7 @@ def test_resample_anchored_multiday(self):
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
- # See: https://github.com/pydata/pandas/issues/8683
+ # See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
'2014-10-14 23:06:23.206', periods=3, freq='400L'
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 38e210d698035..f0d14014d6559 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -1957,6 +1957,8 @@ def test_add_overflow(self):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
+
+ msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index ac48fcc2551ea..f640b3974b360 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -5514,22 +5514,6 @@ def test_second(self):
class TestDaysInMonth(tm.TestCase):
- def test_coerce_deprecation(self):
-
- # deprecation of coerce
- with tm.assert_produces_warning(FutureWarning):
- to_datetime('2015-02-29', coerce=True)
- with tm.assert_produces_warning(FutureWarning):
- self.assertRaises(ValueError,
- lambda: to_datetime('2015-02-29', coerce=False))
-
- # multiple arguments
- for e, c in zip(['raise', 'ignore', 'coerce'], [True, False]):
- with tm.assert_produces_warning(FutureWarning):
- self.assertRaises(TypeError,
- lambda: to_datetime('2015-02-29', errors=e,
- coerce=c))
-
# tests for issue #10154
def test_day_not_in_month_coerce(self):
self.assertTrue(isnull(to_datetime('2015-02-29', errors='coerce')))
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index a85a606075911..00e8ee631f463 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -17,7 +17,8 @@
from pytz import NonExistentTimeError
import pandas.util.testing as tm
-from pandas.util.testing import assert_frame_equal, set_timezone
+from pandas.util.testing import (assert_frame_equal, assert_series_equal,
+ set_timezone)
from pandas.compat import lrange, zip
try:
@@ -535,6 +536,44 @@ def test_ambiguous_nat(self):
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
self.assert_numpy_array_equal(di_test.values, localized.values)
+ def test_ambiguous_bool(self):
+ # make sure that we are correctly accepting bool values as ambiguous
+
+ # gh-14402
+ t = Timestamp('2015-11-01 01:00:03')
+ expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
+ expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
+
+ def f():
+ t.tz_localize('US/Central')
+ self.assertRaises(pytz.AmbiguousTimeError, f)
+
+ result = t.tz_localize('US/Central', ambiguous=True)
+ self.assertEqual(result, expected0)
+
+ result = t.tz_localize('US/Central', ambiguous=False)
+ self.assertEqual(result, expected1)
+
+ s = Series([t])
+ expected0 = Series([expected0])
+ expected1 = Series([expected1])
+
+ def f():
+ s.dt.tz_localize('US/Central')
+ self.assertRaises(pytz.AmbiguousTimeError, f)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=True)
+ assert_series_equal(result, expected0)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=[True])
+ assert_series_equal(result, expected0)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=False)
+ assert_series_equal(result, expected1)
+
+ result = s.dt.tz_localize('US/Central', ambiguous=[False])
+ assert_series_equal(result, expected1)
+
def test_nonexistent_raise_coerce(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
@@ -629,14 +668,14 @@ def test_localized_at_time_between_time(self):
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
def test_string_index_alias_tz_aware(self):
@@ -723,7 +762,7 @@ def test_frame_no_datetime64_dtype(self):
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
@@ -903,7 +942,7 @@ def test_utc_with_system_utc(self):
def test_tz_convert_hour_overflow_dst(self):
# Regression test for:
- # https://github.com/pydata/pandas/issues/13306
+ # https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
@@ -943,7 +982,7 @@ def test_tz_convert_hour_overflow_dst(self):
def test_tz_convert_hour_overflow_dst_timestamps(self):
# Regression test for:
- # https://github.com/pydata/pandas/issues/13306
+ # https://github.com/pandas-dev/pandas/issues/13306
tz = self.tzstr('US/Eastern')
@@ -985,7 +1024,7 @@ def test_tz_convert_hour_overflow_dst_timestamps(self):
def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
- # See https://github.com/pydata/pandas/issues/4496 for details.
+ # See https://github.com/pandas-dev/pandas/issues/4496 for details.
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
idx = date_range(datetime(2011, 3, 26, 23),
datetime(2011, 3, 27, 1), freq=freq)
@@ -1324,7 +1363,7 @@ def test_append_aware(self):
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='US/Eastern')
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')
@@ -1336,7 +1375,7 @@ def test_append_aware(self):
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='UTC')
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
utc = rng1.tz
self.assertEqual(utc, ts_result.index.tz)
@@ -1352,7 +1391,7 @@ def test_append_aware(self):
exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'),
Timestamp('1/1/2011 02:00', tz='US/Central')])
exp = Series([1, 2], index=exp_index)
- self.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
def test_append_dst(self):
rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',
@@ -1368,7 +1407,7 @@ def test_append_dst(self):
'2016-08-01 02:00', '2016-08-01 03:00'],
tz='US/Eastern')
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
- tm.assert_series_equal(ts_result, exp)
+ assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
def test_append_aware_naive(self):
@@ -1429,7 +1468,7 @@ def test_arith_utc_convert(self):
expected = uts1 + uts2
self.assertEqual(result.index.tz, pytz.UTC)
- tm.assert_series_equal(result, expected)
+ assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 2ca3fcea8005b..9bf39652a4e00 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -11,12 +11,9 @@
is_timedelta64_dtype,
is_list_like)
from pandas.types.generic import ABCSeries, ABCIndexClass
-from pandas.util.decorators import deprecate_kwarg
-@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
- mapping={True: 'coerce', False: 'raise'})
-def to_timedelta(arg, unit='ns', box=True, errors='raise', coerce=None):
+def to_timedelta(arg, unit='ns', box=True, errors='raise'):
"""
Convert argument to timedelta
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 93d35ff964e69..637e70b76de98 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -16,7 +16,6 @@
from pandas.types.missing import notnull
import pandas.compat as compat
-from pandas.util.decorators import deprecate_kwarg
_DATEUTIL_LEXER_SPLIT = None
try:
@@ -175,10 +174,8 @@ def _guess_datetime_format_for_array(arr, **kwargs):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
-@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
- mapping={True: 'coerce', False: 'raise'})
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
- utc=None, box=True, format=None, exact=True, coerce=None,
+ utc=None, box=True, format=None, exact=True,
unit=None, infer_datetime_format=False):
"""
Convert argument to datetime.
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 9073ad0abd535..bab45595cd60f 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4155,6 +4155,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
"""
cdef:
ndarray[int64_t] trans, deltas, idx_shifted
+ ndarray ambiguous_array
Py_ssize_t i, idx, pos, ntrans, n = len(vals)
int64_t *tdata
int64_t v, left, right
@@ -4190,11 +4191,18 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
infer_dst = True
elif ambiguous == 'NaT':
fill = True
+ elif isinstance(ambiguous, bool):
+ is_dst = True
+ if ambiguous:
+ ambiguous_array = np.ones(len(vals), dtype=bool)
+ else:
+ ambiguous_array = np.zeros(len(vals), dtype=bool)
elif hasattr(ambiguous, '__iter__'):
is_dst = True
if len(ambiguous) != len(vals):
raise ValueError(
"Length of ambiguous bool-array must be the same size as vals")
+ ambiguous_array = np.asarray(ambiguous)
trans, deltas, typ = _get_dst_info(tz)
@@ -4286,7 +4294,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
if infer_dst and dst_hours[i] != NPY_NAT:
result[i] = dst_hours[i]
elif is_dst:
- if ambiguous[i]:
+ if ambiguous_array[i]:
result[i] = left
else:
result[i] = right
diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py
index f00273bc75199..49273bacccf98 100755
--- a/scripts/find_undoc_args.py
+++ b/scripts/find_undoc_args.py
@@ -19,7 +19,7 @@
parser.add_argument('-m', '--module', metavar='MODULE', type=str,required=True,
help='name of package to import and examine',action='store')
parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,required=False,
- help='github project where the the code lives, e.g. "pydata/pandas"',
+ help='github project where the the code lives, e.g. "pandas-dev/pandas"',
default=None,action='store')
args = parser.parse_args()
diff --git a/scripts/gen_release_notes.py b/scripts/gen_release_notes.py
index 02ba4f57c189d..7e4ffca59a0ab 100644
--- a/scripts/gen_release_notes.py
+++ b/scripts/gen_release_notes.py
@@ -46,7 +46,7 @@ def get_issues():
def _get_page(page_number):
- gh_url = ('https://api.github.com/repos/pydata/pandas/issues?'
+ gh_url = ('https://api.github.com/repos/pandas-dev/pandas/issues?'
'milestone=*&state=closed&assignee=*&page=%d') % page_number
with urlopen(gh_url) as resp:
rs = resp.readlines()[0]
diff --git a/scripts/touchup_gh_issues.py b/scripts/touchup_gh_issues.py
index 96ee220f55a02..8aa6d426156f0 100755
--- a/scripts/touchup_gh_issues.py
+++ b/scripts/touchup_gh_issues.py
@@ -14,7 +14,7 @@
pat = "((?:\s*GH\s*)?)#(\d{3,4})([^_]|$)?"
rep_pat = r"\1GH\2_\3"
-anchor_pat = ".. _GH{id}: https://github.com/pydata/pandas/issues/{id}"
+anchor_pat = ".. _GH{id}: https://github.com/pandas-dev/pandas/issues/{id}"
section_pat = "^pandas\s[\d\.]+\s*$"
diff --git a/vb_suite/perf_HEAD.py b/vb_suite/perf_HEAD.py
index c14a1795f01e0..143d943b9eadf 100755
--- a/vb_suite/perf_HEAD.py
+++ b/vb_suite/perf_HEAD.py
@@ -192,7 +192,7 @@ def get_build_results(build):
return convert_json_to_df(r_url)
-def get_all_results(repo_id=53976): # travis pydata/pandas id
+def get_all_results(repo_id=53976): # travis pandas-dev/pandas id
"""Fetches the VBENCH results for all travis builds, and returns a list of result df
unsuccesful individual vbenches are dropped.
diff --git a/vb_suite/suite.py b/vb_suite/suite.py
index 70a6278c0852d..45053b6610896 100644
--- a/vb_suite/suite.py
+++ b/vb_suite/suite.py
@@ -67,7 +67,7 @@
TMP_DIR = config.get('setup', 'tmp_dir')
except:
REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
- REPO_URL = 'git@github.com:pydata/pandas.git'
+ REPO_URL = 'git@github.com:pandas-dev/pandas.git'
DB_PATH = os.path.join(REPO_PATH, 'vb_suite/benchmarks.db')
TMP_DIR = os.path.join(HOME, 'tmp/vb_pandas')
@@ -138,7 +138,7 @@ def generate_rst_files(benchmarks):
The ``.pandas_vb_common`` setup script can be found here_
-.. _here: https://github.com/pydata/pandas/tree/master/vb_suite
+.. _here: https://github.com/pandas-dev/pandas/tree/master/vb_suite
Produced on a machine with
| Title is self-explanatory. Follow-up to #14237.
Merged in PR #14453
| https://api.github.com/repos/pandas-dev/pandas/pulls/14324 | 2016-09-29T18:08:07Z | 2016-10-19T16:11:15Z | null | 2016-10-19T23:25:58Z |
Enforce boolean types | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 23f2589adde89..0148a47068beb 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -247,6 +247,7 @@ Other API Changes
- ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`)
- ``pd.read_csv()`` will now raise a ``ValueError`` for the C engine if the quote character is larger than than one byte (:issue:`11592`)
+- ``inplace`` arguments now require a boolean value, else a ``ValueError`` is thrown (:issue:`14189`)
.. _whatsnew_0200.deprecations:
diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py
index fffde4d9db867..a0a08e4a968cc 100644
--- a/pandas/computation/eval.py
+++ b/pandas/computation/eval.py
@@ -11,6 +11,7 @@
from pandas.computation.scope import _ensure_scope
from pandas.compat import string_types
from pandas.computation.engines import _engines
+from pandas.util.validators import validate_bool_kwarg
def _check_engine(engine):
@@ -231,6 +232,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True,
pandas.DataFrame.query
pandas.DataFrame.eval
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
first_expr = True
if isinstance(expr, string_types):
_check_expression(expr)
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index ffa2cb0684b72..1b577a574350d 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -1970,6 +1970,15 @@ def test_negate_lt_eq_le():
for engine, parser in product(_engines, expr._parsers):
yield check_negate_lt_eq_le, engine, parser
+class TestValidate(tm.TestCase):
+
+ def test_validate_bool_args(self):
+ invalid_values = [1, "True", [1,2,3], 5.0]
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ pd.eval("2+2", inplace=value)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 49e43a60403ca..77272f7721b32 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -8,6 +8,7 @@
from pandas.types.missing import isnull
from pandas.types.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.types.common import is_object_dtype, is_list_like, is_scalar
+from pandas.util.validators import validate_bool_kwarg
from pandas.core import common as com
import pandas.core.nanops as nanops
@@ -1178,6 +1179,7 @@ def searchsorted(self, value, side='left', sorter=None):
False: 'first'})
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 0562736038483..5980f872f951f 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -35,6 +35,7 @@
deprecate_kwarg, Substitution)
from pandas.util.terminal import get_terminal_size
+from pandas.util.validators import validate_bool_kwarg
from pandas.core.config import get_option
@@ -615,6 +616,7 @@ def set_ordered(self, value, inplace=False):
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
self._validate_ordered(value)
cat = self if inplace else self.copy()
cat._ordered = value
@@ -631,6 +633,7 @@ def as_ordered(self, inplace=False):
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
@@ -643,6 +646,7 @@ def as_unordered(self, inplace=False):
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def _get_ordered(self):
@@ -702,6 +706,7 @@ def set_categories(self, new_categories, ordered=None, rename=False,
remove_categories
remove_unused_categories
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
new_categories = self._validate_categories(new_categories)
cat = self if inplace else self.copy()
if rename:
@@ -754,6 +759,7 @@ def rename_categories(self, new_categories, inplace=False):
remove_unused_categories
set_categories
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
cat.categories = new_categories
if not inplace:
@@ -794,6 +800,7 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
remove_unused_categories
set_categories
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self._categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
@@ -832,6 +839,7 @@ def add_categories(self, new_categories, inplace=False):
remove_unused_categories
set_categories
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self._categories)
@@ -877,6 +885,7 @@ def remove_categories(self, removals, inplace=False):
remove_unused_categories
set_categories
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
@@ -917,6 +926,7 @@ def remove_unused_categories(self, inplace=False):
remove_categories
set_categories
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
@@ -1322,6 +1332,7 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'):
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d96fb094f5d5c..b9290c0ce3457 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -23,8 +23,7 @@
import numpy as np
import numpy.ma as ma
-from pandas.types.cast import (_maybe_upcast,
- _infer_dtype_from_scalar,
+from pandas.types.cast import (_maybe_upcast, _infer_dtype_from_scalar,
_possibly_cast_to_datetime,
_possibly_infer_to_datetimelike,
_possibly_convert_platform,
@@ -79,6 +78,7 @@
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util.decorators import deprecate_kwarg, Appender, Substitution
+from pandas.util.validators import validate_bool_kwarg
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
@@ -2164,6 +2164,7 @@ def query(self, expr, inplace=False, **kwargs):
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
@@ -2230,6 +2231,7 @@ def eval(self, expr, inplace=None, **kwargs):
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
@@ -2843,6 +2845,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
-------
dataframe : DataFrame
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
@@ -2935,6 +2938,7 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
-------
resetted : DataFrame
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
@@ -3039,6 +3043,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
-------
dropped : DataFrame
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
@@ -3102,6 +3107,7 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False):
-------
deduplicated : DataFrame
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
@@ -3163,7 +3169,7 @@ def f(vals):
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
-
+ inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
@@ -3274,7 +3280,7 @@ def sort(self, columns=None, axis=0, ascending=True, inplace=False,
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
-
+ inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, pls use "
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1680c061ad7d3..0b5767da74cad 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -52,6 +52,7 @@
isidentifier, set_function_name)
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, Substitution, deprecate_kwarg
+from pandas.util.validators import validate_bool_kwarg
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
@@ -733,6 +734,7 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
1 2 5
2 3 6
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
@@ -1950,6 +1952,7 @@ def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'):
-------
dropped : type of caller
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
@@ -2099,6 +2102,7 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False,
@Appender(_shared_docs['sort_index'] % dict(axes="axes", klass="NDFrame"))
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
@@ -2872,6 +2876,7 @@ def consolidate(self, inplace=False):
-------
consolidated : type of caller
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
self._consolidate_inplace()
else:
@@ -3267,6 +3272,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False,
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
@@ -3479,6 +3485,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
and play with this method to gain intuition about how it works.
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
@@ -3714,6 +3721,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
"""
Interpolate values according to different methods.
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if self.ndim > 2:
raise NotImplementedError("Interpolate has not been implemented "
@@ -4627,6 +4635,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
cond = com._apply_if_callable(cond, self)
@@ -4894,6 +4903,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
try_cast=False, raise_on_error=True):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
cond = com._apply_if_callable(cond, self)
return self.where(~cond, other=other, inplace=inplace, axis=axis,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index aa865ae430d4a..289ce150eb46b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -57,6 +57,7 @@
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
+from pandas.util.validators import validate_bool_kwarg
from pandas.tslib import Timedelta
from pandas import compat, _np_version_under1p9
@@ -360,6 +361,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None,
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not self._can_hold_na:
if inplace:
@@ -626,6 +628,7 @@ def replace(self, to_replace, value, inplace=False, filter=None,
compatibility.
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
original_to_replace = to_replace
mask = isnull(self.values)
# try to replace, if we raise an error, convert to ObjectBlock and
@@ -897,6 +900,9 @@ def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
fill_value=None, coerce=False, downcast=None, mgr=None,
**kwargs):
+
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
@@ -944,6 +950,8 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
@@ -970,6 +978,7 @@ def _interpolate(self, method=None, index=None, values=None,
mgr=None, **kwargs):
""" interpolate using scipy wrappers """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
data = self.values if inplace else self.values.copy()
# only deal with floats
@@ -1514,6 +1523,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
-------
a new block(s), the result of the putmask
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
# use block's copy logic.
# .values may be an Index which does shallow copy by default
@@ -1801,6 +1811,7 @@ def should_store(self, value):
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
@@ -1982,6 +1993,9 @@ def replace(self, to_replace, value, inplace=False, filter=None,
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
+
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
@@ -3205,6 +3219,8 @@ def replace_list(self, src_list, dest_list, inplace=False, regex=False,
mgr=None):
""" do a list replace """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+
if mgr is None:
mgr = self
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f656d72296e3a..0b29e8c93a12d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -66,6 +66,7 @@
import pandas.core.nanops as nanops
import pandas.formats.format as fmt
from pandas.util.decorators import Appender, deprecate_kwarg, Substitution
+from pandas.util.validators import validate_bool_kwarg
import pandas.lib as lib
import pandas.tslib as tslib
@@ -975,6 +976,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
----------
resetted : DataFrame, or Series if drop == True
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
new_index = _default_index(len(self))
if level is not None and isinstance(self.index, MultiIndex):
@@ -1175,6 +1177,7 @@ def _set_name(self, name, inplace=False):
inplace : bool
whether to modify `self` directly or return a copy
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
ser = self if inplace else self.copy()
ser.name = name
return ser
@@ -1722,6 +1725,7 @@ def update(self, other):
def sort_values(self, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
# GH 5856/5853
@@ -1774,6 +1778,7 @@ def _try_kind_sort(arr):
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
+ inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
index = self.index
if level is not None:
@@ -2350,6 +2355,9 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
@Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, **kwargs):
+ kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
+ 'inplace')
+
non_mapping = is_scalar(index) or (is_list_like(index) and
not is_dict_like(index))
if non_mapping:
@@ -2646,6 +2654,7 @@ def dropna(self, axis=0, inplace=False, **kwargs):
inplace : boolean, default False
Do operation in place.
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
kwargs.pop('how', None)
if kwargs:
raise TypeError('dropna() got an unexpected keyword '
diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py
index 82de8cd7d3959..d294e65bbf10c 100644
--- a/pandas/sparse/list.py
+++ b/pandas/sparse/list.py
@@ -5,6 +5,7 @@
from pandas.types.common import is_scalar
from pandas.sparse.array import SparseArray
+from pandas.util.validators import validate_bool_kwarg
import pandas._sparse as splib
@@ -78,6 +79,7 @@ def consolidate(self, inplace=True):
If inplace=False, new object, otherwise reference to existing
object
"""
+ inplace = validate_bool_kwarg(inplace, 'inplace')
if not inplace:
result = self.copy()
else:
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 8a6cbe44465c1..d7466f5ede06f 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -208,7 +208,7 @@ def test_fillna(self):
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
- df.x.fillna(method=m, inplace=1)
+ df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
# with different dtype (GH3386)
diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py
new file mode 100644
index 0000000000000..e1ef87bb3271a
--- /dev/null
+++ b/pandas/tests/frame/test_validate.py
@@ -0,0 +1,33 @@
+from unittest import TestCase
+from pandas.core.frame import DataFrame
+
+
+class TestDataFrameValidate(TestCase):
+ """Tests for error handling related to data types of method arguments."""
+ df = DataFrame({'a': [1, 2], 'b': [3, 4]})
+
+ def test_validate_bool_args(self):
+ # Tests for error handling related to boolean arguments.
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ self.df.query('a > b', inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.df.eval('a + b', inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.df.set_index(keys=['a'], inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.df.reset_index(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.df.dropna(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.df.drop_duplicates(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.df.sort_values(by=['a'], inplace=value)
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py
new file mode 100644
index 0000000000000..cf0482b41c80a
--- /dev/null
+++ b/pandas/tests/series/test_validate.py
@@ -0,0 +1,33 @@
+from unittest import TestCase
+from pandas.core.series import Series
+
+
+class TestSeriesValidate(TestCase):
+ """Tests for error handling related to data types of method arguments."""
+ s = Series([1, 2, 3, 4, 5])
+
+ def test_validate_bool_args(self):
+ # Tests for error handling related to boolean arguments.
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ self.s.reset_index(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.s._set_name(name='hello', inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.s.sort_values(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.s.sort_index(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.s.sort_index(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.s.rename(inplace=value)
+
+ with self.assertRaises(ValueError):
+ self.s.dropna(inplace=value)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 717eae3e59715..f750936961831 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -1050,6 +1050,13 @@ def test_searchsorted(self):
index = np.searchsorted(o, max(o), sorter=range(len(o)))
self.assertTrue(0 <= index <= len(o))
+ def test_validate_bool_args(self):
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ self.int_series.drop_duplicates(inplace=value)
+
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 23280395427fd..382f1dd1decfb 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1694,6 +1694,41 @@ def test_map(self):
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
+ def test_validate_inplace(self):
+ cat = Categorical(['A','B','B','C','A'])
+ invalid_values = [1, "True", [1,2,3], 5.0]
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ cat.set_ordered(value=True, inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.as_ordered(inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.as_unordered(inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.set_categories(['X','Y','Z'], rename=True, inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.rename_categories(['X','Y','Z'], inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.reorder_categories(['X','Y','Z'], ordered=True, inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.add_categories(new_categories=['D','E','F'], inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.remove_categories(removals=['D','E','F'], inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.remove_unused_categories(inplace=value)
+
+ with self.assertRaises(ValueError):
+ cat.sort_values(inplace=value)
+
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 3500ce913462a..f32990ff32cbe 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -642,6 +642,40 @@ def test_numpy_clip(self):
np.clip, obj,
lower, upper, out=col)
+ def test_validate_bool_args(self):
+ df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'},
+ axis=1, inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).drop('a', axis=1, inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).sort_index(inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).consolidate(inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).fillna(value=0, inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).replace(to_replace=1, value=7,
+ inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).interpolate(inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df)._where(cond=df.a > 2, inplace=value)
+
+ with self.assertRaises(ValueError):
+ super(DataFrame, df).mask(cond=df.a > 2, inplace=value)
+
class TestSeries(tm.TestCase, Generic):
_typ = Series
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 32e8f44e6f258..22addd4c23817 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -859,6 +859,14 @@ def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
self.assertEqual(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.])
+ def test_validate_bool_args(self):
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+ bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ bm1.replace_list([1], [2], inplace=value)
+
class TestIndexing(object):
# Nosetests-style data-driven tests.
diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py
index cb12048676d26..ed82604035358 100644
--- a/pandas/tests/test_util.py
+++ b/pandas/tests/test_util.py
@@ -8,7 +8,8 @@
from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
from pandas.util.decorators import deprecate_kwarg
from pandas.util.validators import (validate_args, validate_kwargs,
- validate_args_and_kwargs)
+ validate_args_and_kwargs,
+ validate_bool_kwarg)
import pandas.util.testing as tm
@@ -200,6 +201,22 @@ def test_validation(self):
kwargs = dict(f=None, b=1)
validate_kwargs(self.fname, kwargs, compat_args)
+ def test_validate_bool_kwarg(self):
+ arg_names = ['inplace', 'copy']
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+ valid_values = [True, False, None]
+
+ for name in arg_names:
+ for value in invalid_values:
+ with tm.assertRaisesRegexp(ValueError,
+ ("For argument \"%s\" expected "
+ "type bool, received type %s") %
+ (name, type(value).__name__)):
+ validate_bool_kwarg(value, name)
+
+ for value in valid_values:
+ tm.assert_equal(validate_bool_kwarg(value, name), value)
+
class TestValidateKwargsAndArgs(tm.TestCase):
fname = 'func'
diff --git a/pandas/util/validators.py b/pandas/util/validators.py
index 964fa9d9b38d5..f22412a2bcd17 100644
--- a/pandas/util/validators.py
+++ b/pandas/util/validators.py
@@ -215,3 +215,12 @@ def validate_args_and_kwargs(fname, args, kwargs,
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
+
+
+def validate_bool_kwarg(value, arg_name):
+ """ Ensures that argument passed in arg_name is of type bool. """
+ if not (is_bool(value) or value is None):
+ raise ValueError('For argument "%s" expected type bool, '
+ 'received type %s.' %
+ (arg_name, type(value).__name__))
+ return value
| - [ x ] closes #14189
- [ x ] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
This PR only addresses the `inplace` argument, though there was a comment on #14189 that identifies the issue as being with the `copy` argument as well. I can build this out further to account for all of the frequently occurring boolean arguments.
I also wrote tests for the common function `_enforce_bool_type`, but didn't write individual tests for every method with an `inplace` argument. This is something I can add if it makes sense. I wanted to get something reviewed sooner rather than later to get feedback and ensure that I'm on the right track. Feedback is much appreciated -- thanks!
| https://api.github.com/repos/pandas-dev/pandas/pulls/14318 | 2016-09-29T06:10:37Z | 2017-01-06T12:33:21Z | 2017-01-06T12:33:21Z | 2017-01-06T12:33:35Z |
BUG: date slicing with reverse sorted index | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 454ffc5e5c685..d7f481d2a268b 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1521,7 +1521,7 @@ Bug Fixes
- Bug in ``.shift`` raises ``AmbiguousTimeError`` if data contains datetime near DST boundary (:issue:`13926`)
- Bug in ``pd.read_hdf()`` returns incorrect result when a ``DataFrame`` with a ``categorical`` column and a query which doesn't match any values (:issue:`13792`)
- Bug in ``.iloc`` when indexing with a non lex-sorted MultiIndex (:issue:`13797`)
-
+- Bug in ``.loc`` when indexing with date strings in a reverse sorted ``DatetimeIndex`` (:issue:`14316`)
- Bug in ``Series`` comparison operators when dealing with zero dim NumPy arrays (:issue:`13006`)
- Bug in ``.combine_first`` may return incorrect ``dtype`` (:issue:`7630`, :issue:`10567`)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index e0d63d5aa0c44..fa406a27bef69 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -100,7 +100,7 @@ class TestIndexing(tm.TestCase):
_multiprocess_can_split_ = True
_objs = set(['series', 'frame', 'panel'])
- _typs = set(['ints', 'labels', 'mixed', 'ts', 'floats', 'empty'])
+ _typs = set(['ints', 'labels', 'mixed', 'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
@@ -137,6 +137,15 @@ def setUp(self):
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
+ dates_rev = (date_range('20130101', periods=4)
+ .sort_values(ascending=False))
+ self.series_ts_rev = Series(np.random.randn(4),
+ index=dates_rev)
+ self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
+ index=dates_rev)
+ self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
+ items=dates_rev)
+
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
@@ -1358,6 +1367,10 @@ def test_loc_getitem_label_slice(self):
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
+ # GH 14316
+ self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
+ 'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
+
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index b2326cb7b3255..ff6c0b85a1e5c 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1446,8 +1446,14 @@ def _maybe_cast_slice_bound(self, label, side, kind):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(label, freq)
- bounds = self._parsed_string_to_bounds(reso, parsed)
- return bounds[0 if side == 'left' else 1]
+ lower, upper = self._parsed_string_to_bounds(reso, parsed)
+ # lower, upper form the half-open interval:
+ # [parsed, parsed + 1 freq)
+ # because label may be passed to searchsorted
+ # the bounds need swapped if index is reverse sorted
+ if self.is_monotonic_decreasing:
+ return upper if side == 'left' else lower
+ return lower if side == 'left' else upper
else:
return label
| - [x] closes #14316
- [ ] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
cc @shoyer - good diagnosis!
| https://api.github.com/repos/pandas-dev/pandas/pulls/14317 | 2016-09-29T01:08:48Z | 2016-09-30T10:08:02Z | null | 2016-09-30T10:36:11Z |
DOC: expand doc for numeric_only | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2f78c9acf7972..a3cac2d6f9f2f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5510,8 +5510,8 @@ def _doc_parms(cls):
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
numeric_only : boolean, default None
- Include only float, int, boolean data. If None, will attempt to use
- everything, then use only numeric data
+ Include only float, int, boolean columns. If None, will attempt to use
+ everything, then use only numeric data. Not implemented for Series.
Returns
-------
@@ -5533,8 +5533,8 @@ def _doc_parms(cls):
ddof : int, default 1
degrees of freedom
numeric_only : boolean, default None
- Include only float, int, boolean data. If None, will attempt to use
- everything, then use only numeric data
+ Include only float, int, boolean columns. If None, will attempt to use
+ everything, then use only numeric data. Not implemented for Series.
Returns
-------
@@ -5554,8 +5554,8 @@ def _doc_parms(cls):
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
bool_only : boolean, default None
- Include only boolean data. If None, will attempt to use everything,
- then use only boolean data
+ Include only boolean columns. If None, will attempt to use everything,
+ then use only boolean data. Not implemented for Series.
Returns
-------
| - [x] closes #10480
- [x] tests not needed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry not needed
Just making the description (hopefully) a little clearer.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14309 | 2016-09-28T00:16:01Z | 2016-09-28T10:08:52Z | null | 2016-09-28T10:08:52Z |
PERF: unnecessary materialization of a MultiIndex.values when introspecting memory_usage | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 2f8baae416dea..f4110cba68c31 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1409,7 +1409,7 @@ Performance Improvements
- Improved performance of ``factorize`` of datetime with timezone (:issue:`13750`)
- Improved performance of by lazily creating indexing hashtables on larger Indexes (:issue:`14266`)
- Improved performance of ``groupby.groups`` (:issue:`14293`)
-
+- Unecessary materializing of a MultiIndex when introspecting for memory usage (:issue:`14308`)
.. _whatsnew_0190.bug_fixes:
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index e6aefaeb01a15..1ab5dbb737739 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -413,10 +413,27 @@ def _shallow_copy(self, values=None, **kwargs):
def dtype(self):
return np.dtype('O')
+ @Appender(Index.memory_usage.__doc__)
+ def memory_usage(self, deep=False):
+ # we are overwriting our base class to avoid
+ # computing .values here which could materialize
+ # a tuple representation uncessarily
+ return self._nbytes(deep)
+
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
- level_nbytes = sum((i.nbytes for i in self.levels))
+ return self._nbytes(False)
+
+ def _nbytes(self, deep=False):
+ """
+ return the number of bytes in the underlying data
+ deeply introspect the level data if deep=True
+
+ *this is in internal routine*
+
+ """
+ level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels))
label_nbytes = sum((i.nbytes for i in self.labels))
names_nbytes = sum((getsizeof(i) for i in self.names))
return level_nbytes + label_nbytes + names_nbytes
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 66e592c013fb1..5e5e9abda1200 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -381,3 +381,27 @@ def test_info_memory_usage(self):
# deep=True, and add on some GC overhead
diff = df.memory_usage(deep=True).sum() - sys.getsizeof(df)
self.assertTrue(abs(diff) < 100)
+
+ def test_info_memory_usage_bug_on_multiindex(self):
+ # GH 14308
+ # memory usage introspection should not materialize .values
+
+ from string import ascii_uppercase as uppercase
+
+ def memory_usage(f):
+ return f.memory_usage(deep=True).sum()
+
+ N = 100
+ M = len(uppercase)
+ index = pd.MultiIndex.from_product([list(uppercase),
+ pd.date_range('20160101',
+ periods=N)],
+ names=['id', 'date'])
+ df = DataFrame({'value': np.random.randn(N * M)}, index=index)
+
+ unstacked = df.unstack('id')
+ self.assertEqual(df.values.nbytes, unstacked.values.nbytes)
+ self.assertTrue(memory_usage(df) > memory_usage(unstacked))
+
+ # high upper bound
+ self.assertTrue(memory_usage(unstacked) - memory_usage(df) < 2000)
| ```
In [2]: import string
...: import pandas as pd
...: import numpy as np
...:
...: def memory_usage(f):
...: return f.memory_usage(deep=True).sum()
...:
...: N = 100
...: M = len(string.uppercase)
...: df = pd.DataFrame({'value' : np.random.randn(N*M)},
...: index=pd.MultiIndex.from_product([list(string.uppercase),
...: pd.date_range('20160101',periods=N)],
...: names=['id','date'])
...: )
...:
...:
...: stacked = df.unstack('id')
...:
...: assert df.values.nbytes == stacked.values.nbytes
...:
In [3]: memory_usage(df)
Out[3]: 145600
In [4]: memory_usage(stacked)
Out[4]: 21600
I
n [7]: df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
MultiIndex: 2600 entries, (A, 2016-01-01 00:00:00) to (Z, 2016-04-09 00:00:00)
Data columns (total 1 columns):
value 2600 non-null float64
dtypes: float64(1)
memory usage: 142.2 KB
In [8]: stacked.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 100 entries, 2016-01-01 to 2016-04-09
Freq: D
Data columns (total 26 columns):
(value, A) 100 non-null float64
(value, B) 100 non-null float64
(value, C) 100 non-null float64
(value, D) 100 non-null float64
(value, E) 100 non-null float64
(value, F) 100 non-null float64
(value, G) 100 non-null float64
(value, H) 100 non-null float64
(value, I) 100 non-null float64
(value, J) 100 non-null float64
(value, K) 100 non-null float64
(value, L) 100 non-null float64
(value, M) 100 non-null float64
(value, N) 100 non-null float64
(value, O) 100 non-null float64
(value, P) 100 non-null float64
(value, Q) 100 non-null float64
(value, R) 100 non-null float64
(value, S) 100 non-null float64
(value, T) 100 non-null float64
(value, U) 100 non-null float64
(value, V) 100 non-null float64
(value, W) 100 non-null float64
(value, X) 100 non-null float64
(value, Y) 100 non-null float64
(value, Z) 100 non-null float64
dtypes: float64(26)
memory usage: 21.1 KB
```
with this PR
```
In [2]: memory_usage(df)
Out[2]: 27088
In [3]: memory_usage(stacked)
Out[3]: 21600
In [4]: df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
MultiIndex: 2600 entries, (A, 2016-01-01 00:00:00) to (Z, 2016-04-09 00:00:00)
Data columns (total 1 columns):
value 2600 non-null float64
dtypes: float64(1)
memory usage: 26.5 KB
In [5]: stacked.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 100 entries, 2016-01-01 to 2016-04-09
Freq: D
Data columns (total 26 columns):
(value, A) 100 non-null float64
(value, B) 100 non-null float64
(value, C) 100 non-null float64
(value, D) 100 non-null float64
(value, E) 100 non-null float64
(value, F) 100 non-null float64
(value, G) 100 non-null float64
(value, H) 100 non-null float64
(value, I) 100 non-null float64
(value, J) 100 non-null float64
(value, K) 100 non-null float64
(value, L) 100 non-null float64
(value, M) 100 non-null float64
(value, N) 100 non-null float64
(value, O) 100 non-null float64
(value, P) 100 non-null float64
(value, Q) 100 non-null float64
(value, R) 100 non-null float64
(value, S) 100 non-null float64
(value, T) 100 non-null float64
(value, U) 100 non-null float64
(value, V) 100 non-null float64
(value, W) 100 non-null float64
(value, X) 100 non-null float64
(value, Y) 100 non-null float64
(value, Z) 100 non-null float64
dtypes: float64(26)
memory usage: 21.1 KB
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14308 | 2016-09-27T22:37:23Z | 2016-09-28T18:01:18Z | 2016-09-28T18:01:18Z | 2016-09-28T18:01:19Z |
BUG: Index.copy() honors 'name' parameter (#14302) | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 454ffc5e5c685..50b4b1176c6cb 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1578,3 +1578,4 @@ Bug Fixes
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if grouped dataframe contains empty bins (:issue:`13629`)
+- Bug in ``Index.copy()`` where ``name`` parameter was ignored (:issue:`14302`)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index f430305f5cb91..0f68272117cc8 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -620,26 +620,40 @@ def _coerce_scalar_to_index(self, item):
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
- names = kwargs.get('names')
- if names is not None and name is not None:
- raise TypeError("Can only provide one of `names` and `name`")
if deep:
- from copy import deepcopy
new_index = self._shallow_copy(self._data.copy())
- name = name or deepcopy(self.name)
else:
new_index = self._shallow_copy()
- name = self.name
- if name is not None:
- names = [name]
- if names:
- new_index = new_index.set_names(names)
+
+ names = kwargs.get('names')
+ names = self._validate_names(name=name, names=names, deep=deep)
+ new_index = new_index.set_names(names)
+
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
+ def _validate_names(self, name=None, names=None, deep=False):
+ """
+ Handles the quirks of having a singular 'name' parameter for general
+ Index and plural 'names' parameter for MultiIndex.
+ """
+ from copy import deepcopy
+ if names is not None and name is not None:
+ raise TypeError("Can only provide one of `names` and `name`")
+ elif names is None and name is None:
+ return deepcopy(self.names) if deep else self.names
+ elif names is not None:
+ if not is_list_like(names):
+ raise TypeError("Must pass list-like as `names`.")
+ return names
+ else:
+ if not is_list_like(name):
+ return [name]
+ return name
+
def __unicode__(self):
"""
Return a string representation for this object.
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 09c755b2c9792..e6aefaeb01a15 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -346,7 +346,7 @@ def set_labels(self, labels, level=None, inplace=False,
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
- deep=False, _set_identity=False):
+ deep=False, _set_identity=False, **kwargs):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
@@ -368,15 +368,20 @@ def copy(self, names=None, dtype=None, levels=None, labels=None,
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
+ name = kwargs.get('name')
+ names = self._validate_names(name=name, names=names, deep=deep)
+
if deep:
from copy import deepcopy
- levels = levels if levels is not None else deepcopy(self.levels)
- labels = labels if labels is not None else deepcopy(self.labels)
- names = names if names is not None else deepcopy(self.names)
+ if levels is None:
+ levels = deepcopy(self.levels)
+ if labels is None:
+ labels = deepcopy(self.labels)
else:
- levels = self.levels
- labels = self.labels
- names = self.names
+ if levels is None:
+ levels = self.levels
+ if labels is None:
+ labels = self.labels
return MultiIndex(levels=levels, labels=labels, names=names,
sortorder=self.sortorder, verify_integrity=False,
_set_identity=_set_identity)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7f68318d4d7d3..61b70c738aa95 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1817,6 +1817,30 @@ def test_copy_name(self):
s3 = s1 * s2
self.assertEqual(s3.index.name, 'mario')
+ def test_copy_name2(self):
+ # Check that adding a "name" parameter to the copy is honored
+ # GH14302
+ idx = pd.Index([1, 2], name='MyName')
+ idx1 = idx.copy()
+
+ self.assertTrue(idx.equals(idx1))
+ self.assertEqual(idx.name, 'MyName')
+ self.assertEqual(idx1.name, 'MyName')
+
+ idx2 = idx.copy(name='NewName')
+
+ self.assertTrue(idx.equals(idx2))
+ self.assertEqual(idx.name, 'MyName')
+ self.assertEqual(idx2.name, 'NewName')
+
+ idx3 = idx.copy(names=['NewName'])
+
+ self.assertTrue(idx.equals(idx3))
+ self.assertEqual(idx.name, 'MyName')
+ self.assertEqual(idx.names, ['MyName'])
+ self.assertEqual(idx3.name, 'NewName')
+ self.assertEqual(idx3.names, ['NewName'])
+
def test_union_base(self):
idx = self.create_index()
first = idx[3:]
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 5248f0775d22f..46fd811e5f14c 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -415,6 +415,28 @@ def test_set_value_keeps_names(self):
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
+ def test_copy_names(self):
+ # Check that adding a "names" parameter to the copy is honored
+ # GH14302
+ multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
+ multi_idx1 = multi_idx.copy()
+
+ self.assertTrue(multi_idx.equals(multi_idx1))
+ self.assertEqual(multi_idx.names, ['MyName1', 'MyName2'])
+ self.assertEqual(multi_idx1.names, ['MyName1', 'MyName2'])
+
+ multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
+
+ self.assertTrue(multi_idx.equals(multi_idx2))
+ self.assertEqual(multi_idx.names, ['MyName1', 'MyName2'])
+ self.assertEqual(multi_idx2.names, ['NewName1', 'NewName2'])
+
+ multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
+
+ self.assertTrue(multi_idx.equals(multi_idx3))
+ self.assertEqual(multi_idx.names, ['MyName1', 'MyName2'])
+ self.assertEqual(multi_idx3.names, ['NewName1', 'NewName2'])
+
def test_names(self):
# names are assigned in __init__
| - [x] closes #14302
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14303 | 2016-09-26T18:11:42Z | 2016-09-27T23:44:16Z | null | 2016-09-27T23:44:23Z |
API: add dtype= option to python parser | diff --git a/doc/source/io.rst b/doc/source/io.rst
index ee319092c6dd5..b1c151def26af 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -157,6 +157,9 @@ dtype : Type name or dict of column -> type, default ``None``
Data type for data or columns. E.g. ``{'a': np.float64, 'b': np.int32}``
(unsupported with ``engine='python'``). Use `str` or `object` to preserve and
not interpret dtype.
+
+ .. versionadded:: 0.20.0 support for the Python parser.
+
engine : {``'c'``, ``'python'``}
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
@@ -473,10 +476,9 @@ However, if you wanted for all the data to be coerced, no matter the type, then
using the ``converters`` argument of :func:`~pandas.read_csv` would certainly be
worth trying.
-.. note::
- The ``dtype`` option is currently only supported by the C engine.
- Specifying ``dtype`` with ``engine`` other than 'c' raises a
- ``ValueError``.
+ .. versionadded:: 0.20.0 support for the Python parser.
+
+ The ``dtype`` option is supported by the 'python' engine
.. note::
In some cases, reading in abnormal data with columns containing mixed dtypes
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 65b62601c7022..6e3559bee728d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -22,8 +22,17 @@ New features
~~~~~~~~~~~~
+``read_csv`` supports ``dtype`` keyword for python engine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The ``dtype`` keyword argument in the :func:`read_csv` function for specifying the types of parsed columns
+ is now supported with the ``'python'`` engine (:issue:`14295`). See the :ref:`io docs <io.dtypes>` for more information.
+.. ipython:: python
+
+ data = "a,b\n1,2\n3,4"
+ pd.read_csv(StringIO(data), engine='python').dtypes
+ pd.read_csv(StringIO(data), engine='python', dtype={'a':'float64', 'b':'object'}).dtypes
.. _whatsnew_0200.enhancements.other:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 929b360854d5b..0736535ce2d67 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -17,11 +17,15 @@
zip, string_types, map, u)
from pandas.types.common import (is_integer, _ensure_object,
is_list_like, is_integer_dtype,
- is_float,
- is_scalar)
+ is_float, is_dtype_equal,
+ is_object_dtype,
+ is_scalar, is_categorical_dtype)
+from pandas.types.missing import isnull
+from pandas.types.cast import _astype_nansafe
from pandas.core.index import Index, MultiIndex, RangeIndex
from pandas.core.series import Series
from pandas.core.frame import DataFrame
+from pandas.core.categorical import Categorical
from pandas.core.common import AbstractMethodError
from pandas.core.config import get_option
from pandas.io.date_converters import generic_parser
@@ -111,8 +115,9 @@
are duplicate names in the columns.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
- (Unsupported with engine='python'). Use `str` or `object` to preserve and
- not interpret dtype.
+ Use `str` or `object` to preserve and not interpret dtype.
+ If converters are specified, they will be applied INSTEAD
+ of dtype conversion.
%s
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can either
@@ -421,6 +426,7 @@ def _read(filepath_or_buffer, kwds):
'true_values': None,
'false_values': None,
'converters': None,
+ 'dtype': None,
'skipfooter': 0,
'keep_default_na': True,
@@ -461,7 +467,6 @@ def _read(filepath_or_buffer, kwds):
'buffer_lines': None,
'error_bad_lines': True,
'warn_bad_lines': True,
- 'dtype': None,
'float_precision': None
}
@@ -476,7 +481,6 @@ def _read(filepath_or_buffer, kwds):
'buffer_lines',
'error_bad_lines',
'warn_bad_lines',
- 'dtype',
'float_precision',
])
_deprecated_args = set([
@@ -834,9 +838,6 @@ def _clean_options(self, options, engine):
" ignored as it is not supported by the 'python'"
" engine.").format(reason=fallback_reason,
option=arg)
- if arg == 'dtype':
- msg += " (Note the 'converters' option provides"\
- " similar functionality.)"
raise ValueError(msg)
del result[arg]
@@ -1285,7 +1286,7 @@ def _agg_index(self, index, try_parse_dates=True):
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues)
- arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)
+ arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
index = MultiIndex.from_arrays(arrays, names=self.index_names)
@@ -1293,10 +1294,15 @@ def _agg_index(self, index, try_parse_dates=True):
return index
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
- converters=None):
+ converters=None, dtypes=None):
result = {}
for c, values in compat.iteritems(dct):
conv_f = None if converters is None else converters.get(c, None)
+ if isinstance(dtypes, dict):
+ cast_type = dtypes.get(c, None)
+ else:
+ # single dtype or None
+ cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
@@ -1304,17 +1310,35 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
else:
col_na_values, col_na_fvalues = set(), set()
- coerce_type = True
if conv_f is not None:
+ # conv_f applied to data before inference
+ if cast_type is not None:
+ warnings.warn(("Both a converter and dtype were specified "
+ "for column {0} - only the converter will "
+ "be used").format(c), ParserWarning,
+ stacklevel=7)
+
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = lib.ismember(values, na_values).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
- coerce_type = False
- cvals, na_count = self._convert_types(
- values, set(col_na_values) | col_na_fvalues, coerce_type)
+ cvals, na_count = self._infer_types(
+ values, set(col_na_values) | col_na_fvalues,
+ try_num_bool=False)
+ else:
+ # skip inference if specified dtype is object
+ try_num_bool = not (cast_type and is_object_dtype(cast_type))
+
+ # general type inference and conversion
+ cvals, na_count = self._infer_types(
+ values, set(col_na_values) | col_na_fvalues,
+ try_num_bool)
+
+ # type specificed in dtype param
+ if cast_type and not is_dtype_equal(cvals, cast_type):
+ cvals = self._cast_types(cvals, cast_type, c)
if issubclass(cvals.dtype.type, np.integer) and self.compact_ints:
cvals = lib.downcast_int64(
@@ -1326,7 +1350,23 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
print('Filled %d NA values in column %s' % (na_count, str(c)))
return result
- def _convert_types(self, values, na_values, try_num_bool=True):
+ def _infer_types(self, values, na_values, try_num_bool=True):
+ """
+ Infer types of values, possibly casting
+
+ Parameters
+ ----------
+ values : ndarray
+ na_values : set
+ try_num_bool : bool, default try
+ try to cast values to numeric (first preference) or boolean
+
+ Returns:
+ --------
+ converted : ndarray
+ na_count : int
+ """
+
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = lib.ismember(values, na_values)
@@ -1340,6 +1380,7 @@ def _convert_types(self, values, na_values, try_num_bool=True):
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
+ na_count = isnull(result).sum()
except Exception:
result = values
if values.dtype == np.object_:
@@ -1356,6 +1397,38 @@ def _convert_types(self, values, na_values, try_num_bool=True):
return result, na_count
+ def _cast_types(self, values, cast_type, column):
+ """
+ Cast values to specified type
+
+ Parameters
+ ----------
+ values : ndarray
+ cast_type : string or np.dtype
+ dtype to cast values to
+ column : string
+ column name - used only for error reporting
+
+ Returns
+ -------
+ converted : ndarray
+ """
+
+ if is_categorical_dtype(cast_type):
+ # XXX this is for consistency with
+ # c-parser which parses all categories
+ # as strings
+ if not is_object_dtype(values):
+ values = _astype_nansafe(values, str)
+ values = Categorical(values)
+ else:
+ try:
+ values = _astype_nansafe(values, cast_type, copy=True)
+ except ValueError:
+ raise ValueError("Unable to convert column %s to "
+ "type %s" % (column, cast_type))
+ return values
+
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
@@ -1784,6 +1857,7 @@ def __init__(self, f, **kwds):
self.verbose = kwds['verbose']
self.converters = kwds['converters']
+ self.dtype = kwds['dtype']
self.compact_ints = kwds['compact_ints']
self.use_unsigned = kwds['use_unsigned']
@@ -1982,7 +2056,7 @@ def read(self, rows=None):
# DataFrame with the right metadata, even though it's length 0
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
- names, self.index_col, self.index_names)
+ names, self.index_col, self.index_names, self.dtype)
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
return index, columns, col_dict
@@ -2033,15 +2107,25 @@ def get_chunk(self, size=None):
def _convert_data(self, data):
# apply converters
- clean_conv = {}
-
- for col, f in compat.iteritems(self.converters):
- if isinstance(col, int) and col not in self.orig_names:
- col = self.orig_names[col]
- clean_conv[col] = f
+ def _clean_mapping(mapping):
+ "converts col numbers to names"
+ clean = {}
+ for col, v in compat.iteritems(mapping):
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+ clean[col] = v
+ return clean
+
+ clean_conv = _clean_mapping(self.converters)
+ if not isinstance(self.dtype, dict):
+ # handles single dtype applied to all columns
+ clean_dtypes = self.dtype
+ else:
+ clean_dtypes = _clean_mapping(self.dtype)
return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues,
- self.verbose, clean_conv)
+ self.verbose, clean_conv,
+ clean_dtypes)
def _to_recarray(self, data, columns):
dtypes = []
diff --git a/pandas/io/tests/parser/c_parser_only.py b/pandas/io/tests/parser/c_parser_only.py
index 9cbe88d4032a3..c781b0549ee60 100644
--- a/pandas/io/tests/parser/c_parser_only.py
+++ b/pandas/io/tests/parser/c_parser_only.py
@@ -12,10 +12,9 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import DataFrame, Series, Index, MultiIndex, Categorical
+from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, range, lrange
-from pandas.types.dtypes import CategoricalDtype
class CParserTests(object):
@@ -100,29 +99,13 @@ def test_dtype_and_names_error(self):
self.read_csv(StringIO(data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
- def test_passing_dtype(self):
- # see gh-6607
+ def test_unsupported_dtype(self):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
- with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
+ with tm.ensure_clean('__unsupported_dtype__.csv') as path:
df.to_csv(path)
- # see gh-3795: passing 'str' as the dtype
- result = self.read_csv(path, dtype=str, index_col=0)
- tm.assert_series_equal(result.dtypes, Series(
- {'A': 'object', 'B': 'object'}))
-
- # we expect all object columns, so need to
- # convert to test for equivalence
- result = result.astype(float)
- tm.assert_frame_equal(result, df)
-
- # invalid dtype
- self.assertRaises(TypeError, self.read_csv, path,
- dtype={'A': 'foo', 'B': 'float64'},
- index_col=0)
-
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
@@ -141,11 +124,6 @@ def test_passing_dtype(self):
dtype={'A': 'U8'},
index_col=0)
- # see gh-12048: empty frame
- actual = self.read_csv(StringIO('A,B'), dtype=str)
- expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
- tm.assert_frame_equal(actual, expected)
-
def test_precise_conversion(self):
# see gh-8002
tm._skip_if_32bit()
@@ -178,104 +156,6 @@ def error(val):
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
- def test_pass_dtype(self):
- data = """\
-one,two
-1,2.5
-2,3.5
-3,4.5
-4,5.5"""
-
- result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
- self.assertEqual(result['one'].dtype, 'u1')
- self.assertEqual(result['two'].dtype, 'object')
-
- def test_categorical_dtype(self):
- # GH 10153
- data = """a,b,c
-1,a,3.4
-1,a,3.4
-2,b,4.5"""
- expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
- 'b': Categorical(['a', 'a', 'b']),
- 'c': Categorical(['3.4', '3.4', '4.5'])})
- actual = self.read_csv(StringIO(data), dtype='category')
- tm.assert_frame_equal(actual, expected)
-
- actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
- tm.assert_frame_equal(actual, expected)
-
- actual = self.read_csv(StringIO(data), dtype={'a': 'category',
- 'b': 'category',
- 'c': CategoricalDtype()})
- tm.assert_frame_equal(actual, expected)
-
- actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
- expected = pd.DataFrame({'a': [1, 1, 2],
- 'b': Categorical(['a', 'a', 'b']),
- 'c': [3.4, 3.4, 4.5]})
- tm.assert_frame_equal(actual, expected)
-
- actual = self.read_csv(StringIO(data), dtype={1: 'category'})
- tm.assert_frame_equal(actual, expected)
-
- # unsorted
- data = """a,b,c
-1,b,3.4
-1,b,3.4
-2,a,4.5"""
- expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
- 'b': Categorical(['b', 'b', 'a']),
- 'c': Categorical(['3.4', '3.4', '4.5'])})
- actual = self.read_csv(StringIO(data), dtype='category')
- tm.assert_frame_equal(actual, expected)
-
- # missing
- data = """a,b,c
-1,b,3.4
-1,nan,3.4
-2,a,4.5"""
- expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
- 'b': Categorical(['b', np.nan, 'a']),
- 'c': Categorical(['3.4', '3.4', '4.5'])})
- actual = self.read_csv(StringIO(data), dtype='category')
- tm.assert_frame_equal(actual, expected)
-
- def test_categorical_dtype_encoding(self):
- # GH 10153
- pth = tm.get_data_path('unicode_series.csv')
- encoding = 'latin-1'
- expected = self.read_csv(pth, header=None, encoding=encoding)
- expected[1] = Categorical(expected[1])
- actual = self.read_csv(pth, header=None, encoding=encoding,
- dtype={1: 'category'})
- tm.assert_frame_equal(actual, expected)
-
- pth = tm.get_data_path('utf16_ex.txt')
- encoding = 'utf-16'
- expected = self.read_table(pth, encoding=encoding)
- expected = expected.apply(Categorical)
- actual = self.read_table(pth, encoding=encoding, dtype='category')
- tm.assert_frame_equal(actual, expected)
-
- def test_categorical_dtype_chunksize(self):
- # GH 10153
- data = """a,b
-1,a
-1,b
-1,b
-2,c"""
- expecteds = [pd.DataFrame({'a': [1, 1],
- 'b': Categorical(['a', 'b'])}),
- pd.DataFrame({'a': [1, 2],
- 'b': Categorical(['b', 'c'])},
- index=[2, 3])]
- actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
- chunksize=2)
-
- for actual, expected in zip(actuals, expecteds):
- tm.assert_frame_equal(actual, expected)
-
def test_pass_dtype_as_recarray(self):
if compat.is_platform_windows() and self.low_memory:
raise nose.SkipTest(
@@ -295,66 +175,6 @@ def test_pass_dtype_as_recarray(self):
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
- def test_empty_pass_dtype(self):
- data = 'one,two'
- result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
-
- expected = DataFrame({'one': np.empty(0, dtype='u1'),
- 'two': np.empty(0, dtype=np.object)})
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
- def test_empty_with_index_pass_dtype(self):
- data = 'one,two'
- result = self.read_csv(StringIO(data), index_col=['one'],
- dtype={'one': 'u1', 1: 'f'})
-
- expected = DataFrame({'two': np.empty(0, dtype='f')},
- index=Index([], dtype='u1', name='one'))
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
- def test_empty_with_multiindex_pass_dtype(self):
- data = 'one,two,three'
- result = self.read_csv(StringIO(data), index_col=['one', 'two'],
- dtype={'one': 'u1', 1: 'f8'})
-
- exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
- np.empty(0, dtype='O')],
- names=['one', 'two'])
- expected = DataFrame(
- {'three': np.empty(0, dtype=np.object)}, index=exp_idx)
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
- def test_empty_with_mangled_column_pass_dtype_by_names(self):
- data = 'one,one'
- result = self.read_csv(StringIO(data), dtype={
- 'one': 'u1', 'one.1': 'f'})
-
- expected = DataFrame(
- {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
- def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
- data = 'one,one'
- result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
-
- expected = DataFrame(
- {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
- def test_empty_with_dup_column_pass_dtype_by_indexes(self):
- # see gh-9424
- expected = pd.concat([Series([], name='one', dtype='u1'),
- Series([], name='one.1', dtype='f')], axis=1)
-
- data = 'one,one'
- result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
- data = ''
- result = self.read_csv(StringIO(data), names=['one', 'one'],
- dtype={0: 'u1', 1: 'f'})
- tm.assert_frame_equal(result, expected, check_index_type=False)
-
def test_usecols_dtypes(self):
data = """\
1,2,3
@@ -400,16 +220,6 @@ def test_custom_lineterminator(self):
tm.assert_frame_equal(result, expected)
- def test_raise_on_passed_int_dtype_with_nas(self):
- # see gh-2631
- data = """YEAR, DOY, a
-2001,106380451,10
-2001,,11
-2001,106380451,67"""
- self.assertRaises(ValueError, self.read_csv, StringIO(data),
- sep=",", skipinitialspace=True,
- dtype={'DOY': np.int64})
-
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
@@ -561,49 +371,3 @@ def test_internal_null_byte(self):
result = self.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(result, expected)
-
- def test_empty_dtype(self):
- # see gh-14712
- data = 'a,b'
-
- expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
- result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame({'a': pd.Categorical([]),
- 'b': pd.Categorical([])},
- index=[])
- result = self.read_csv(StringIO(data), header=0,
- dtype='category')
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
- result = self.read_csv(StringIO(data), header=0,
- dtype='datetime64[ns]')
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame({'a': pd.Series([], dtype='timedelta64[ns]'),
- 'b': pd.Series([], dtype='timedelta64[ns]')},
- index=[])
- result = self.read_csv(StringIO(data), header=0,
- dtype='timedelta64[ns]')
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame(columns=['a', 'b'])
- expected['a'] = expected['a'].astype(np.float64)
- result = self.read_csv(StringIO(data), header=0,
- dtype={'a': np.float64})
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame(columns=['a', 'b'])
- expected['a'] = expected['a'].astype(np.float64)
- result = self.read_csv(StringIO(data), header=0,
- dtype={0: np.float64})
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame(columns=['a', 'b'])
- expected['a'] = expected['a'].astype(np.int32)
- expected['b'] = expected['b'].astype(np.float64)
- result = self.read_csv(StringIO(data), header=0,
- dtype={'a': np.int32, 1: np.float64})
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/io/tests/parser/dtypes.py b/pandas/io/tests/parser/dtypes.py
new file mode 100644
index 0000000000000..18c37b31f6480
--- /dev/null
+++ b/pandas/io/tests/parser/dtypes.py
@@ -0,0 +1,274 @@
+# -*- coding: utf-8 -*-
+
+"""
+Tests dtype specification during parsing
+for all of the parsers defined in parsers.py
+"""
+
+import numpy as np
+import pandas as pd
+import pandas.util.testing as tm
+
+from pandas import DataFrame, Series, Index, MultiIndex, Categorical
+from pandas.compat import StringIO
+from pandas.types.dtypes import CategoricalDtype
+from pandas.io.common import ParserWarning
+
+
+class DtypeTests(object):
+ def test_passing_dtype(self):
+ # see gh-6607
+ df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
+ 'AB'), index=['1A', '1B', '1C', '1D', '1E'])
+
+ with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
+ df.to_csv(path)
+
+ # see gh-3795: passing 'str' as the dtype
+ result = self.read_csv(path, dtype=str, index_col=0)
+ expected = df.astype(str)
+ tm.assert_frame_equal(result, expected)
+
+ # for parsing, interpret object as str
+ result = self.read_csv(path, dtype=object, index_col=0)
+ tm.assert_frame_equal(result, expected)
+
+ # we expect all object columns, so need to
+ # convert to test for equivalence
+ result = result.astype(float)
+ tm.assert_frame_equal(result, df)
+
+ # invalid dtype
+ self.assertRaises(TypeError, self.read_csv, path,
+ dtype={'A': 'foo', 'B': 'float64'},
+ index_col=0)
+
+ # see gh-12048: empty frame
+ actual = self.read_csv(StringIO('A,B'), dtype=str)
+ expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_pass_dtype(self):
+ data = """\
+one,two
+1,2.5
+2,3.5
+3,4.5
+4,5.5"""
+
+ result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
+ self.assertEqual(result['one'].dtype, 'u1')
+ self.assertEqual(result['two'].dtype, 'object')
+
+ def test_categorical_dtype(self):
+ # GH 10153
+ data = """a,b,c
+1,a,3.4
+1,a,3.4
+2,b,4.5"""
+ expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
+ 'b': Categorical(['a', 'a', 'b']),
+ 'c': Categorical(['3.4', '3.4', '4.5'])})
+ actual = self.read_csv(StringIO(data), dtype='category')
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.read_csv(StringIO(data), dtype={'a': 'category',
+ 'b': 'category',
+ 'c': CategoricalDtype()})
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
+ expected = pd.DataFrame({'a': [1, 1, 2],
+ 'b': Categorical(['a', 'a', 'b']),
+ 'c': [3.4, 3.4, 4.5]})
+ tm.assert_frame_equal(actual, expected)
+
+ actual = self.read_csv(StringIO(data), dtype={1: 'category'})
+ tm.assert_frame_equal(actual, expected)
+
+ # unsorted
+ data = """a,b,c
+1,b,3.4
+1,b,3.4
+2,a,4.5"""
+ expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
+ 'b': Categorical(['b', 'b', 'a']),
+ 'c': Categorical(['3.4', '3.4', '4.5'])})
+ actual = self.read_csv(StringIO(data), dtype='category')
+ tm.assert_frame_equal(actual, expected)
+
+ # missing
+ data = """a,b,c
+1,b,3.4
+1,nan,3.4
+2,a,4.5"""
+ expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
+ 'b': Categorical(['b', np.nan, 'a']),
+ 'c': Categorical(['3.4', '3.4', '4.5'])})
+ actual = self.read_csv(StringIO(data), dtype='category')
+ tm.assert_frame_equal(actual, expected)
+
+ def test_categorical_dtype_encoding(self):
+ # GH 10153
+ pth = tm.get_data_path('unicode_series.csv')
+ encoding = 'latin-1'
+ expected = self.read_csv(pth, header=None, encoding=encoding)
+ expected[1] = Categorical(expected[1])
+ actual = self.read_csv(pth, header=None, encoding=encoding,
+ dtype={1: 'category'})
+ tm.assert_frame_equal(actual, expected)
+
+ pth = tm.get_data_path('utf16_ex.txt')
+ encoding = 'utf-16'
+ expected = self.read_table(pth, encoding=encoding)
+ expected = expected.apply(Categorical)
+ actual = self.read_table(pth, encoding=encoding, dtype='category')
+ tm.assert_frame_equal(actual, expected)
+
+ def test_categorical_dtype_chunksize(self):
+ # GH 10153
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ expecteds = [pd.DataFrame({'a': [1, 1],
+ 'b': Categorical(['a', 'b'])}),
+ pd.DataFrame({'a': [1, 2],
+ 'b': Categorical(['b', 'c'])},
+ index=[2, 3])]
+ actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
+ chunksize=2)
+
+ for actual, expected in zip(actuals, expecteds):
+ tm.assert_frame_equal(actual, expected)
+
+ def test_empty_pass_dtype(self):
+ data = 'one,two'
+ result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
+
+ expected = DataFrame({'one': np.empty(0, dtype='u1'),
+ 'two': np.empty(0, dtype=np.object)})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_empty_with_index_pass_dtype(self):
+ data = 'one,two'
+ result = self.read_csv(StringIO(data), index_col=['one'],
+ dtype={'one': 'u1', 1: 'f'})
+
+ expected = DataFrame({'two': np.empty(0, dtype='f')},
+ index=Index([], dtype='u1', name='one'))
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_empty_with_multiindex_pass_dtype(self):
+ data = 'one,two,three'
+ result = self.read_csv(StringIO(data), index_col=['one', 'two'],
+ dtype={'one': 'u1', 1: 'f8'})
+
+ exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
+ np.empty(0, dtype='O')],
+ names=['one', 'two'])
+ expected = DataFrame(
+ {'three': np.empty(0, dtype=np.object)}, index=exp_idx)
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_empty_with_mangled_column_pass_dtype_by_names(self):
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), dtype={
+ 'one': 'u1', 'one.1': 'f'})
+
+ expected = DataFrame(
+ {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
+
+ expected = DataFrame(
+ {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_empty_with_dup_column_pass_dtype_by_indexes(self):
+ # see gh-9424
+ expected = pd.concat([Series([], name='one', dtype='u1'),
+ Series([], name='one.1', dtype='f')], axis=1)
+
+ data = 'one,one'
+ result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ data = ''
+ result = self.read_csv(StringIO(data), names=['one', 'one'],
+ dtype={0: 'u1', 1: 'f'})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ def test_raise_on_passed_int_dtype_with_nas(self):
+ # see gh-2631
+ data = """YEAR, DOY, a
+2001,106380451,10
+2001,,11
+2001,106380451,67"""
+ self.assertRaises(ValueError, self.read_csv, StringIO(data),
+ sep=",", skipinitialspace=True,
+ dtype={'DOY': np.int64})
+
+ def test_dtype_with_converter(self):
+ data = """a,b
+1.1,2.2
+1.2,2.3"""
+ # dtype spec ignored if converted specified
+ with tm.assert_produces_warning(ParserWarning):
+ result = self.read_csv(StringIO(data), dtype={'a': 'i8'},
+ converters={'a': lambda x: str(x)})
+ expected = DataFrame({'a': ['1.1', '1.2'], 'b': [2.2, 2.3]})
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_dtype(self):
+ # see gh-14712
+ data = 'a,b'
+
+ expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
+ result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame({'a': pd.Categorical([]),
+ 'b': pd.Categorical([])},
+ index=[])
+ result = self.read_csv(StringIO(data), header=0,
+ dtype='category')
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
+ result = self.read_csv(StringIO(data), header=0,
+ dtype='datetime64[ns]')
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame({'a': pd.Series([], dtype='timedelta64[ns]'),
+ 'b': pd.Series([], dtype='timedelta64[ns]')},
+ index=[])
+ result = self.read_csv(StringIO(data), header=0,
+ dtype='timedelta64[ns]')
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'])
+ expected['a'] = expected['a'].astype(np.float64)
+ result = self.read_csv(StringIO(data), header=0,
+ dtype={'a': np.float64})
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'])
+ expected['a'] = expected['a'].astype(np.float64)
+ result = self.read_csv(StringIO(data), header=0,
+ dtype={0: np.float64})
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame(columns=['a', 'b'])
+ expected['a'] = expected['a'].astype(np.int32)
+ expected['b'] = expected['b'].astype(np.float64)
+ result = self.read_csv(StringIO(data), header=0,
+ dtype={'a': np.int32, 1: np.float64})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/io/tests/parser/test_parsers.py b/pandas/io/tests/parser/test_parsers.py
index 6001c85ae76b1..6cca2e35e1135 100644
--- a/pandas/io/tests/parser/test_parsers.py
+++ b/pandas/io/tests/parser/test_parsers.py
@@ -22,6 +22,7 @@
from .compression import CompressionTests
from .multithread import MultithreadTests
from .python_parser_only import PythonParserTests
+from .dtypes import DtypeTests
class BaseParser(CommentTests, CompressionTests,
@@ -29,7 +30,8 @@ class BaseParser(CommentTests, CompressionTests,
IndexColTests, MultithreadTests,
NAvaluesTests, ParseDatesTests,
ParserTests, SkipRowsTests,
- UsecolsTests, QuotingTests):
+ UsecolsTests, QuotingTests,
+ DtypeTests):
def read_csv(self, *args, **kwargs):
raise NotImplementedError
diff --git a/pandas/io/tests/parser/test_unsupported.py b/pandas/io/tests/parser/test_unsupported.py
index 5d60c20854a83..ffd1cfa9a2538 100644
--- a/pandas/io/tests/parser/test_unsupported.py
+++ b/pandas/io/tests/parser/test_unsupported.py
@@ -44,16 +44,6 @@ def test_c_engine(self):
data = 'a b c\n1 2 3'
msg = 'does not support'
- # specify C-unsupported options with python-unsupported option
- # (options will be ignored on fallback, raise)
- with tm.assertRaisesRegexp(ValueError, msg):
- read_table(StringIO(data), sep=None,
- delim_whitespace=False, dtype={'a': float})
- with tm.assertRaisesRegexp(ValueError, msg):
- read_table(StringIO(data), sep=r'\s', dtype={'a': float})
- with tm.assertRaisesRegexp(ValueError, msg):
- read_table(StringIO(data), skipfooter=1, dtype={'a': float})
-
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, msg):
read_table(StringIO(data), engine='c',
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 6b43dfbabc4a0..6760e822960f1 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -13,7 +13,7 @@ from cpython cimport (PyObject, PyBytes_FromString,
PyUnicode_Check, PyUnicode_AsUTF8String,
PyErr_Occurred, PyErr_Fetch)
from cpython.ref cimport PyObject, Py_XDECREF
-from io.common import ParserError, DtypeWarning, EmptyDataError
+from io.common import ParserError, DtypeWarning, EmptyDataError, ParserWarning
# Import CParserError as alias of ParserError for backwards compatibility.
# Ultimately, we want to remove this import. See gh-12665 and gh-14479.
@@ -987,7 +987,7 @@ cdef class TextReader:
Py_ssize_t i, nused
kh_str_t *na_hashset = NULL
int start, end
- object name, na_flist
+ object name, na_flist, col_dtype = None
bint na_filter = 0
Py_ssize_t num_cols
@@ -1043,14 +1043,34 @@ cdef class TextReader:
else:
na_filter = 0
+ col_dtype = None
+ if self.dtype is not None:
+ if isinstance(self.dtype, dict):
+ if name in self.dtype:
+ col_dtype = self.dtype[name]
+ elif i in self.dtype:
+ col_dtype = self.dtype[i]
+ else:
+ if self.dtype.names:
+ # structured array
+ col_dtype = np.dtype(self.dtype.descr[i][1])
+ else:
+ col_dtype = self.dtype
+
if conv:
+ if col_dtype is not None:
+ warnings.warn(("Both a converter and dtype were specified "
+ "for column {0} - only the converter will "
+ "be used").format(name), ParserWarning,
+ stacklevel=5)
results[i] = _apply_converter(conv, self.parser, i, start, end,
self.c_encoding)
continue
# Should return as the desired dtype (inferred or specified)
col_res, na_count = self._convert_tokens(
- i, start, end, name, na_filter, na_hashset, na_flist)
+ i, start, end, name, na_filter, na_hashset,
+ na_flist, col_dtype)
if na_filter:
self._free_na_set(na_hashset)
@@ -1075,32 +1095,17 @@ cdef class TextReader:
cdef inline _convert_tokens(self, Py_ssize_t i, int start, int end,
object name, bint na_filter,
kh_str_t *na_hashset,
- object na_flist):
- cdef:
- object col_dtype = None
-
- if self.dtype is not None:
- if isinstance(self.dtype, dict):
- if name in self.dtype:
- col_dtype = self.dtype[name]
- elif i in self.dtype:
- col_dtype = self.dtype[i]
- else:
- if self.dtype.names:
- # structured array
- col_dtype = np.dtype(self.dtype.descr[i][1])
- else:
- col_dtype = self.dtype
+ object na_flist, object col_dtype):
- if col_dtype is not None:
- col_res, na_count = self._convert_with_dtype(
- col_dtype, i, start, end, na_filter,
- 1, na_hashset, na_flist)
+ if col_dtype is not None:
+ col_res, na_count = self._convert_with_dtype(
+ col_dtype, i, start, end, na_filter,
+ 1, na_hashset, na_flist)
- # Fallback on the parse (e.g. we requested int dtype,
- # but its actually a float).
- if col_res is not None:
- return col_res, na_count
+ # Fallback on the parse (e.g. we requested int dtype,
+ # but its actually a float).
+ if col_res is not None:
+ return col_res, na_count
if i in self.noconvert:
return self._string_convert(i, start, end, na_filter, na_hashset)
| - [x] part of #12686
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Ultimately I'm working towards #8212 (types in excel parser), which should be pretty straightforward after this.
Right now the tests are moved from `c_parser_only.py`, may need to add some more
cc @gfyoung
| https://api.github.com/repos/pandas-dev/pandas/pulls/14295 | 2016-09-24T18:12:19Z | 2016-11-26T09:12:22Z | 2016-11-26T09:12:22Z | 2016-11-30T01:00:57Z |
PERF: faster grouping | diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 2eb6786356511..1c82560c7e630 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -22,7 +22,7 @@ def wrapper(fname):
return wrapper
-class nogil_groupby_count_2(object):
+class nogil_groupby_base(object):
goal_time = 0.2
def setup(self):
@@ -33,6 +33,9 @@ def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
+
+class nogil_groupby_count_2(nogil_groupby_base):
+
def time_nogil_groupby_count_2(self):
self.pg2()
@@ -41,16 +44,7 @@ def pg2(self):
self.df.groupby('key')['data'].count()
-class nogil_groupby_last_2(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_last_2(nogil_groupby_base):
def time_nogil_groupby_last_2(self):
self.pg2()
@@ -60,16 +54,7 @@ def pg2(self):
self.df.groupby('key')['data'].last()
-class nogil_groupby_max_2(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_max_2(nogil_groupby_base):
def time_nogil_groupby_max_2(self):
self.pg2()
@@ -79,16 +64,7 @@ def pg2(self):
self.df.groupby('key')['data'].max()
-class nogil_groupby_mean_2(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_mean_2(nogil_groupby_base):
def time_nogil_groupby_mean_2(self):
self.pg2()
@@ -98,16 +74,7 @@ def pg2(self):
self.df.groupby('key')['data'].mean()
-class nogil_groupby_min_2(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_min_2(nogil_groupby_base):
def time_nogil_groupby_min_2(self):
self.pg2()
@@ -117,16 +84,7 @@ def pg2(self):
self.df.groupby('key')['data'].min()
-class nogil_groupby_prod_2(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_prod_2(nogil_groupby_base):
def time_nogil_groupby_prod_2(self):
self.pg2()
@@ -136,16 +94,7 @@ def pg2(self):
self.df.groupby('key')['data'].prod()
-class nogil_groupby_sum_2(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_sum_2(nogil_groupby_base):
def time_nogil_groupby_sum_2(self):
self.pg2()
@@ -155,16 +104,7 @@ def pg2(self):
self.df.groupby('key')['data'].sum()
-class nogil_groupby_sum_4(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_sum_4(nogil_groupby_base):
def time_nogil_groupby_sum_4(self):
self.pg4()
@@ -172,41 +112,16 @@ def time_nogil_groupby_sum_4(self):
def f(self):
self.df.groupby('key')['data'].sum()
- def g2(self):
- for i in range(2):
- self.f()
-
def g4(self):
for i in range(4):
self.f()
- def g8(self):
- for i in range(8):
- self.f()
-
- @test_parallel(num_threads=2)
- def pg2(self):
- self.f()
-
@test_parallel(num_threads=4)
def pg4(self):
self.f()
- @test_parallel(num_threads=8)
- def pg8(self):
- self.f()
-
-class nogil_groupby_sum_8(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 1000000
- self.ngroups = 1000
- np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
- if (not have_real_test_parallel):
- raise NotImplementedError
+class nogil_groupby_sum_8(nogil_groupby_base):
def time_nogil_groupby_sum_8(self):
self.pg8()
@@ -214,48 +129,68 @@ def time_nogil_groupby_sum_8(self):
def f(self):
self.df.groupby('key')['data'].sum()
- def g2(self):
- for i in range(2):
- self.f()
-
- def g4(self):
- for i in range(4):
- self.f()
-
def g8(self):
for i in range(8):
self.f()
- @test_parallel(num_threads=2)
- def pg2(self):
- self.f()
-
- @test_parallel(num_threads=4)
- def pg4(self):
- self.f()
-
@test_parallel(num_threads=8)
def pg8(self):
self.f()
-class nogil_groupby_var_2(object):
+class nogil_groupby_var_2(nogil_groupby_base):
+
+ def time_nogil_groupby_var_2(self):
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].var()
+
+
+class nogil_groupby_groups(object):
goal_time = 0.2
def setup(self):
- self.N = 1000000
- self.ngroups = 1000
np.random.seed(1234)
- self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
+ self.size = 2**22
+ self.ngroups = 100
+ self.data = Series(np.random.randint(0, self.ngroups, size=self.size))
if (not have_real_test_parallel):
raise NotImplementedError
- def time_nogil_groupby_var_2(self):
+ def f(self):
+ self.data.groupby(self.data).groups
+
+
+class nogil_groupby_groups_2(nogil_groupby_groups):
+
+ def time_nogil_groupby_groups(self):
self.pg2()
@test_parallel(num_threads=2)
def pg2(self):
- self.df.groupby('key')['data'].var()
+ self.f()
+
+
+class nogil_groupby_groups_4(nogil_groupby_groups):
+
+ def time_nogil_groupby_groups(self):
+ self.pg4()
+
+ @test_parallel(num_threads=4)
+ def pg4(self):
+ self.f()
+
+
+class nogil_groupby_groups_8(nogil_groupby_groups):
+
+ def time_nogil_groupby_groups(self):
+ self.pg8()
+
+ @test_parallel(num_threads=8)
+ def pg8(self):
+ self.f()
class nogil_take1d_float64(object):
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 0611a3564ff7a..e12b00dd06b39 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -32,6 +32,32 @@ def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(self.f)
+#----------------------------------------------------------------------
+# groups
+
+class groupby_groups(object):
+ goal_time = 0.1
+
+ def setup(self):
+ size = 2**22
+ self.data = Series(np.random.randint(0, 100, size=size))
+ self.data2 = Series(np.random.randint(0, 10000, size=size))
+ self.data3 = Series(tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size)))
+ self.data4 = Series(tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size)))
+
+ def time_groupby_groups_int64_small(self):
+ self.data.groupby(self.data).groups
+
+ def time_groupby_groups_int64_large(self):
+ self.data2.groupby(self.data2).groups
+
+ def time_groupby_groups_object_small(self):
+ self.data3.groupby(self.data3).groups
+
+ def time_groupby_groups_object_large(self):
+ self.data4.groupby(self.data4).groups
+
+
#----------------------------------------------------------------------
# First / last functions
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 67beb468dce8a..355d12e113398 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1335,6 +1335,7 @@ Other API Changes
- ``Series`` and ``Index`` now support ``divmod`` which will return a tuple of
series or indices. This behaves like a standard binary operator with regards
to broadcasting rules (:issue:`14208`).
+- ``.groupby.groups`` will now return a dictionary of ``Index`` objects, rather than a dictionary of ``np.ndarray`` or ``lists`` (:issue:`14293`)
.. _whatsnew_0190.deprecations:
@@ -1407,6 +1408,7 @@ Performance Improvements
- Improved performance of hashing ``Period`` (:issue:`12817`)
- Improved performance of ``factorize`` of datetime with timezone (:issue:`13750`)
- Improved performance of by lazily creating indexing hashtables on larger Indexes (:issue:`14266`)
+- Improved performance of ``groupby.groups`` (:issue:`14293`)
.. _whatsnew_0190.bug_fixes:
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 8710ef34504d1..04f3ac70bdf5c 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -989,129 +989,47 @@ def is_lexsorted(list list_of_arrays):
@cython.boundscheck(False)
-def groupby_indices(dict ids, ndarray[int64_t] labels,
- ndarray[int64_t] counts):
- """
- turn group_labels output into a combined indexer mapping the labels to
- indexers
-
- Parameters
- ----------
- ids: dict
- mapping of label -> group indexer
- labels: ndarray
- labels for positions
- counts: ndarray
- group counts
-
- Returns
- -------
- list of ndarrays of indices
-
- """
- cdef:
- Py_ssize_t i, n = len(labels)
- ndarray[int64_t] arr, seen
- int64_t loc
- int64_t k
- dict result = {}
-
- seen = np.zeros_like(counts)
-
- cdef int64_t **vecs = <int64_t **> malloc(len(ids) * sizeof(int64_t*))
- for i from 0 <= i < len(counts):
- arr = np.empty(counts[i], dtype=np.int64)
- result[ids[i]] = arr
- vecs[i] = <int64_t*> arr.data
-
- for i from 0 <= i < n:
- k = labels[i]
-
- # was NaN
- if k == -1:
- continue
-
- loc = seen[k]
- vecs[k][loc] = i
- seen[k] = loc + 1
-
- free(vecs)
- return result
-
-
@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_labels(ndarray[object] values):
+def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups):
"""
- Compute label vector from input values and associated useful data
+ compute a 1-d indexer that is an ordering of the passed index,
+ ordered by the groups. This is a reverse of the label
+ factorization process.
Parameters
----------
- values: object ndarray
+ index: int64 ndarray
+ mappings from group -> position
+ ngroups: int64
+ number of groups
- Returns
- -------
- tuple of (reverse mappings of label -> group indexer,
- factorized labels ndarray,
- group counts ndarray)
+ return a tuple of (1-d indexer ordered by groups, group counts)
"""
- cdef:
- Py_ssize_t i, n = len(values)
- ndarray[int64_t] labels = np.empty(n, dtype=np.int64)
- ndarray[int64_t] counts = np.empty(n, dtype=np.int64)
- dict ids = {}, reverse = {}
- int64_t idx
- object val
- int64_t count = 0
-
- for i from 0 <= i < n:
- val = values[i]
-
- # is NaN
- if val != val:
- labels[i] = -1
- continue
- # for large number of groups, not doing try: except: makes a big
- # difference
- if val in ids:
- idx = ids[val]
- labels[i] = idx
- counts[idx] = counts[idx] + 1
- else:
- ids[val] = count
- reverse[count] = val
- labels[i] = count
- counts[count] = 1
- count += 1
-
- return reverse, labels, counts[:count].copy()
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups):
cdef:
Py_ssize_t i, loc, label, n
ndarray[int64_t] counts, where, result
- # count group sizes, location 0 for NA
counts = np.zeros(ngroups + 1, dtype=np.int64)
n = len(index)
- for i from 0 <= i < n:
- counts[index[i] + 1] += 1
-
- # mark the start of each contiguous group of like-indexed data
+ result = np.zeros(n, dtype=np.int64)
where = np.zeros(ngroups + 1, dtype=np.int64)
- for i from 1 <= i < ngroups + 1:
- where[i] = where[i - 1] + counts[i - 1]
- # this is our indexer
- result = np.zeros(n, dtype=np.int64)
- for i from 0 <= i < n:
- label = index[i] + 1
- result[where[label]] = i
- where[label] += 1
+ with nogil:
+
+ # count group sizes, location 0 for NA
+ for i from 0 <= i < n:
+ counts[index[i] + 1] += 1
+
+ # mark the start of each contiguous group of like-indexed data
+ for i from 1 <= i < ngroups + 1:
+ where[i] = where[i - 1] + counts[i - 1]
+
+ # this is our indexer
+ for i from 0 <= i < n:
+ label = index[i] + 1
+ result[where[label]] = i
+ where[label] += 1
return result, counts
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 6b37a5e2cd202..db48f2a46eaf3 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -6,6 +6,7 @@
from pandas import compat, lib
from pandas.compat import u, lzip
+import pandas.algos as _algos
from pandas.types.generic import ABCSeries, ABCIndexClass, ABCCategoricalIndex
from pandas.types.missing import isnull, notnull
@@ -1699,6 +1700,45 @@ def __setitem__(self, key, value):
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
+ def _reverse_indexer(self):
+ """
+ Compute the inverse of a categorical, returning
+ a dict of categories -> indexers.
+
+ *This is an internal function*
+
+ Returns
+ -------
+ dict of categories -> indexers
+
+ Example
+ -------
+ In [1]: c = pd.Categorical(list('aabca'))
+
+ In [2]: c
+ Out[2]:
+ [a, a, b, c, a]
+ Categories (3, object): [a, b, c]
+
+ In [3]: c.categories
+ Out[3]: Index([u'a', u'b', u'c'], dtype='object')
+
+ In [4]: c.codes
+ Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
+
+ In [5]: c._reverse_indexer()
+ Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
+
+ """
+ categories = self.categories
+ r, counts = _algos.groupsort_indexer(self.codes.astype('int64'),
+ categories.size)
+ counts = counts.cumsum()
+ result = [r[counts[indexer]:counts[indexer + 1]]
+ for indexer in range(len(counts) - 1)]
+ result = dict(zip(categories, result))
+ return result
+
# reduction ops #
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 733fae0c34729..3c376e3188eac 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -28,6 +28,7 @@
_ensure_platform_int,
_ensure_int64,
_ensure_object,
+ _ensure_categorical,
_ensure_float)
from pandas.types.cast import _possibly_downcast_to_dtype
from pandas.types.missing import isnull, notnull, _maybe_fill
@@ -1657,7 +1658,7 @@ def groups(self):
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
- return self.axis.groupby(to_groupby.values)
+ return self.axis.groupby(to_groupby)
@cache_readonly
def is_monotonic(self):
@@ -2319,7 +2320,8 @@ def ngroups(self):
@cache_readonly
def indices(self):
- return _groupby_indices(self.grouper)
+ values = _ensure_categorical(self.grouper)
+ return values._reverse_indexer()
@property
def labels(self):
@@ -2342,7 +2344,8 @@ def _make_labels(self):
@cache_readonly
def groups(self):
- return self.index.groupby(self.grouper)
+ return self.index.groupby(Categorical.from_codes(self.labels,
+ self.group_index))
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
@@ -4436,23 +4439,6 @@ def _reorder_by_uniques(uniques, labels):
return uniques, labels
-def _groupby_indices(values):
-
- if is_categorical_dtype(values):
- # we have a categorical, so we can do quite a bit
- # bit better than factorizing again
- reverse = dict(enumerate(values.categories))
- codes = values.codes.astype('int64')
-
- mask = 0 <= codes
- counts = np.bincount(codes[mask], minlength=values.categories.size)
- else:
- reverse, codes, counts = _algos.group_labels(
- _values_from_object(_ensure_object(values)))
-
- return _algos.groupby_indices(reverse, codes, counts)
-
-
def numpy_groupby(data, labels, axis=0):
s = np.argsort(labels)
keys, inv = np.unique(labels, return_inverse=True)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index f430305f5cb91..5138ca5a6b21e 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -17,7 +17,9 @@
from pandas.types.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex
from pandas.types.missing import isnull, array_equivalent
-from pandas.types.common import (_ensure_int64, _ensure_object,
+from pandas.types.common import (_ensure_int64,
+ _ensure_object,
+ _ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
@@ -111,7 +113,6 @@ class Index(IndexOpsMixin, StringAccessorMixin, PandasObject):
_join_precedence = 1
# Cython methods
- _groupby = _algos.groupby_object
_arrmap = _algos.arrmap_object
_left_indexer_unique = _join.left_join_indexer_unique_object
_left_indexer = _join.left_join_indexer_object
@@ -2352,13 +2353,13 @@ def _possibly_promote(self, other):
return self.astype('object'), other.astype('object')
return self, other
- def groupby(self, to_groupby):
+ def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
- to_groupby : array
+ values : array
Values used to determine the groups.
Returns
@@ -2366,7 +2367,19 @@ def groupby(self, to_groupby):
groups : dict
{group name -> group labels}
"""
- return self._groupby(self.values, _values_from_object(to_groupby))
+
+ # TODO: if we are a MultiIndex, we can do better
+ # that converting to tuples
+ from .multi import MultiIndex
+ if isinstance(values, MultiIndex):
+ values = values.values
+ values = _ensure_categorical(values)
+ result = values._reverse_indexer()
+
+ # map to the label
+ result = {k: self.take(v) for k, v in compat.iteritems(result)}
+
+ return result
def map(self, mapper):
"""
diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py
index b9625f3aaff92..97f7093e99064 100644
--- a/pandas/indexes/numeric.py
+++ b/pandas/indexes/numeric.py
@@ -113,7 +113,6 @@ class Int64Index(NumericIndex):
"""
_typ = 'int64index'
- _groupby = _algos.groupby_int64
_arrmap = _algos.arrmap_int64
_left_indexer_unique = _join.left_join_indexer_unique_int64
_left_indexer = _join.left_join_indexer_int64
@@ -200,7 +199,6 @@ class Float64Index(NumericIndex):
_typ = 'float64index'
_engine_type = _index.Float64Engine
- _groupby = _algos.groupby_float64
_arrmap = _algos.arrmap_float64
_left_indexer_unique = _join.left_join_indexer_unique_float64
_left_indexer = _join.left_join_indexer_float64
diff --git a/pandas/src/algos_common_helper.pxi b/pandas/src/algos_common_helper.pxi
index be587fbc9a019..9dede87e0c15b 100644
--- a/pandas/src/algos_common_helper.pxi
+++ b/pandas/src/algos_common_helper.pxi
@@ -10,7 +10,6 @@ Template for each `dtype` helper function using 1-d template
- backfill_1d
- backfill_2d
- is_monotonic
-- groupby
- arrmap
WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
@@ -391,35 +390,6 @@ def is_monotonic_float64(ndarray[float64_t] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_float64(ndarray[float64_t] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_float64(ndarray[float64_t] index, object func):
@@ -806,35 +776,6 @@ def is_monotonic_float32(ndarray[float32_t] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_float32(ndarray[float32_t] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_float32(ndarray[float32_t] index, object func):
@@ -1221,35 +1162,6 @@ def is_monotonic_object(ndarray[object] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_object(ndarray[object] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_object(ndarray[object] index, object func):
@@ -1636,35 +1548,6 @@ def is_monotonic_int32(ndarray[int32_t] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_int32(ndarray[int32_t] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_int32(ndarray[int32_t] index, object func):
@@ -2051,35 +1934,6 @@ def is_monotonic_int64(ndarray[int64_t] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_int64(ndarray[int64_t] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_int64(ndarray[int64_t] index, object func):
@@ -2466,35 +2320,6 @@ def is_monotonic_bool(ndarray[uint8_t] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_bool(ndarray[uint8_t] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_bool(ndarray[uint8_t] index, object func):
diff --git a/pandas/src/algos_common_helper.pxi.in b/pandas/src/algos_common_helper.pxi.in
index cec5712c0b7f4..c52c734f727e9 100644
--- a/pandas/src/algos_common_helper.pxi.in
+++ b/pandas/src/algos_common_helper.pxi.in
@@ -10,7 +10,6 @@ Template for each `dtype` helper function using 1-d template
- backfill_1d
- backfill_2d
- is_monotonic
-- groupby
- arrmap
WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
@@ -413,35 +412,6 @@ def is_monotonic_{{name}}(ndarray[{{c_type}}] arr, bint timelike):
is_unique and (is_monotonic_inc or is_monotonic_dec)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def groupby_{{name}}(ndarray[{{c_type}}] index, ndarray labels):
- cdef dict result = {}
- cdef Py_ssize_t i, length
- cdef list members
- cdef object idx, key
-
- length = len(index)
-
- if not length == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- for i in range(length):
- key = util.get_value_1d(labels, i)
-
- if is_null_datetimelike(key):
- continue
-
- idx = index[i]
- if key in result:
- members = result[key]
- members.append(idx)
- else:
- result[key] = [idx]
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def arrmap_{{name}}(ndarray[{{c_type}}] index, object func):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7f68318d4d7d3..421174ded57d5 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1541,7 +1541,7 @@ def get_reindex_type(target):
def test_groupby(self):
idx = Index(range(5))
groups = idx.groupby(np.array([1, 1, 2, 2, 2]))
- exp = {1: [0, 1], 2: [2, 3, 4]}
+ exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(groups, exp)
def test_equals_op_multiindex(self):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index b04d7f128e133..b362c9716b672 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -144,8 +144,8 @@ def test_index_groupby(self):
for idx in [int_idx, float_idx, obj_idx, dt_idx]:
to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1])
- self.assertEqual(idx.groupby(to_groupby),
- {1.0: [idx[0], idx[5]], 2.0: [idx[1], idx[4]]})
+ tm.assert_dict_equal(idx.groupby(to_groupby),
+ {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]})
to_groupby = Index([datetime(2011, 11, 1),
datetime(2011, 12, 1),
@@ -155,11 +155,10 @@ def test_index_groupby(self):
datetime(2011, 11, 1)],
tz='UTC').values
- ex_keys = pd.tslib.datetime_to_datetime64(np.array([Timestamp(
- '2011-11-01'), Timestamp('2011-12-01')]))
- expected = {ex_keys[0][0]: [idx[0], idx[5]],
- ex_keys[0][1]: [idx[1], idx[4]]}
- self.assertEqual(idx.groupby(to_groupby), expected)
+ ex_keys = [Timestamp('2011-11-01'), Timestamp('2011-12-01')]
+ expected = {ex_keys[0]: idx[[0, 5]],
+ ex_keys[1]: idx[[1, 4]]}
+ tm.assert_dict_equal(idx.groupby(to_groupby), expected)
def test_modulo(self):
# GH 9244
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index a21295e1a9823..01c1d48c6d5c0 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -521,13 +521,6 @@ def test_groupby_dict_mapping(self):
assert_series_equal(result, result2)
assert_series_equal(result, expected2)
- def test_groupby_bounds_check(self):
- # groupby_X is code-generated, so if one variant
- # does, the rest probably do to
- a = np.array([1, 2], dtype='object')
- b = np.array([1, 2, 3], dtype='object')
- self.assertRaises(AssertionError, pd.algos.groupby_object, a, b)
-
def test_groupby_grouper_f_sanity_checked(self):
dates = date_range('01-Jan-2013', periods=12, freq='MS')
ts = Series(np.random.randn(12), index=dates)
@@ -3478,13 +3471,13 @@ def test_groupby_nat_exclude(self):
'str': [np.nan, 'a', np.nan, 'a', np.nan, 'a', np.nan, 'b']})
grouped = df.groupby('dt')
- expected = [[1, 7], [3, 5]]
+ expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
self.assertEqual(len(keys), 2)
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
- self.assertEqual(grouped.groups[k], e)
+ tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
@@ -4447,7 +4440,7 @@ def test_multiindex_columns_empty_level(self):
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
- self.assertEqual(result, expected)
+ tm.assert_dict_equal(result, expected)
def test_cython_median(self):
df = DataFrame(np.random.randn(1000))
diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py
index 9a12220f5b41d..a63ae5f7cf74e 100644
--- a/pandas/tests/types/test_inference.py
+++ b/pandas/tests/types/test_inference.py
@@ -16,7 +16,7 @@
from pandas import lib, tslib
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
- Panel, Period)
+ Panel, Period, Categorical)
from pandas.compat import u, PY2, lrange
from pandas.types import inference
from pandas.types.common import (is_timedelta64_dtype,
@@ -26,7 +26,8 @@
is_float,
is_bool,
is_scalar,
- _ensure_int32)
+ _ensure_int32,
+ _ensure_categorical)
from pandas.types.missing import isnull
from pandas.util import testing as tm
@@ -842,6 +843,16 @@ def test_ensure_int32():
assert (result.dtype == np.int32)
+def test_ensure_categorical():
+ values = np.arange(10, dtype=np.int32)
+ result = _ensure_categorical(values)
+ assert (result.dtype == 'category')
+
+ values = Categorical(values)
+ result = _ensure_categorical(values)
+ tm.assert_categorical_equal(result, values)
+
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 3b676b894d355..96213a4aec34d 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -226,10 +226,6 @@ def _box_values(self, values):
"""
return lib.map_infer(values, self._box_func)
- def groupby(self, f):
- objs = self.asobject.values
- return _algos.groupby_object(objs, f)
-
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
diff --git a/pandas/types/common.py b/pandas/types/common.py
index 2e7a67112e6db..e0e4501738745 100644
--- a/pandas/types/common.py
+++ b/pandas/types/common.py
@@ -42,6 +42,13 @@ def _ensure_float(arr):
_ensure_object = algos.ensure_object
+def _ensure_categorical(arr):
+ if not is_categorical(arr):
+ from pandas import Categorical
+ arr = Categorical(arr)
+ return arr
+
+
def is_object_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.object_)
| closes #14293
| https://api.github.com/repos/pandas-dev/pandas/pulls/14294 | 2016-09-24T13:53:05Z | 2016-09-27T22:10:32Z | null | 2016-09-27T22:10:32Z |
BUG: Fix concat key name | diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 1c5f4915bb3a4..b237d095fab34 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -30,3 +30,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Bug in ``concat`` where names of keys were not propagated to the resulting MultiIndex (:issue:`14252`)
\ No newline at end of file
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index e5aaba26135e7..b7cd8a1c01224 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -4,21 +4,21 @@
from datetime import datetime
-from numpy import nan
import numpy as np
+from numpy import nan
-from pandas.compat import lrange
-from pandas import DataFrame, Series, Index, Timestamp
import pandas as pd
-from pandas.util.testing import (assert_series_equal,
- assert_frame_equal,
- assertRaisesRegexp)
-
-import pandas.util.testing as tm
+from pandas import DataFrame, Index, Series, Timestamp
+from pandas.compat import lrange
from pandas.tests.frame.common import TestData
+import pandas.util.testing as tm
+from pandas.util.testing import (assertRaisesRegexp,
+ assert_frame_equal,
+ assert_series_equal)
+
class TestDataFrameConcatCommon(tm.TestCase, TestData):
@@ -324,6 +324,29 @@ def test_join_multiindex_leftright(self):
assert_frame_equal(df2.join(df1, how='left'),
exp[['value2', 'value1']])
+ def test_concat_named_keys(self):
+ # GH 14252
+ df = pd.DataFrame({'foo': [1, 2], 'bar': [0.1, 0.2]})
+ index = Index(['a', 'b'], name='baz')
+ concatted_named_from_keys = pd.concat([df, df], keys=index)
+ expected_named = pd.DataFrame(
+ {'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]},
+ index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]),
+ names=['baz', None]))
+ assert_frame_equal(concatted_named_from_keys, expected_named)
+
+ index_no_name = Index(['a', 'b'], name=None)
+ concatted_named_from_names = pd.concat(
+ [df, df], keys=index_no_name, names=['baz'])
+ assert_frame_equal(concatted_named_from_names, expected_named)
+
+ concatted_unnamed = pd.concat([df, df], keys=index_no_name)
+ expected_unnamed = pd.DataFrame(
+ {'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]},
+ index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]),
+ names=[None, None]))
+ assert_frame_equal(concatted_unnamed, expected_unnamed)
+
class TestDataFrameCombineFirst(tm.TestCase, TestData):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 8cdde8d92b28f..dc8b1feef51cc 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1369,7 +1369,9 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
- keys = clean_keys
+ name = getattr(keys, 'name', None)
+ keys = Index(clean_keys)
+ keys.name = name
if len(objs) == 0:
raise ValueError('All objects passed were None')
@@ -1454,7 +1456,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
self.axis = axis
self.join_axes = join_axes
self.keys = keys
- self.names = names
+ self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
| - [x] closes #14252
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Fixes a bug where `pd.concat` didn't propagate the names of keys used to create
a hierarchical index.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14292 | 2016-09-23T19:44:46Z | 2016-10-09T17:38:05Z | null | 2016-10-09T17:38:30Z |
BUG: set_levels set illegal levels. | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 454ffc5e5c685..1e6d8543dc55a 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1576,5 +1576,6 @@ Bug Fixes
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
-- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
+- ``PeriodIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if grouped dataframe contains empty bins (:issue:`13629`)
+- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 09c755b2c9792..12a7964f807fd 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -116,12 +116,22 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
return result
- def _verify_integrity(self):
+ def _verify_integrity(self, new_labels=None, new_levels=None):
"""Raises ValueError if length of levels and labels don't match or any
- label would exceed level bounds"""
+ label would exceed level bounds
+
+ Parameters
+ ----------
+ new_labels : optional list
+ Labels to check for validity. Defaults to current labels.
+ new_levels : optional list
+ Levels to check for validity. Defaults to current levels.
+ """
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
- labels, levels = self.labels, self.levels
+ labels = new_labels or self.labels
+ levels = new_levels or self.levels
+
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
@@ -162,6 +172,9 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
+ if verify_integrity:
+ self._verify_integrity(new_levels=new_levels)
+
names = self.names
self._levels = new_levels
if any(names):
@@ -170,9 +183,6 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
self._tuples = None
self._reset_cache()
- if verify_integrity:
- self._verify_integrity()
-
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
@@ -268,13 +278,13 @@ def _set_labels(self, labels, level=None, copy=False, validate=True,
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
+ if verify_integrity:
+ self._verify_integrity(new_labels=new_labels)
+
self._labels = new_labels
self._tuples = None
self._reset_cache()
- if verify_integrity:
- self._verify_integrity()
-
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 5248f0775d22f..76211c9b3eee4 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -154,7 +154,7 @@ def assert_matching(actual, expected):
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
- act = np.asarray(act)
+ act = np.asarray(act, dtype=np.object_)
exp = np.asarray(exp, dtype=np.object_)
tm.assert_numpy_array_equal(act, exp)
@@ -204,6 +204,25 @@ def assert_matching(actual, expected):
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
+ # illegal level changing should not change levels
+ # GH 13754
+ original_index = self.index.copy()
+ with assertRaisesRegexp(ValueError, "^On"):
+ self.index.set_levels(['c'], level=0, inplace=True)
+ assert_matching(self.index.levels, original_index.levels)
+
+ with assertRaisesRegexp(ValueError, "^On"):
+ self.index.set_labels([0, 1, 2, 3, 4, 5], level=0, inplace=True)
+ assert_matching(self.index.labels, original_index.labels)
+
+ with assertRaisesRegexp(TypeError, "^Levels"):
+ self.index.set_levels('c', level=0, inplace=True)
+ assert_matching(self.index.levels, original_index.levels)
+
+ with assertRaisesRegexp(TypeError, "^Labels"):
+ self.index.set_labels(1, level=0, inplace=True)
+ assert_matching(self.index.labels, original_index.labels)
+
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
| - [x] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
`MultiIndex.set_levels`, when passed in illegal level values, raises an error.
When `inplace=True`, though, the illegal level values are still accepted. This
commit fixes that behavior by checking that the proposed level values are legal
before setting them.
Duplicate of https://github.com/pydata/pandas/pull/14236, which kept failing the build with Qt errors. This is a retry with a fresh Travis build. Assuming it passes, I'll close https://github.com/pydata/pandas/pull/14236 in favor of this PR.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14290 | 2016-09-23T13:20:23Z | 2016-09-23T13:37:31Z | null | 2016-09-23T13:37:48Z |
DOC: Expand reference doc for read_json | diff --git a/pandas/io/json.py b/pandas/io/json.py
index e697351484f68..a34824dcfc190 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -122,33 +122,42 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
+ meta_prefix : string, default None
- orient
+ orient : string,
+ Indication of expected JSON input format.
+ The set of allowed orients changes depending on the value
+ of the `typ` parameter.
- * `Series`
+ * when ``typ == 'series'``,
+ - allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- - allowed values are: ``{'split','records','index'}``
- The Series index must be unique for orient ``'index'``.
- * `DataFrame`
+ * when ``typ == 'frame'``,
+ - allowed orients are ``{'split','records','index',
+ 'columns','values'}``
- default is ``'columns'``
- - allowed values are: {'split','records','index','columns','values'}
- - The DataFrame index must be unique for orients 'index' and
- 'columns'.
- - The DataFrame columns must be unique for orients 'index',
- 'columns', and 'records'.
-
- * The format of the JSON string
-
- - split : dict like
- ``{index -> [index], columns -> [columns], data -> [values]}``
- - records : list like
- ``[{column -> value}, ... , {column -> value}]``
- - index : dict like ``{index -> {column -> value}}``
- - columns : dict like ``{column -> {index -> value}}``
- - values : just the values array
+ - The DataFrame index must be unique for orients ``'index'`` and
+ ``'columns'``.
+ - The DataFrame columns must be unique for orients ``'index'``,
+ ``'columns'``, and ``'records'``.
+
+
+ The value of `orient` specifies the expected format of the
+ JSON string. The expected JSON formats are compatible with the
+ strings produced by ``to_json()`` with a corresponding value
+ of `orient`.
+
+ - ``'split'`` : dict like
+ ``{index -> [index], columns -> [columns], data -> [values]}``
+ - ``'records'`` : list like
+ ``[{column -> value}, ... , {column -> value}]``
+ - ``'index'`` : dict like ``{index -> {column -> value}}``
+ - ``'columns'`` : dict like ``{column -> {index -> value}}``
+ - ``'values'`` : just the values array
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
@@ -197,7 +206,41 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
Returns
-------
- result : Series or DataFrame
+ result : Series or DataFrame, depending on the value of `typ`.
+
+ Examples
+ --------
+
+ >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
+ ... index=['row 1', 'row 2'],
+ ... columns=['col 1', 'col 2'])
+
+ >>> df.to_json(orient='split')
+ '{"columns":["col 1","col 2"],
+ "index":["row 1","row 2"],
+ "data":[["a","b"],["c","d"]]}'
+ <BLANKLINE>
+ >>> pd.read_json(_, orient='split')
+ col 1 col 2
+ row 1 a b
+ row 2 c d
+
+ >>> df.to_json(orient='records')
+ '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
+ <BLANKLINE>
+ >>> pd.read_json(_, orient='records')
+ col 1 col 2
+ 0 a b
+ 1 c d
+
+ >>> df.to_json(orient='index')
+ '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
+ <BLANKLINE>
+ >>> pd.read_json(_, orient='index')
+ col 1 col 2
+ row 1 a b
+ row 2 c d
+
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] expanded reference document for `pandas.read_json()`, especially concentrating on the `orient` parameter. Also added some example usage code and explicitly mention `to_json()` as a source of valid JSON strings.
<div class="section" id="pandas-read-json">
<h1>pandas.read_json<a class="headerlink" href="#pandas-read-json" title="Permalink to this headline">¶</a></h1>
<dl class="function">
<dt id="pandas.read_json">
<code class="descclassname">pandas.</code><code class="descname">read_json</code><span class="sig-paren">(</span><em>path_or_buf=None</em>, <em>orient=None</em>, <em>typ='frame'</em>, <em>dtype=True</em>, <em>convert_axes=True</em>, <em>convert_dates=True</em>, <em>keep_default_dates=True</em>, <em>numpy=False</em>, <em>precise_float=False</em>, <em>date_unit=None</em>, <em>encoding=None</em>, <em>lines=False</em><span class="sig-paren">)</span><a class="reference external" href="http://github.com/pydata/pandas/blob/master/pandas/io/json.py#L112-L272"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pandas.read_json" title="Permalink to this definition">¶</a></dt>
<dd><p>Convert a JSON string to pandas object</p>
<table class="docutils field-list" frame="void" rules="none">
<colgroup><col class="field-name">
<col class="field-body">
</colgroup><tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><p class="first"><strong>path_or_buf</strong> : a valid JSON string or file-like, default: None</p>
<blockquote>
<div><p>The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be <code class="docutils literal"><span class="pre">file://localhost/path/to/table.json</span></code></p>
</div></blockquote>
<p><strong>orient</strong> : string, indicating the expected format of the JSON input.</p>
<blockquote>
<div><p>The set of allowed orients changes depending on the value
of the <code class="docutils literal"><span class="pre">typ</span></code> parameter.</p>
<ul class="simple">
<li>when <code class="docutils literal"><span class="pre">typ</span> <span class="pre">==</span> <span class="pre">'series'</span></code>,<ul>
<li>allowed orients are <code class="docutils literal"><span class="pre">{'split','records','index'}</span></code></li>
<li>default is <code class="docutils literal"><span class="pre">'index'</span></code></li>
<li>The Series index must be unique for orient <code class="docutils literal"><span class="pre">'index'</span></code>.</li>
</ul>
</li>
<li>when <code class="docutils literal"><span class="pre">typ</span> <span class="pre">==</span> <span class="pre">'frame'</span></code>,<ul>
<li>allowed orients are <code class="docutils literal"><span class="pre">{'split','records','index',</span>
<span class="pre">'columns','values'}</span></code></li>
<li>default is <code class="docutils literal"><span class="pre">'columns'</span></code></li>
<li>The DataFrame index must be unique for orients ‘index’ and
‘columns’.</li>
<li>The DataFrame columns must be unique for orients ‘index’,
‘columns’, and ‘records’.</li>
</ul>
</li>
</ul>
<p>The value of <code class="docutils literal"><span class="pre">orient</span></code> specifies the expected format of the
JSON string. The expected JSON formats are compatible with the
strings produced by <code class="docutils literal"><span class="pre">to_json()</span></code> with a corresponding value
of <code class="docutils literal"><span class="pre">orient</span></code>.</p>
<blockquote>
<div><ul class="simple">
<li><code class="docutils literal"><span class="pre">'split'</span></code> : dict like
<code class="docutils literal"><span class="pre">{index</span> <span class="pre">-></span> <span class="pre">[index],</span> <span class="pre">columns</span> <span class="pre">-></span> <span class="pre">[columns],</span> <span class="pre">data</span> <span class="pre">-></span> <span class="pre">[values]}</span></code></li>
<li><code class="docutils literal"><span class="pre">'records'</span></code> : list like
<code class="docutils literal"><span class="pre">[{column</span> <span class="pre">-></span> <span class="pre">value},</span> <span class="pre">...</span> <span class="pre">,</span> <span class="pre">{column</span> <span class="pre">-></span> <span class="pre">value}]</span></code></li>
<li><code class="docutils literal"><span class="pre">'index'</span></code> : dict like <code class="docutils literal"><span class="pre">{index</span> <span class="pre">-></span> <span class="pre">{column</span> <span class="pre">-></span> <span class="pre">value}}</span></code></li>
<li><code class="docutils literal"><span class="pre">'columns'</span></code> : dict like <code class="docutils literal"><span class="pre">{column</span> <span class="pre">-></span> <span class="pre">{index</span> <span class="pre">-></span> <span class="pre">value}}</span></code></li>
<li><code class="docutils literal"><span class="pre">'values'</span></code> : just the values array</li>
</ul>
</div></blockquote>
</div></blockquote>
<p><strong>typ</strong> : type of object to recover (series or frame), default ‘frame’</p>
<p><strong>dtype</strong> : boolean or dict, default True</p>
<blockquote>
<div><p>If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don’t infer dtypes at all, applies only to the data.</p>
</div></blockquote>
<p><strong>convert_axes</strong> : boolean, default True</p>
<blockquote>
<div><p>Try to convert the axes to the proper dtypes.</p>
</div></blockquote>
<p><strong>convert_dates</strong> : boolean, default True</p>
<blockquote>
<div><p>List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if</p>
<ul class="simple">
<li>it ends with <code class="docutils literal"><span class="pre">'_at'</span></code>,</li>
<li>it ends with <code class="docutils literal"><span class="pre">'_time'</span></code>,</li>
<li>it begins with <code class="docutils literal"><span class="pre">'timestamp'</span></code>,</li>
<li>it is <code class="docutils literal"><span class="pre">'modified'</span></code>, or</li>
<li>it is <code class="docutils literal"><span class="pre">'date'</span></code></li>
</ul>
</div></blockquote>
<p><strong>keep_default_dates</strong> : boolean, default True</p>
<blockquote>
<div><p>If parsing dates, then parse the default datelike columns</p>
</div></blockquote>
<p><strong>numpy</strong> : boolean, default False</p>
<blockquote>
<div><p>Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.</p>
</div></blockquote>
<p><strong>precise_float</strong> : boolean, default False</p>
<blockquote>
<div><p>Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality</p>
</div></blockquote>
<p><strong>date_unit</strong> : string, default None</p>
<blockquote>
<div><p>The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of ‘s’, ‘ms’, ‘us’ or ‘ns’ to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.</p>
</div></blockquote>
<p><strong>lines</strong> : boolean, default False</p>
<blockquote>
<div><p>Read the file as a json object per line.</p>
<div class="versionadded">
<p><span class="versionmodified">New in version 0.19.0.</span></p>
</div>
</div></blockquote>
<p><strong>encoding</strong> : str, default is ‘utf-8’</p>
<blockquote>
<div><p>The encoding to use to decode py3 bytes.</p>
<div class="versionadded">
<p><span class="versionmodified">New in version 0.19.0.</span></p>
</div>
</div></blockquote>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last"><strong>result</strong> : Series or DataFrame, depending on the value of <code class="docutils literal"><span class="pre">typ</span></code>.</p>
</td>
</tr>
</tbody>
</table>
<p class="rubric">Examples</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">df</span> <span class="o">=</span> <span class="n">pd</span><span class="o">.</span><span class="n">DataFrame</span><span class="p">([[</span><span class="s1">'a'</span><span class="p">,</span> <span class="s1">'b'</span><span class="p">],</span> <span class="p">[</span><span class="s1">'c'</span><span class="p">,</span> <span class="s1">'d'</span><span class="p">]],</span>
<span class="go"> index=['row 1', 'row 2'],</span>
<span class="go"> columns=['col 1', 'col 2'])</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="n">df</span>
<span class="go"> col 1 col 2</span>
<span class="go">row 1 a b</span>
<span class="go">row 2 c d</span>
<span class="gp">>>> </span><span class="k">for</span> <span class="n">orient</span> <span class="ow">in</span> <span class="p">[</span><span class="s1">'split'</span><span class="p">,</span> <span class="s1">'records'</span><span class="p">,</span> <span class="s1">'index'</span><span class="p">]:</span>
<span class="go"> str = df.to_json(orient=orient)</span>
<span class="go"> print "'{}': '{}'".format(orient, str)</span>
<span class="go"> pd.read_json(str, orient=orient)</span>
<span class="go">'split':</span>
<span class="go">'{"columns":["col 1","col 2"],"index":["row 1","row 2"],"data":[["a","b"],</span>
<span class="go">["c","d"]]}'</span>
<span class="go">'records':</span>
<span class="go">'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'</span>
<span class="go">'index':</span>
<span class="go">'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'</span>
</pre></div>
</div>
</dd></dl>
</div>
| https://api.github.com/repos/pandas-dev/pandas/pulls/14284 | 2016-09-23T01:41:01Z | 2016-10-17T17:08:39Z | null | 2016-10-17T17:08:39Z |
TST: remove usages of np.full_like for numpy<1.8.0 compat | diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 51d8c95f9d783..b04d7f128e133 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -19,6 +19,14 @@
from .common import Base
+def full_like(array, value):
+ """Compatibility for numpy<1.8.0
+ """
+ ret = np.empty(array.shape, dtype=np.array(value).dtype)
+ ret.fill(value)
+ return ret
+
+
class Numeric(Base):
def test_numeric_compat(self):
@@ -80,18 +88,18 @@ def test_numeric_compat(self):
for r, e in zip(result, expected):
tm.assert_index_equal(r, e)
- result = divmod(idx, np.full_like(idx.values, 2))
+ result = divmod(idx, full_like(idx.values, 2))
with np.errstate(all='ignore'):
- div, mod = divmod(idx.values, np.full_like(idx.values, 2))
+ div, mod = divmod(idx.values, full_like(idx.values, 2))
expected = Index(div), Index(mod)
for r, e in zip(result, expected):
tm.assert_index_equal(r, e)
- result = divmod(idx, Series(np.full_like(idx.values, 2)))
+ result = divmod(idx, Series(full_like(idx.values, 2)))
with np.errstate(all='ignore'):
div, mod = divmod(
idx.values,
- np.full_like(idx.values, 2),
+ full_like(idx.values, 2),
)
expected = Index(div), Index(mod)
for r, e in zip(result, expected):
| - [x] closes ##14276
- [x] passes `git diff upstream/master | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/14277 | 2016-09-22T01:20:03Z | 2016-09-22T10:02:13Z | null | 2016-09-22T10:02:14Z |
DOC: Typo fix in ordered_merge warning | diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 6521acbd0b733..8cdde8d92b28f 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -146,7 +146,7 @@ def ordered_merge(left, right, on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y')):
- warnings.warn("ordered_merge is deprecated and replace by merged_ordered",
+ warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
FutureWarning, stacklevel=2)
return merge_ordered(left, right, on=on,
left_on=left_on, right_on=right_on,
| https://api.github.com/repos/pandas-dev/pandas/pulls/14271 | 2016-09-21T13:52:06Z | 2016-09-22T16:39:03Z | 2016-09-22T16:39:03Z | 2016-09-24T11:41:21Z | |
PERF: use uniqueness_check from monotonic check when possible | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index ffc6757b674ea..29071c99c6df6 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1404,6 +1404,7 @@ Performance Improvements
- Improved performance of datetime string parsing in ``DatetimeIndex`` (:issue:`13692`)
- Improved performance of hashing ``Period`` (:issue:`12817`)
- Improved performance of ``factorize`` of datetime with timezone (:issue:`13750`)
+- Improved performance of by lazily creating indexing hashtables on larger Indexes (:issue:`14266`)
.. _whatsnew_0190.bug_fixes:
@@ -1422,7 +1423,6 @@ Bug Fixes
- Bug in selection from a ``HDFStore`` with a fixed format and ``start`` and/or ``stop`` specified will now return the selected range (:issue:`8287`)
- Bug in ``Categorical.from_codes()`` where an unhelpful error was raised when an invalid ``ordered`` parameter was passed in (:issue:`14058`)
- Bug in ``Series`` construction from a tuple of integers on windows not returning default dtype (int64) (:issue:`13646`)
-
- Bug in ``.groupby(..).resample(..)`` when the same object is called multiple times (:issue:`13174`)
- Bug in ``.to_records()`` when index name is a unicode string (:issue:`13172`)
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 2935560a05b6b..a6eb74727a999 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -82,7 +82,7 @@ cdef class IndexEngine:
cdef:
bint unique, monotonic_inc, monotonic_dec
- bint initialized, monotonic_check
+ bint initialized, monotonic_check, unique_check
def __init__(self, vgetter, n):
self.vgetter = vgetter
@@ -91,6 +91,7 @@ cdef class IndexEngine:
self.initialized = 0
self.monotonic_check = 0
+ self.unique_check = 0
self.unique = 0
self.monotonic_inc = 0
@@ -177,8 +178,8 @@ cdef class IndexEngine:
return left
else:
return slice(left, right)
- else:
- return self._maybe_get_bool_indexer(val)
+
+ return self._maybe_get_bool_indexer(val)
cdef _maybe_get_bool_indexer(self, object val):
cdef:
@@ -215,6 +216,7 @@ cdef class IndexEngine:
if not self.initialized:
self.initialize()
+ self.unique_check = 1
return self.unique == 1
property is_monotonic_increasing:
@@ -234,16 +236,24 @@ cdef class IndexEngine:
return self.monotonic_dec == 1
cdef inline _do_monotonic_check(self):
+ cdef object is_unique
try:
values = self._get_index_values()
- self.monotonic_inc, self.monotonic_dec = \
+ self.monotonic_inc, self.monotonic_dec, is_unique = \
self._call_monotonic(values)
except TypeError:
self.monotonic_inc = 0
self.monotonic_dec = 0
+ is_unique = 0
self.monotonic_check = 1
+ # we can only be sure of uniqueness if is_unique=1
+ if is_unique:
+ self.initialized = 1
+ self.unique = 1
+ self.unique_check = 1
+
cdef _get_index_values(self):
return self.vgetter()
@@ -257,6 +267,10 @@ cdef class IndexEngine:
hash(val)
cdef inline _ensure_mapping_populated(self):
+ # need to reset if we have previously
+ # set the initialized from monotonic checks
+ if self.unique_check:
+ self.initialized = 0
if not self.initialized:
self.initialize()
@@ -274,6 +288,12 @@ cdef class IndexEngine:
def clear_mapping(self):
self.mapping = None
self.initialized = 0
+ self.monotonic_check = 0
+ self.unique_check = 0
+
+ self.unique = 0
+ self.monotonic_inc = 0
+ self.monotonic_dec = 0
def get_indexer(self, values):
self._ensure_mapping_populated()
@@ -537,7 +557,6 @@ cdef class DatetimeEngine(Int64Engine):
raise TypeError
# Welcome to the spaghetti factory
-
if self.over_size_threshold and self.is_monotonic_increasing:
if not self.is_unique:
val = _to_i8(val)
diff --git a/pandas/src/algos_common_helper.pxi b/pandas/src/algos_common_helper.pxi
index b89a80a73e2dd..be587fbc9a019 100644
--- a/pandas/src/algos_common_helper.pxi
+++ b/pandas/src/algos_common_helper.pxi
@@ -340,27 +340,28 @@ def is_monotonic_float64(ndarray[float64_t] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
float64_t prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
with nogil:
prev = arr[0]
@@ -375,7 +376,7 @@ def is_monotonic_float64(ndarray[float64_t] arr, bint timelike):
elif cur > prev:
is_monotonic_dec = 0
elif cur == prev:
- pass # is_unique = 0
+ is_unique = 0
else:
# cur or prev is NaN
is_monotonic_inc = 0
@@ -386,7 +387,8 @@ def is_monotonic_float64(ndarray[float64_t] arr, bint timelike):
is_monotonic_dec = 0
break
prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
@@ -753,27 +755,28 @@ def is_monotonic_float32(ndarray[float32_t] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
float32_t prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
with nogil:
prev = arr[0]
@@ -788,7 +791,7 @@ def is_monotonic_float32(ndarray[float32_t] arr, bint timelike):
elif cur > prev:
is_monotonic_dec = 0
elif cur == prev:
- pass # is_unique = 0
+ is_unique = 0
else:
# cur or prev is NaN
is_monotonic_inc = 0
@@ -799,7 +802,8 @@ def is_monotonic_float32(ndarray[float32_t] arr, bint timelike):
is_monotonic_dec = 0
break
prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
@@ -1166,27 +1170,28 @@ def is_monotonic_object(ndarray[object] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
object prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
prev = arr[0]
@@ -1201,7 +1206,7 @@ def is_monotonic_object(ndarray[object] arr, bint timelike):
elif cur > prev:
is_monotonic_dec = 0
elif cur == prev:
- pass # is_unique = 0
+ is_unique = 0
else:
# cur or prev is NaN
is_monotonic_inc = 0
@@ -1212,7 +1217,8 @@ def is_monotonic_object(ndarray[object] arr, bint timelike):
is_monotonic_dec = 0
break
prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
@@ -1579,27 +1585,28 @@ def is_monotonic_int32(ndarray[int32_t] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
int32_t prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
with nogil:
prev = arr[0]
@@ -1614,7 +1621,7 @@ def is_monotonic_int32(ndarray[int32_t] arr, bint timelike):
elif cur > prev:
is_monotonic_dec = 0
elif cur == prev:
- pass # is_unique = 0
+ is_unique = 0
else:
# cur or prev is NaN
is_monotonic_inc = 0
@@ -1625,7 +1632,8 @@ def is_monotonic_int32(ndarray[int32_t] arr, bint timelike):
is_monotonic_dec = 0
break
prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
@@ -1992,27 +2000,28 @@ def is_monotonic_int64(ndarray[int64_t] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
int64_t prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
with nogil:
prev = arr[0]
@@ -2027,7 +2036,7 @@ def is_monotonic_int64(ndarray[int64_t] arr, bint timelike):
elif cur > prev:
is_monotonic_dec = 0
elif cur == prev:
- pass # is_unique = 0
+ is_unique = 0
else:
# cur or prev is NaN
is_monotonic_inc = 0
@@ -2038,7 +2047,8 @@ def is_monotonic_int64(ndarray[int64_t] arr, bint timelike):
is_monotonic_dec = 0
break
prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
@@ -2405,27 +2415,28 @@ def is_monotonic_bool(ndarray[uint8_t] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
uint8_t prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
with nogil:
prev = arr[0]
@@ -2440,7 +2451,7 @@ def is_monotonic_bool(ndarray[uint8_t] arr, bint timelike):
elif cur > prev:
is_monotonic_dec = 0
elif cur == prev:
- pass # is_unique = 0
+ is_unique = 0
else:
# cur or prev is NaN
is_monotonic_inc = 0
@@ -2451,7 +2462,8 @@ def is_monotonic_bool(ndarray[uint8_t] arr, bint timelike):
is_monotonic_dec = 0
break
prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
diff --git a/pandas/src/algos_common_helper.pxi.in b/pandas/src/algos_common_helper.pxi.in
index 1451ffb054e5d..cec5712c0b7f4 100644
--- a/pandas/src/algos_common_helper.pxi.in
+++ b/pandas/src/algos_common_helper.pxi.in
@@ -362,27 +362,28 @@ def is_monotonic_{{name}}(ndarray[{{c_type}}] arr, bint timelike):
"""
Returns
-------
- is_monotonic_inc, is_monotonic_dec
+ is_monotonic_inc, is_monotonic_dec, is_unique
"""
cdef:
Py_ssize_t i, n
{{c_type}} prev, cur
bint is_monotonic_inc = 1
bint is_monotonic_dec = 1
+ bint is_unique = 1
n = len(arr)
if n == 1:
if arr[0] != arr[0] or (timelike and arr[0] == iNaT):
# single value is NaN
- return False, False
+ return False, False, True
else:
- return True, True
+ return True, True, True
elif n < 2:
- return True, True
+ return True, True, True
if timelike and arr[0] == iNaT:
- return False, False
+ return False, False, True
{{nogil_str}}
{{tab}}prev = arr[0]
@@ -397,7 +398,7 @@ def is_monotonic_{{name}}(ndarray[{{c_type}}] arr, bint timelike):
{{tab}} elif cur > prev:
{{tab}} is_monotonic_dec = 0
{{tab}} elif cur == prev:
- {{tab}} pass # is_unique = 0
+ {{tab}} is_unique = 0
{{tab}} else:
{{tab}} # cur or prev is NaN
{{tab}} is_monotonic_inc = 0
@@ -408,7 +409,8 @@ def is_monotonic_{{name}}(ndarray[{{c_type}}] arr, bint timelike):
{{tab}} is_monotonic_dec = 0
{{tab}} break
{{tab}} prev = cur
- return is_monotonic_inc, is_monotonic_dec
+ return is_monotonic_inc, is_monotonic_dec, \
+ is_unique and (is_monotonic_inc or is_monotonic_dec)
@cython.wraparound(False)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index e26a0548fdc78..b2326cb7b3255 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1387,6 +1387,7 @@ def get_loc(self, key, method=None, tolerance=None):
-------
loc : int
"""
+
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
| closes #14266
| https://api.github.com/repos/pandas-dev/pandas/pulls/14270 | 2016-09-21T13:06:43Z | 2016-09-22T10:03:25Z | null | 2016-09-22T10:03:25Z |
Update Github issue template | diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 8a9f717e1c428..6f91eba1ad239 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,6 +1,15 @@
-#### Code Sample, a copy-pastable example if possible
+#### A small, complete example of the issue
+
+```python
+# Your code here
+
+```
#### Expected Output
-#### output of ``pd.show_versions()``
+#### Output of ``pd.show_versions()``
+
+<details>
+# Paste the output here
+</details>
| Mostly just using the `details` tag to reduce some clutter in all the issues, e.g.
---
#### A small, complete example of the issue
``` python
# Your code here
def fib(n):
return fib(n-1) + fib(n-2)
```
#### Expected Output
Nothing
#### Output of `pd.show_versions()`
<details>
## INSTALLED VERSIONS
commit: None
python: 3.5.2.final.0
python-bits: 64
OS: Darwin
OS-release: 15.6.0
machine: x86_64
processor: i386
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
LOCALE: en_US.UTF-8
pandas: 0.19.0rc1+21.ge596cbf
nose: 1.3.7
pip: 8.1.2
setuptools: 26.1.1
Cython: 0.25a0
numpy: 1.11.1
scipy: 0.18.0
statsmodels: 0.8.0rc1
xarray: 0.7.2
IPython: 5.1.0
sphinx: 1.4.6
patsy: 0.4.1
dateutil: 2.5.3
pytz: 2016.6.1
blosc: 1.4.1
bottleneck: 1.0.0
tables: 3.2.2
numexpr: 2.6.1
matplotlib: 1.5.2
openpyxl: 2.3.5
xlrd: 0.9.4
xlwt: 1.0.0
xlsxwriter: None
lxml: 3.4.4
bs4: 4.4.1
html5lib: 0.9999999
httplib2: 0.9.2
apiclient: 1.5.1
sqlalchemy: 1.0.12
pymysql: 0.7.6.None
psycopg2: 2.6.2 (dt dec pq3 ext lo64)
jinja2: 2.8
boto: 2.39.0
pandas_datareader: None
</details>
| https://api.github.com/repos/pandas-dev/pandas/pulls/14268 | 2016-09-21T12:22:46Z | 2016-09-23T21:38:17Z | 2016-09-23T21:38:17Z | 2017-04-05T02:08:33Z |
BUG: fix for GH14252 | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index ffc6757b674ea..6dd2242372a04 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1576,3 +1576,4 @@ Bug Fixes
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if grouped dataframe contains empty bins (:issue:`13629`)
+- Bug in ``pandas.concat`` does not preserve Index name (:issue:`14252`)
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 09c755b2c9792..a6548710590fe 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -152,9 +152,10 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
raise ValueError('Length of levels must match length of level.')
if level is None:
- new_levels = FrozenList(
- _ensure_index(lev, copy=copy)._shallow_copy()
- for lev in levels)
+ new_levels = []
+ for lev in levels:
+ new_levels.append(
+ _ensure_index(lev, copy=copy)._shallow_copy())
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 6521acbd0b733..da0b8cdf77ef3 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1369,7 +1369,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
- keys = clean_keys
+ keys = Index(clean_keys, name=keys.name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
@@ -1685,7 +1685,6 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
# also copies
names = names + _get_consensus_names(indexes)
-
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
@@ -1694,8 +1693,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
kpieces = len(indexes)
# also copies
- new_names = list(names)
- new_levels = list(levels)
+ new_names = names
+ new_levels = levels
# construct labels
new_labels = []
@@ -1723,8 +1722,12 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
- return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
- verify_integrity=False)
+ if any(new_names):
+ return MultiIndex(levels=new_levels, labels=new_labels,
+ names=new_names, verify_integrity=False)
+ else:
+ return MultiIndex(levels=new_levels, labels=new_labels,
+ verify_integrity=False)
def _should_fill(lname, rname):
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 6e36100ddd0b4..1675ffc54f8e9 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -11,6 +11,7 @@
import pandas as pd
from pandas.compat import lrange, lzip
from pandas.tools.merge import merge, concat, MergeError
+from pandas.core.base import FrozenList
from pandas.util.testing import (assert_frame_equal,
assert_series_equal,
slow)
@@ -834,6 +835,14 @@ def test_merge_right_vs_left(self):
merged2 = merged2.ix[:, merged1.columns]
assert_frame_equal(merged1, merged2)
+ def test_concat_keys(self):
+ df = pd.DataFrame({'foo': [1, 2, 3, 4],
+ 'bar': [0.1, 0.2, 0.3, 0.4]})
+ index = pd.Index(['a', 'b'], name='baz')
+
+ concatted = pd.concat([df, df], keys=index)
+ self.assertEqual(FrozenList(['baz', None]), concatted.index.names)
+
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
| - [x] closes #14252
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
This should fix #14252 . It's possible that not every one of these changes is needed, or there is a better way to fix it, but I think this should do it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14261 | 2016-09-20T17:54:33Z | 2016-09-20T18:39:53Z | null | 2016-09-20T18:39:53Z |
BUG: float truncation in eval with py 2 | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index ffc6757b674ea..6bf1080f7c906 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1567,7 +1567,7 @@ Bug Fixes
- Bug in ``DataFrame.to_csv()`` with ``MultiIndex`` columns in which a stray empty line was added (:issue:`6618`)
- Bug in ``DatetimeIndex``, ``TimedeltaIndex`` and ``PeriodIndex.equals()`` may return ``True`` when input isn't ``Index`` but contains the same values (:issue:`13107`)
- Bug in assignment against datetime with timezone may not work if it contains datetime near DST boundary (:issue:`14146`)
-
+- Bug in ``pd.eval()`` and ``HDFStore`` query truncating long float literals with python 2 (:issue:`14241`)
- Bug in ``Index`` raises ``KeyError`` displaying incorrect column when column is not in the df and columns contains duplicate values (:issue:`13822`)
- Bug in ``Period`` and ``PeriodIndex`` creating wrong dates when frequency has combined offset aliases (:issue:`13874`)
- Bug in ``.to_string()`` when called with an integer ``line_width`` and ``index=False`` raises an UnboundLocalError exception because ``idx`` referenced before assignment.
diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py
index 9446e84d891c4..6ba2a21940d55 100644
--- a/pandas/computation/ops.py
+++ b/pandas/computation/ops.py
@@ -166,6 +166,11 @@ def _resolve_name(self):
def name(self):
return self.value
+ def __unicode__(self):
+ # in python 2 str() of float
+ # can truncate shorter than repr()
+ return repr(self.name)
+
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index a4dd03a0fa7ee..9dc18284ec22c 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -611,10 +611,14 @@ def __init__(self, value, converted, kind):
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
- if self.kind == u('string'):
+ if self.kind == u'string':
if encoding is not None:
return self.converted
return '"%s"' % self.converted
+ elif self.kind == u'float':
+ # python 2 str(float) is not always
+ # round-trippable so use repr()
+ return repr(self.converted)
return self.converted
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 02ed11c65706c..72fbc3906cafb 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -678,6 +678,31 @@ def test_line_continuation(self):
result = pd.eval(exp, engine=self.engine, parser=self.parser)
self.assertEqual(result, 12)
+ def test_float_truncation(self):
+ # GH 14241
+ exp = '1000000000.006'
+ result = pd.eval(exp, engine=self.engine, parser=self.parser)
+ expected = np.float64(exp)
+ self.assertEqual(result, expected)
+
+ df = pd.DataFrame({'A': [1000000000.0009,
+ 1000000000.0011,
+ 1000000000.0015]})
+ cutoff = 1000000000.0006
+ result = df.query("A < %.4f" % cutoff)
+ self.assertTrue(result.empty)
+
+ cutoff = 1000000000.0010
+ result = df.query("A > %.4f" % cutoff)
+ expected = df.loc[[1, 2], :]
+ tm.assert_frame_equal(expected, result)
+
+ exact = 1000000000.0011
+ result = df.query('A == %.4f' % exact)
+ expected = df.loc[[1], :]
+ tm.assert_frame_equal(expected, result)
+
+
class TestEvalNumexprPython(TestEvalNumexprPandas):
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 44ff9f8a5a1dd..213bc53e3aab4 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -5002,6 +5002,29 @@ def test_read_from_py_localpath(self):
tm.assert_frame_equal(expected, actual)
+ def test_query_long_float_literal(self):
+ # GH 14241
+ df = pd.DataFrame({'A': [1000000000.0009,
+ 1000000000.0011,
+ 1000000000.0015]})
+
+ with ensure_clean_store(self.path) as store:
+ store.append('test', df, format='table', data_columns=True)
+
+ cutoff = 1000000000.0006
+ result = store.select('test', "A < %.4f" % cutoff)
+ self.assertTrue(result.empty)
+
+ cutoff = 1000000000.0010
+ result = store.select('test', "A > %.4f" % cutoff)
+ expected = df.loc[[1, 2], :]
+ tm.assert_frame_equal(expected, result)
+
+ exact = 1000000000.0011
+ result = store.select('test', 'A == %.4f' % exact)
+ expected = df.loc[[1], :]
+ tm.assert_frame_equal(expected, result)
+
class TestHDFComplexValues(Base):
# GH10447
| - [x] closes #14241
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Python 2 only - apparently `str()` rounds shorter than `repr()`
```
In [1]: f = 1000000000.006
In [2]: str(f)
Out[2]: '1000000000.01'
In [3]: repr(f)
Out[3]: '1000000000.006'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14255 | 2016-09-20T00:43:22Z | 2016-09-22T10:20:26Z | null | 2016-09-24T14:38:16Z |
Pivot table drops column/index names=nan when dropna=false | diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 0f56b0b076897..4995e4569ed28 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -9,6 +9,7 @@
from pandas.tools.util import cartesian_product
from pandas.compat import range, lrange, zip
from pandas import compat
+from pandas import isnull
import pandas.core.common as com
import numpy as np
@@ -81,9 +82,21 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
+ pd_null = "_null_pd"
+
index = _convert_by(index)
columns = _convert_by(columns)
+ keys = index + columns
+
+ if not dropna:
+ key_data = np.array(data[keys], dtype='object')
+ _data_null_idx = isnull(key_data)
+ _data_null_val = key_data[_data_null_idx]
+ key_data[_data_null_idx] = pd_null
+ for idx, k in enumerate(keys):
+ data[k] = key_data[:, idx]
+
if isinstance(aggfunc, list):
pieces = []
keys = []
@@ -96,8 +109,6 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
keys.append(func.__name__)
return concat(pieces, keys=keys, axis=1)
- keys = index + columns
-
values_passed = values is not None
if values_passed:
if is_list_like(values):
@@ -180,6 +191,27 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
if len(index) == 0 and len(columns) > 0:
table = table.T
+ if not dropna:
+ if _data_null_val.size > 0:
+ def _convert_null_vals(indexes):
+ if isinstance(indexes, MultiIndex):
+ _new_level = []
+ for _tmp_index in indexes.levels:
+ tmp = np.array(_tmp_index)
+ tmp[tmp == pd_null] = _data_null_val[0]
+ _new_level.append(Index(tmp, name=_tmp_index.name))
+ indexes = MultiIndex(levels=_new_level,
+ labels=indexes.labels,
+ names=indexes.names)
+ else:
+ tmp = np.array(indexes)
+ tmp[tmp == pd_null] = _data_null_val[0]
+ indexes = Index(tmp, name=indexes.name)
+ return indexes
+
+ table.columns = _convert_null_vals(table.columns)
+ table.index = _convert_null_vals(table.index)
+
return table
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index e63cfcc8c0590..06896835954e1 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -87,6 +87,26 @@ def test_pivot_table_dropna(self):
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
+ df = DataFrame([[1, 'a', 'A'], [1, 'b', 'B'], [1, 'c', None]],
+ columns=['x', 'y', 'z'])
+ actual = df.pivot_table(values='x', index='y', columns='z',
+ aggfunc='sum', fill_value=0, margins=True,
+ dropna=True)
+ expected = pd.DataFrame([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0],
+ [1.0, 1.0, 2.0]])
+ expected.index = Index(['a', 'b', 'All'], name='y')
+ expected.columns = Index(['A', 'B', 'All'], name='z')
+ tm.assert_frame_equal(actual, expected)
+
+ actual = df.pivot_table(values='x', index='y', columns='z',
+ aggfunc='sum', fill_value=0, margins=True,
+ dropna=False)
+ expected = pd.DataFrame([[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0],
+ [0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 3.0]])
+ expected.index = Index(['a', 'b', 'c', 'All'], name='y')
+ expected.columns = Index(['A', 'B', None, 'All'], name='z')
+ tm.assert_frame_equal(actual, expected)
+
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
@@ -1072,17 +1092,18 @@ def test_margin_dropna(self):
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
- expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
- expected.index = Index([1.0, 2.0, 'All'], name='a')
- expected.columns = Index([3, 4, 'All'], name='b')
+ expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [0, 1, 1], [2, 4, 6]])
+ expected.index = Index([1.0, 2.0, np.nan, 'All'], name='a')
+ expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
- expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
- expected.index = Index([1.0, 2.0, 'All'], name='a')
- expected.columns = Index([3.0, 4.0, 'All'], name='b')
+ expected = pd.DataFrame([[1, 0, 0, 1], [0, 1, 0, 1], [0, 3, 1, 4],
+ [1, 4, 1, 6]])
+ expected.index = Index([1.0, 2.0, np.nan, 'All'], name='a')
+ expected.columns = Index([3.0, 4.0, np.nan, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
a = np.array(['foo', 'foo', 'foo', 'bar',
@@ -1094,21 +1115,24 @@ def test_margin_dropna(self):
actual = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], margins=True, dropna=False)
- m = MultiIndex.from_arrays([['one', 'one', 'two', 'two', 'All'],
- ['dull', 'shiny', 'dull', 'shiny', '']],
- names=['b', 'c'])
- expected = DataFrame([[1, 0, 1, 0, 2], [2, 0, 1, 1, 5],
- [3, 0, 2, 1, 7]], columns=m)
+
+ m = MultiIndex(levels=[Index(['All', np.nan, 'one', 'two']),
+ Index(['', 'dull', 'shiny'])],
+ labels=[[1, 1, 2, 2, 3, 3, 0],
+ [1, 2, 1, 2, 1, 2, 0]], names=['b', 'c'])
+ expected = DataFrame([[0, 0, 1, 0, 1, 0, 2], [0, 1, 2, 0, 1, 1, 5],
+ [0, 1, 3, 0, 2, 1, 7]], columns=m)
expected.index = Index(['bar', 'foo', 'All'], name='a')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=False)
- m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
- ['one', 'two', 'one', 'two', '']],
- names=['a', 'b'])
- expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
- [5, 2, 7]], index=m)
+ m = MultiIndex(levels=[['All', 'bar', 'foo'],
+ ['', np.nan, 'one', 'two']],
+ labels=[[1, 1, 1, 2, 2, 2, 0], [1, 2, 3, 1, 2, 3, 0]],
+ names=['a', 'b'])
+ expected = DataFrame([[0, 0, 0], [1, 0, 1], [1, 0, 1], [0, 1, 1],
+ [2, 0, 2], [1, 1, 2], [5, 2, 7]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
| - [x] closes #14072
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14246 | 2016-09-18T17:53:08Z | 2017-04-26T05:53:04Z | null | 2017-04-26T05:57:13Z |
TST: Fix generator tests to run | diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index c50944f0a4d3b..02ed11c65706c 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -758,21 +758,21 @@ def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
# typecasting rules consistency with python
# issue #12388
-class TestTypeCasting(tm.TestCase):
+class TestTypeCasting(object):
def check_binop_typecasting(self, engine, parser, op, dt):
tm.skip_if_no_ne(engine)
df = mkdf(5, 3, data_gen_f=f, dtype=dt)
s = 'df {} 3'.format(op)
res = pd.eval(s, engine=engine, parser=parser)
- self.assertTrue(df.values.dtype == dt)
- self.assertTrue(res.values.dtype == dt)
+ assert df.values.dtype == dt
+ assert res.values.dtype == dt
assert_frame_equal(res, eval(s))
s = '3 {} df'.format(op)
res = pd.eval(s, engine=engine, parser=parser)
- self.assertTrue(df.values.dtype == dt)
- self.assertTrue(res.values.dtype == dt)
+ assert df.values.dtype == dt
+ assert res.values.dtype == dt
assert_frame_equal(res, eval(s))
def test_binop_typecasting(self):
| Closes #14244
This class inheriting from TestCase caused
the tests the yield fixture tests to not actually
run. The new output is
```
nosetests pandas/computation/tests/test_eval.py:TestTypeCasting
........................................
----------------------------------------------------------------------
Ran 40 tests in 0.264s
OK
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14245 | 2016-09-18T14:38:51Z | 2016-09-19T20:11:32Z | 2016-09-19T20:11:32Z | 2017-04-05T02:08:32Z |
Revert "TST/TEMP: fix pyqt to 4.x for plotting tests" | diff --git a/ci/requirements-2.7-64.run b/ci/requirements-2.7-64.run
index ce085a6ebf91c..42b5a789ae31a 100644
--- a/ci/requirements-2.7-64.run
+++ b/ci/requirements-2.7-64.run
@@ -16,4 +16,3 @@ bottleneck
html5lib
beautiful-soup
jinja2=2.8
-pyqt=4.11.4
diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run
index eec7886fed38d..560d6571b8771 100644
--- a/ci/requirements-2.7.run
+++ b/ci/requirements-2.7.run
@@ -21,4 +21,3 @@ beautiful-soup=4.2.1
statsmodels
jinja2=2.8
xarray
-pyqt=4.11.4
diff --git a/ci/requirements-3.5-64.run b/ci/requirements-3.5-64.run
index 1dc88ed2c94af..96de21e3daa5e 100644
--- a/ci/requirements-3.5-64.run
+++ b/ci/requirements-3.5-64.run
@@ -10,4 +10,3 @@ numexpr
pytables
matplotlib
blosc
-pyqt=4.11.4
diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run
index d9ce708585a33..333641caf26c4 100644
--- a/ci/requirements-3.5.run
+++ b/ci/requirements-3.5.run
@@ -18,7 +18,6 @@ pymysql
psycopg2
xarray
boto
-pyqt=4.11.4
# incompat with conda ATM
# beautiful-soup
| Reverts pydata/pandas#14240
For when https://github.com/ContinuumIO/anaconda-issues/issues/1068 is solved
| https://api.github.com/repos/pandas-dev/pandas/pulls/14243 | 2016-09-18T09:39:40Z | 2016-11-25T16:19:02Z | null | 2023-05-11T01:14:09Z |
TST/TEMP: fix pyqt to 4.x for plotting tests | diff --git a/ci/requirements-2.7-64.run b/ci/requirements-2.7-64.run
index 42b5a789ae31a..ce085a6ebf91c 100644
--- a/ci/requirements-2.7-64.run
+++ b/ci/requirements-2.7-64.run
@@ -16,3 +16,4 @@ bottleneck
html5lib
beautiful-soup
jinja2=2.8
+pyqt=4.11.4
diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run
index 560d6571b8771..eec7886fed38d 100644
--- a/ci/requirements-2.7.run
+++ b/ci/requirements-2.7.run
@@ -21,3 +21,4 @@ beautiful-soup=4.2.1
statsmodels
jinja2=2.8
xarray
+pyqt=4.11.4
diff --git a/ci/requirements-3.5-64.run b/ci/requirements-3.5-64.run
index 96de21e3daa5e..1dc88ed2c94af 100644
--- a/ci/requirements-3.5-64.run
+++ b/ci/requirements-3.5-64.run
@@ -10,3 +10,4 @@ numexpr
pytables
matplotlib
blosc
+pyqt=4.11.4
diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run
index 333641caf26c4..d9ce708585a33 100644
--- a/ci/requirements-3.5.run
+++ b/ci/requirements-3.5.run
@@ -18,6 +18,7 @@ pymysql
psycopg2
xarray
boto
+pyqt=4.11.4
# incompat with conda ATM
# beautiful-soup
| To fix breaking tests (latest travis build on master broken, and also all recent PR builds), see https://github.com/matplotlib/matplotlib/issues/7124
| https://api.github.com/repos/pandas-dev/pandas/pulls/14240 | 2016-09-17T10:05:09Z | 2016-09-18T09:39:00Z | 2016-09-18T09:39:00Z | 2016-09-18T09:39:00Z |
BUG: Check for overflow in TimedeltaIndex addition. | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 454ffc5e5c685..7e62384f4b789 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1425,6 +1425,8 @@ Bug Fixes
- Bug in selection from a ``HDFStore`` with a fixed format and ``start`` and/or ``stop`` specified will now return the selected range (:issue:`8287`)
- Bug in ``Categorical.from_codes()`` where an unhelpful error was raised when an invalid ``ordered`` parameter was passed in (:issue:`14058`)
- Bug in ``Series`` construction from a tuple of integers on windows not returning default dtype (int64) (:issue:`13646`)
+- Bug in ``TimedeltaIndex`` addition with a Datetime-like object where addition overflow was not being caught (:issue:`14068`)
+
- Bug in ``.groupby(..).resample(..)`` when the same object is called multiple times (:issue:`13174`)
- Bug in ``.to_records()`` when index name is a unicode string (:issue:`13172`)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index a76e348b7dee2..564586eec5a8e 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -809,3 +809,29 @@ def unique1d(values):
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(_ensure_object(values))
return uniques
+
+
+def _checked_add_with_arr(arr, b):
+ """
+ Performs the addition of an int64 array and an int64 integer (or array)
+ but checks that they do not result in overflow first.
+
+ Parameters
+ ----------
+ arr : array addend.
+ b : array or scalar addend.
+
+ Returns
+ -------
+ sum : An array for elements x + b for each element x in arr if b is
+ a scalar or an array for elements x + y for each element pair
+ (x, y) in (arr, b).
+
+ Raises
+ ------
+ OverflowError if any x + y exceeds the maximum int64 value.
+ """
+ if (np.iinfo(np.int64).max - b < arr).any():
+ raise OverflowError("Python int too large to "
+ "convert to C long")
+ return arr + b
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index dd3a49de55d73..f00fdd196abea 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1002,6 +1002,21 @@ def prng(self):
return np.random.RandomState(1234)
+def test_int64_add_overflow():
+ # see gh-14068
+ msg = "too (big|large) to convert"
+ m = np.iinfo(np.int64).max
+
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([m, m]), m)
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ nanops._checked_add_with_arr(np.array([m, m]), np.array([m, m]))
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ with tm.assert_produces_warning(RuntimeWarning):
+ nanops._checked_add_with_arr(np.array([m, m]),
+ np.array([np.nan, m]))
+
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s'
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index c527bbad555f9..f1e199adeebfc 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -21,6 +21,7 @@
from pandas.compat import u
from pandas.tseries.frequencies import to_offset
from pandas.core.base import _shared_docs
+from pandas.core.nanops import _checked_add_with_arr
from pandas.indexes.base import _index_shared_docs
import pandas.core.common as com
import pandas.types.concat as _concat
@@ -343,7 +344,7 @@ def _add_datelike(self, other):
else:
other = Timestamp(other)
i8 = self.asi8
- result = i8 + other.value
+ result = _checked_add_with_arr(i8, other.value)
result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index ab413af897215..38e210d698035 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -1950,6 +1950,17 @@ def test_tdi_ops_attributes(self):
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
+ def test_add_overflow(self):
+ # see gh-14068
+ msg = "too (big|large) to convert"
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ to_timedelta(106580, 'D') + Timestamp('2000')
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ Timestamp('2000') + to_timedelta(106580, 'D')
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ to_timedelta([106580], 'D') + Timestamp('2000')
+ with tm.assertRaisesRegexp(OverflowError, msg):
+ Timestamp('2000') + to_timedelta([106580], 'D')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| Title is self-explanatory. Closes #14068.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14237 | 2016-09-16T19:05:55Z | 2016-09-27T10:43:45Z | null | 2016-09-28T22:17:31Z |
BUG: set_levels set illegal levels. | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 60847469aa02c..8e7e95c071ea4 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1560,6 +1560,6 @@ Bug Fixes
- Bug in ``.to_string()`` when called with an integer ``line_width`` and ``index=False`` raises an UnboundLocalError exception because ``idx`` referenced before assignment.
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
-- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
+- ``PeriodIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if grouped dataframe contains empty bins (:issue:`13629`)
- Bug in ``Index.copy()`` where ``name`` parameter was ignored (:issue:`14302`)
diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt
index 21b9fac6ffacf..92c7746d1c023 100644
--- a/doc/source/whatsnew/v0.19.1.txt
+++ b/doc/source/whatsnew/v0.19.1.txt
@@ -31,3 +31,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
- Bug in ``pd.concat`` where names of the ``keys`` were not propagated to the resulting ``MultiIndex`` (:issue:`14252`)
+- Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`)
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 1ab5dbb737739..0c465da24a17e 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -116,12 +116,27 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
return result
- def _verify_integrity(self):
- """Raises ValueError if length of levels and labels don't match or any
- label would exceed level bounds"""
+ def _verify_integrity(self, labels=None, levels=None):
+ """
+
+ Parameters
+ ----------
+ labels : optional list
+ Labels to check for validity. Defaults to current labels.
+ levels : optional list
+ Levels to check for validity. Defaults to current levels.
+
+ Raises
+ ------
+ ValueError
+ * if length of levels and labels don't match or any label would
+ exceed level bounds
+ """
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
- labels, levels = self.labels, self.levels
+ labels = labels or self.labels
+ levels = levels or self.levels
+
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
@@ -162,6 +177,9 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
+ if verify_integrity:
+ self._verify_integrity(levels=new_levels)
+
names = self.names
self._levels = new_levels
if any(names):
@@ -170,9 +188,6 @@ def _set_levels(self, levels, level=None, copy=False, validate=True,
self._tuples = None
self._reset_cache()
- if verify_integrity:
- self._verify_integrity()
-
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
@@ -268,13 +283,13 @@ def _set_labels(self, labels, level=None, copy=False, validate=True,
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
+ if verify_integrity:
+ self._verify_integrity(labels=new_labels)
+
self._labels = new_labels
self._tuples = None
self._reset_cache()
- if verify_integrity:
- self._verify_integrity()
-
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index cd9ce0102ca1e..fdc5a2eaec812 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -149,14 +149,14 @@ def test_set_levels(self):
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
- def assert_matching(actual, expected):
+ def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
- exp = np.asarray(exp, dtype=np.object_)
- tm.assert_numpy_array_equal(act, exp)
+ exp = np.asarray(exp)
+ tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
@@ -204,6 +204,31 @@ def assert_matching(actual, expected):
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
+ # illegal level changing should not change levels
+ # GH 13754
+ original_index = self.index.copy()
+ for inplace in [True, False]:
+ with assertRaisesRegexp(ValueError, "^On"):
+ self.index.set_levels(['c'], level=0, inplace=inplace)
+ assert_matching(self.index.levels, original_index.levels,
+ check_dtype=True)
+
+ with assertRaisesRegexp(ValueError, "^On"):
+ self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
+ inplace=inplace)
+ assert_matching(self.index.labels, original_index.labels,
+ check_dtype=True)
+
+ with assertRaisesRegexp(TypeError, "^Levels"):
+ self.index.set_levels('c', level=0, inplace=inplace)
+ assert_matching(self.index.levels, original_index.levels,
+ check_dtype=True)
+
+ with assertRaisesRegexp(TypeError, "^Labels"):
+ self.index.set_labels(1, level=0, inplace=inplace)
+ assert_matching(self.index.labels, original_index.labels,
+ check_dtype=True)
+
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
| - [x] closes #13754
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
`MultiIndex.set_levels`, when given illegal level values, raises an error.
When `inplace=True`, though, the illegal level values are still accepted. This
commit fixes that behavior by checking that the proposed level values are legal
before setting them.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14236 | 2016-09-16T18:53:30Z | 2016-10-10T12:30:22Z | 2016-10-10T12:30:22Z | 2016-10-10T12:30:37Z |
ENH: Allow usecols to accept callable (GH14154) | diff --git a/doc/source/io.rst b/doc/source/io.rst
index f22374553e9c3..75f36c5274cd2 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -126,13 +126,23 @@ index_col : int or sequence or ``False``, default ``None``
MultiIndex is used. If you have a malformed file with delimiters at the end of
each line, you might consider ``index_col=False`` to force pandas to *not* use
the first column as the index (row names).
-usecols : array-like, default ``None``
- Return a subset of the columns. All elements in this array must either
+usecols : array-like or callable, default ``None``
+ Return a subset of the columns. If array-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
- inferred from the document header row(s). For example, a valid `usecols`
- parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Using this parameter
- results in much faster parsing time and lower memory usage.
+ inferred from the document header row(s). For example, a valid array-like
+ `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz'].
+
+ If callable, the callable function will be evaluated against the column names,
+ returning names where the callable function evaluates to True:
+
+ .. ipython:: python
+
+ data = 'col1,col2,col3\na,b,1\na,b,2\nc,d,3'
+ pd.read_csv(StringIO(data))
+ pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ['COL1', 'COL3'])
+
+ Using this parameter results in much faster parsing time and lower memory usage.
as_recarray : boolean, default ``False``
DEPRECATED: this argument will be removed in a future version. Please call
``pd.read_csv(...).to_records()`` instead.
@@ -617,7 +627,9 @@ Filtering columns (``usecols``)
+++++++++++++++++++++++++++++++
The ``usecols`` argument allows you to select any subset of the columns in a
-file, either using the column names or position numbers:
+file, either using the column names, position numbers or a callable:
+
+.. versionadded:: 0.20.0 support for callable `usecols` arguments
.. ipython:: python
@@ -625,6 +637,7 @@ file, either using the column names or position numbers:
pd.read_csv(StringIO(data))
pd.read_csv(StringIO(data), usecols=['b', 'd'])
pd.read_csv(StringIO(data), usecols=[0, 2, 3])
+ pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ['A', 'C'])
Comments and Empty Lines
''''''''''''''''''''''''
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 9774c3ec9cc7f..0bfd755aae40c 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -52,6 +52,7 @@ Other enhancements
- ``pd.read_excel`` now preserves sheet order when using ``sheetname=None`` (:issue:`9930`)
- ``pd.cut`` and ``pd.qcut`` now support datetime64 and timedelta64 dtypes (issue:`14714`)
- ``Series`` provides a ``to_excel`` method to output Excel files (:issue:`8825`)
+- The ``usecols`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`14154`)
.. _whatsnew_0200.api_breaking:
@@ -106,4 +107,4 @@ Performance Improvements
.. _whatsnew_0200.bug_fixes:
Bug Fixes
-~~~~~~~~~
\ No newline at end of file
+~~~~~~~~~
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index ef839297c80d3..30443f894a64d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -90,13 +90,18 @@
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
-usecols : array-like, default None
- Return a subset of the columns. All elements in this array must either
+usecols : array-like or callable, default None
+ Return a subset of the columns. If array-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
- inferred from the document header row(s). For example, a valid `usecols`
- parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Using this parameter
- results in much faster parsing time and lower memory usage.
+ inferred from the document header row(s). For example, a valid array-like
+ `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz'].
+
+ If callable, the callable function will be evaluated against the column
+ names, returning names where the callable function evaluates to True. An
+ example of a valid callable argument would be ``lambda x: x.upper() in
+ ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
+ parsing time and lower memory usage.
as_recarray : boolean, default False
DEPRECATED: this argument will be removed in a future version. Please call
`pd.read_csv(...).to_records()` instead.
@@ -977,17 +982,33 @@ def _is_index_col(col):
return col is not None and col is not False
+def _evaluate_usecols(usecols, names):
+ """
+ Check whether or not the 'usecols' parameter
+ is a callable. If so, enumerates the 'names'
+ parameter and returns a set of indices for
+ each entry in 'names' that evaluates to True.
+ If not a callable, returns 'usecols'.
+ """
+ if callable(usecols):
+ return set([i for i, name in enumerate(names)
+ if usecols(name)])
+ return usecols
+
+
def _validate_usecols_arg(usecols):
"""
Check whether or not the 'usecols' parameter
- contains all integers (column selection by index)
- or strings (column by name). Raises a ValueError
- if that is not the case.
+ contains all integers (column selection by index),
+ strings (column by name) or is a callable. Raises
+ a ValueError if that is not the case.
"""
- msg = ("The elements of 'usecols' must "
- "either be all strings, all unicode, or all integers")
+ msg = ("'usecols' must either be all strings, all unicode, "
+ "all integers or a callable")
if usecols is not None:
+ if callable(usecols):
+ return usecols
usecols_dtype = lib.infer_dtype(usecols)
if usecols_dtype not in ('empty', 'integer',
'string', 'unicode'):
@@ -1499,11 +1520,12 @@ def __init__(self, src, **kwds):
self.orig_names = self.names[:]
if self.usecols:
- if len(self.names) > len(self.usecols):
+ usecols = _evaluate_usecols(self.usecols, self.orig_names)
+ if len(self.names) > len(usecols):
self.names = [n for i, n in enumerate(self.names)
- if (i in self.usecols or n in self.usecols)]
+ if (i in usecols or n in usecols)]
- if len(self.names) < len(self.usecols):
+ if len(self.names) < len(usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
@@ -1665,9 +1687,10 @@ def read(self, nrows=None):
def _filter_usecols(self, names):
# hackish
- if self.usecols is not None and len(names) != len(self.usecols):
+ usecols = _evaluate_usecols(self.usecols, names)
+ if usecols is not None and len(names) != len(usecols):
names = [name for i, name in enumerate(names)
- if i in self.usecols or name in self.usecols]
+ if i in usecols or name in usecols]
return names
def _get_index_names(self):
@@ -2291,7 +2314,9 @@ def _handle_usecols(self, columns, usecols_key):
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
- if any([isinstance(col, string_types) for col in self.usecols]):
+ if callable(self.usecols):
+ col_indices = _evaluate_usecols(self.usecols, usecols_key)
+ elif any([isinstance(u, string_types) for u in self.usecols]):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
diff --git a/pandas/io/tests/parser/usecols.py b/pandas/io/tests/parser/usecols.py
index 5051171ccb8f0..26b4b5b8ec7d1 100644
--- a/pandas/io/tests/parser/usecols.py
+++ b/pandas/io/tests/parser/usecols.py
@@ -23,8 +23,9 @@ def test_raise_on_mixed_dtype_usecols(self):
1000,2000,3000
4000,5000,6000
"""
- msg = ("The elements of 'usecols' must "
- "either be all strings, all unicode, or all integers")
+
+ msg = ("'usecols' must either be all strings, all unicode, "
+ "all integers or a callable")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
@@ -302,8 +303,8 @@ def test_usecols_with_mixed_encoding_strings(self):
3.568935038,7,False,a
'''
- msg = ("The elements of 'usecols' must "
- "either be all strings, all unicode, or all integers")
+ msg = ("'usecols' must either be all strings, all unicode, "
+ "all integers or a callable")
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(s), usecols=[u'AAA', b'BBB'])
@@ -366,3 +367,31 @@ def test_np_array_usecols(self):
expected = DataFrame([[1, 2]], columns=usecols)
result = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
+
+ def test_callable_usecols(self):
+ # See gh-14154
+ s = '''AaA,bBb,CCC,ddd
+ 0.056674973,8,True,a
+ 2.613230982,2,False,b
+ 3.568935038,7,False,a
+ '''
+
+ data = {
+ 'AaA': {
+ 0: 0.056674972999999997,
+ 1: 2.6132309819999997,
+ 2: 3.5689350380000002
+ },
+ 'bBb': {0: 8, 1: 2, 2: 7},
+ 'ddd': {0: 'a', 1: 'b', 2: 'a'}
+ }
+ expected = DataFrame(data)
+ df = self.read_csv(StringIO(s), usecols=lambda x:
+ x.upper() in ['AAA', 'BBB', 'DDD'])
+ tm.assert_frame_equal(df, expected)
+
+ # Check that a callable returning only False returns
+ # an empty DataFrame
+ expected = DataFrame()
+ df = self.read_csv(StringIO(s), usecols=lambda x: False)
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 6760e822960f1..d94a4ef278dee 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -300,8 +300,9 @@ cdef class TextReader:
object compression
object mangle_dupe_cols
object tupleize_cols
+ object usecols
list dtype_cast_order
- set noconvert, usecols
+ set noconvert
def __cinit__(self, source,
delimiter=b',',
@@ -437,7 +438,10 @@ cdef class TextReader:
# suboptimal
if usecols is not None:
self.has_usecols = 1
- self.usecols = set(usecols)
+ if callable(usecols):
+ self.usecols = usecols
+ else:
+ self.usecols = set(usecols)
# XXX
if skipfooter > 0:
@@ -701,7 +705,6 @@ cdef class TextReader:
cdef StringPath path = _string_path(self.c_encoding)
header = []
-
if self.parser.header_start >= 0:
# Header is in the file
@@ -821,7 +824,8 @@ cdef class TextReader:
# 'data has %d fields'
# % (passed_count, field_count))
- if self.has_usecols and self.allow_leading_cols:
+ if self.has_usecols and self.allow_leading_cols and \
+ not callable(self.usecols):
nuse = len(self.usecols)
if nuse == passed_count:
self.leading_cols = 0
@@ -1019,13 +1023,20 @@ cdef class TextReader:
if i < self.leading_cols:
# Pass through leading columns always
name = i
- elif self.usecols and nused == len(self.usecols):
+ elif self.usecols and not callable(self.usecols) and \
+ nused == len(self.usecols):
# Once we've gathered all requested columns, stop. GH5766
break
else:
name = self._get_column_name(i, nused)
- if self.has_usecols and not (i in self.usecols or
- name in self.usecols):
+ usecols = set()
+ if callable(self.usecols):
+ if self.usecols(name):
+ usecols = set([i])
+ else:
+ usecols = self.usecols
+ if self.has_usecols and not (i in usecols or
+ name in usecols):
continue
nused += 1
| - [ X] closes #14154
- [ X] tests added / passed
- [ X] passes `git diff upstream/master | flake8 --diff`
- [X ] whatsnew entry
<img width="1515" alt="asv_bench" src="https://cloud.githubusercontent.com/assets/609873/18575075/942a9240-7ba1-11e6-9dca-bab8b9987f31.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/14234 | 2016-09-16T04:08:04Z | 2016-12-06T11:38:06Z | 2016-12-06T11:38:06Z | 2017-12-12T15:39:47Z |
DOC: added example to Series.map showing use of na_action parameter (GH14231) | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8379c8bcdcae8..1c6b13885dd01 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2099,10 +2099,19 @@ def map(self, arg, na_action=None):
----------
arg : function, dict, or Series
na_action : {None, 'ignore'}
- If 'ignore', propagate NA values
+ If 'ignore', propagate NA values, without passing them to the
+ mapping function
+
+ Returns
+ -------
+ y : Series
+ same index as caller
Examples
--------
+
+ Map inputs to outputs
+
>>> x
one 1
two 2
@@ -2118,10 +2127,27 @@ def map(self, arg, na_action=None):
two bar
three baz
- Returns
- -------
- y : Series
- same index as caller
+ Use na_action to control whether NA values are affected by the mapping
+ function.
+
+ >>> s = pd.Series([1, 2, 3, np.nan])
+
+ >>> s2 = s.map(lambda x: 'this is a string {}'.format(x),
+ na_action=None)
+ 0 this is a string 1.0
+ 1 this is a string 2.0
+ 2 this is a string 3.0
+ 3 this is a string nan
+ dtype: object
+
+ >>> s3 = s.map(lambda x: 'this is a string {}'.format(x),
+ na_action='ignore')
+ 0 this is a string 1.0
+ 1 this is a string 2.0
+ 2 this is a string 3.0
+ 3 NaN
+ dtype: object
+
"""
if is_extension_type(self.dtype):
| - [x] closes #14231
- [x] tests passed
- [x] passes `git diff upstream/master | flake8 --diff`
Added example to Series.map showing use of na_action parameter.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14232 | 2016-09-15T23:16:35Z | 2016-09-16T23:20:39Z | 2016-09-16T23:20:39Z | 2016-12-14T05:11:15Z |
BUG: fix alignment in series ops (GH14227) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a3cac2d6f9f2f..4aa1ac4a47090 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -20,6 +20,7 @@
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
+ is_datetime64tz_dtype,
is_list_like,
is_dict_like,
is_re_compilable)
@@ -4438,13 +4439,23 @@ def _align_frame(self, other, join='outer', axis=None, level=None,
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
+ # if DatetimeIndex have different tz, convert to UTC
+ if is_datetime64tz_dtype(left.index):
+ if left.index.tz != right.index.tz:
+ if join_index is not None:
+ left.index = join_index
+ right.index = join_index
+
return left.__finalize__(self), right.__finalize__(other)
def _align_series(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
+
+ is_series = isinstance(self, ABCSeries)
+
# series/series compat, other must always be a Series
- if isinstance(self, ABCSeries):
+ if is_series:
if axis:
raise ValueError('cannot align series to a series other than '
'axis 0')
@@ -4503,6 +4514,15 @@ def _align_series(self, other, join='outer', axis=None, level=None,
left = left.fillna(fill_value, method=method, limit=limit,
axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
+
+ # if DatetimeIndex have different tz, convert to UTC
+ if is_series or (not is_series and axis == 0):
+ if is_datetime64tz_dtype(left.index):
+ if left.index.tz != right.index.tz:
+ if join_index is not None:
+ left.index = join_index
+ right.index = join_index
+
return left.__finalize__(self), right.__finalize__(other)
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 237b9394dfc25..7cff1104c50be 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -622,12 +622,6 @@ def _align_method_SERIES(left, right, align_asobject=False):
left, right = left.align(right, copy=False)
- index, lidx, ridx = left.index.join(right.index, how='outer',
- return_indexers=True)
- # if DatetimeIndex have different tz, convert to UTC
- left.index = index
- right.index = index
-
return left, right
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 24c26276ea24d..f688ec2d43789 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1810,3 +1810,11 @@ def test_dti_tz_convert_to_utc(self):
res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
assert_series_equal(res, Series([np.nan, 3, np.nan], index=base))
+
+ def test_op_duplicate_index(self):
+ # GH14227
+ s1 = Series([1, 2], index=[1, 1])
+ s2 = Series([10, 10], index=[1, 2])
+ result = s1 + s2
+ expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
+ assert_series_equal(result, expected)
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index b8247fe01b3f2..a85a606075911 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -1290,6 +1290,28 @@ def test_align_aware(self):
self.assertEqual(df1.index.tz, new1.index.tz)
self.assertEqual(df2.index.tz, new2.index.tz)
+ # # different timezones convert to UTC
+
+ # frame
+ df1_central = df1.tz_convert('US/Central')
+ new1, new2 = df1.align(df1_central)
+ self.assertEqual(new1.index.tz, pytz.UTC)
+ self.assertEqual(new2.index.tz, pytz.UTC)
+
+ # series
+ new1, new2 = df1[0].align(df1_central[0])
+ self.assertEqual(new1.index.tz, pytz.UTC)
+ self.assertEqual(new2.index.tz, pytz.UTC)
+
+ # combination
+ new1, new2 = df1.align(df1_central[0], axis=0)
+ self.assertEqual(new1.index.tz, pytz.UTC)
+ self.assertEqual(new2.index.tz, pytz.UTC)
+
+ df1[0].align(df1_central, axis=0)
+ self.assertEqual(new1.index.tz, pytz.UTC)
+ self.assertEqual(new2.index.tz, pytz.UTC)
+
def test_append_aware(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
| - [x] closes #14227
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14230 | 2016-09-15T17:55:24Z | 2016-09-30T21:23:35Z | 2016-09-30T21:23:35Z | 2016-09-30T21:23:56Z |
DOC: #14195. to_csv warns regarding quoting behaviour for floats | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d436fa52918d3..3661d4b4cdff7 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1481,7 +1481,7 @@ function takes a number of arguments. Only the first is required.
- ``encoding``: a string representing the encoding to use if the contents are
non-ASCII, for python versions prior to 3
- ``line_terminator``: Character sequence denoting line end (default '\\n')
- - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL)
+ - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL). Note that if you have set a `float_format` then floats are converted to strings and csv.QUOTE_NONNUMERIC will treat them as non-numeric
- ``quotechar``: Character used to quote fields (default '"')
- ``doublequote``: Control quoting of ``quotechar`` in fields (default True)
- ``escapechar``: Character used to escape ``sep`` and ``quotechar`` when
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1cc689528caaa..0b446c26c977d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1345,7 +1345,9 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
- defaults to csv.QUOTE_MINIMAL
+ defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
+ then floats are comverted to strings and thus csv.QUOTE_NONNUMERIC
+ will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
| - [x] closes #14195
- [ ] passes `git diff upstream/master | flake8 --diff`
Added a small warning that if `float_format` is set then floats will be quoted even if csv.QUOTE_NONNUMERIC is set
| https://api.github.com/repos/pandas-dev/pandas/pulls/14228 | 2016-09-15T16:45:57Z | 2016-10-06T16:04:57Z | 2016-10-06T16:04:57Z | 2016-10-06T16:04:58Z |
BUG: GH13629 Binned groupby median function calculates median on empt… | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index d3239c4562765..6933cbedb5d67 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1572,3 +1572,4 @@ Bug Fixes
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
+- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if grouped dataframe contains empty bins (:issue:`13629`)
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index de5c5fc661d4d..8710ef34504d1 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -992,7 +992,7 @@ def is_lexsorted(list list_of_arrays):
def groupby_indices(dict ids, ndarray[int64_t] labels,
ndarray[int64_t] counts):
"""
- turn group_labels output into a combined indexer maping the labels to
+ turn group_labels output into a combined indexer mapping the labels to
indexers
Parameters
@@ -1313,6 +1313,9 @@ cdef inline float64_t _median_linear(float64_t* a, int n):
cdef float64_t result
cdef float64_t* tmp
+ if n == 0:
+ return NaN
+
# count NAs
for i in range(n):
if a[i] != a[i]:
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 66e30229cd52b..7ed84b970d9c3 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -4424,12 +4424,13 @@ def _reorder_by_uniques(uniques, labels):
def _groupby_indices(values):
if is_categorical_dtype(values):
-
# we have a categorical, so we can do quite a bit
# bit better than factorizing again
reverse = dict(enumerate(values.categories))
codes = values.codes.astype('int64')
- _, counts = _hash.value_count_int64(codes, False)
+
+ mask = 0 <= codes
+ counts = np.bincount(codes[mask], minlength=values.categories.size)
else:
reverse, codes, counts = _algos.group_labels(
_values_from_object(_ensure_object(values)))
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 9d8873d843642..492326d0898f0 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -799,6 +799,17 @@ def test_get_group(self):
self.assertRaises(ValueError,
lambda: g.get_group(('foo', 'bar', 'baz')))
+ def test_get_group_empty_bins(self):
+ d = pd.DataFrame([3, 1, 7, 6])
+ bins = [0, 5, 10, 15]
+ g = d.groupby(pd.cut(d[0], bins))
+
+ result = g.get_group('(0, 5]')
+ expected = DataFrame([3, 1], index=[0, 1])
+ assert_frame_equal(result, expected)
+
+ self.assertRaises(KeyError, lambda: g.get_group('(10, 15]'))
+
def test_get_group_grouped_by_tuple(self):
# GH 8121
df = DataFrame([[(1, ), (1, 2), (1, ), (1, 2)]], index=['ids']).T
@@ -4415,6 +4426,16 @@ def test_cython_median(self):
xp = df.groupby(labels).median()
assert_frame_equal(rs, xp)
+ def test_median_empty_bins(self):
+ df = pd.DataFrame(np.random.randint(0, 44, 500))
+
+ grps = range(0, 55, 5)
+ bins = pd.cut(df[0], grps)
+
+ result = df.groupby(bins).median()
+ expected = df.groupby(bins).agg(lambda x: x.median())
+ assert_frame_equal(result, expected)
+
def test_groupby_categorical_no_compress(self):
data = Series(np.random.randn(9))
@@ -6123,6 +6144,27 @@ def test__cython_agg_general(self):
exc.args += ('operation: %s' % op, )
raise
+ def test_cython_agg_empty_buckets(self):
+ ops = [('mean', np.mean),
+ ('median', np.median),
+ ('var', lambda x: np.var(x, ddof=1)),
+ ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan),
+ ('prod', np.prod),
+ ('min', np.min),
+ ('max', np.max), ]
+
+ df = pd.DataFrame([11, 12, 13])
+ grps = range(0, 55, 5)
+
+ for op, targop in ops:
+ result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op)
+ expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x))
+ try:
+ tm.assert_frame_equal(result, expected)
+ except BaseException as exc:
+ exc.args += ('operation: %s' % op,)
+ raise
+
def test_cython_group_transform_algos(self):
# GH 4095
dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
| - [ x] closes #13629
- [ x] tests added / passed
- [ x] passes `git diff upstream/master | flake8 --diff`
- [x ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14225 | 2016-09-15T01:57:41Z | 2016-09-18T15:57:27Z | 2016-09-18T15:57:27Z | 2016-09-18T22:36:27Z |
add check for infinity in __call__ of EngFormatter | diff --git a/pandas/formats/format.py b/pandas/formats/format.py
index 4740dd25c419d..e5089983ac8f7 100644
--- a/pandas/formats/format.py
+++ b/pandas/formats/format.py
@@ -2586,6 +2586,9 @@ def __call__(self, num):
if decimal.Decimal.is_nan(dnum):
return 'NaN'
+ if decimal.Decimal.is_infinite(dnum):
+ return 'inf'
+
sign = 1
if dnum < 0: # pragma: no cover
diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py
index ba7ad55a081cd..58e9b30e7f624 100644
--- a/pandas/tests/formats/test_format.py
+++ b/pandas/tests/formats/test_format.py
@@ -4047,6 +4047,14 @@ def test_nan(self):
self.assertTrue('NaN' in result)
self.reset_display_options()
+ def test_inf(self):
+ # Issue #11981
+
+ formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
+ result = formatter(np.inf)
+ self.assertEqual(result, u('inf'))
+
+
def _three_digit_exp():
return '%.4g' % 1.7e8 == '1.7e+008'
| EngFormatter throws an exception if passed 'inf' as below.
This patch checks for infinity and just returns 'inf' in that case. The fix is analagous to the recent fix for NaN:
https://github.com/pydata/pandas/commit/d0734ba4d0f4c228110dc3974943ce4ec2adeea4
``` python
import pandas
import pandas.core.format as fmt
ef = fmt.EngFormatter()
ef(pandas.np.inf)
...
```
---
OverflowError Traceback (most recent call last)
<ipython-input-17-c4aa659346ee> in <module>()
3
4 ef = fmt.EngFormatter()
----> 5 ef(pandas.np.inf)
/usr/lib/python3/dist-packages/pandas/core/format.py in **call**(self, num)
2485
2486 if dnum != 0:
-> 2487 pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) \* 3))
2488 else:
2489 pow10 = decimal.Decimal(0)
OverflowError: cannot convert Infinity to integer
| https://api.github.com/repos/pandas-dev/pandas/pulls/14214 | 2016-09-13T19:09:55Z | 2016-09-13T22:00:08Z | null | 2016-09-13T23:18:07Z |
BUG: loffset not applied when using resample with agg() (GH13218) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0873e4b34b0b1..41d5090ef6c32 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -321,3 +321,4 @@ Bug Fixes
- Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`)
- Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`)
- Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`)
+- Bug in ``resample``, where a non-string ```loffset`` argument would not be applied when resampling a timeseries (:issue:`13218`)
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 31781eb3fc131..cf96a688fb21f 100755
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -323,6 +323,11 @@ def aggregate(self, arg, *args, **kwargs):
*args,
**kwargs)
+ # if arg was a string, _aggregate called resampler's _downsample or
+ # _groupby_and_agg methods, which would've already applied the loffset
+ if not isinstance(arg, compat.string_types):
+ result = self._apply_loffset(result)
+
return result
agg = aggregate
@@ -381,7 +386,7 @@ def _gotitem(self, key, ndim, subset=None):
return grouped
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
- """ revaluate the obj with a groupby aggregation """
+ """ re-evaluate the obj with a groupby aggregation """
if grouper is None:
self._set_binner()
@@ -409,7 +414,14 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
return self._wrap_result(result)
def _apply_loffset(self, result):
- """if loffset if set, offset the result index"""
+ """
+ if loffset is set, offset the result index
+
+ Parameters
+ ----------
+ result : Series or DataFrame
+ the result of resample
+ """
loffset = self.loffset
if isinstance(loffset, compat.string_types):
loffset = to_offset(self.loffset)
@@ -419,6 +431,7 @@ def _apply_loffset(self, result):
isinstance(result.index, DatetimeIndex) and
len(result.index) > 0
)
+
if needs_offset:
result.index = result.index + loffset
@@ -797,6 +810,11 @@ def aggregate(self, arg, *args, **kwargs):
if result is None:
result = self._downsample(arg, *args, **kwargs)
+ # if arg was a string, _aggregate called resamplers' _downsample or
+ # _groupby_and_agg methods, which would've already applied the loffset
+ if not isinstance(arg, compat.string_types):
+ result = self._apply_loffset(result)
+
return result
agg = aggregate
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index b8c060c024867..8f1c653210298 100755
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -24,7 +24,7 @@
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import (DatetimeIndex, TimeGrouper,
DatetimeIndexResampler)
-from pandas.tseries.tdi import timedelta_range
+from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
from pandas._period import IncompatibleFrequency
@@ -769,6 +769,36 @@ def test_resample_empty_dtypes(self):
# (ex: doing mean with dtype of np.object)
pass
+ def test_resample_loffset_arg_type(self):
+ # GH 13218, 15002
+ df = self.create_series().to_frame('value')
+ expected_means = [df.values[i:i + 2].mean()
+ for i in range(0, len(df.values), 2)]
+ expected_index = self.create_index(df.index[0],
+ periods=len(df.index) / 2,
+ freq='2D')
+ # loffset coreces PeriodIndex to DateTimeIndex
+ if isinstance(expected_index, PeriodIndex):
+ expected_index = expected_index.to_timestamp()
+ expected_index += timedelta(hours=2)
+ expected = DataFrame({'value': expected_means}, index=expected_index)
+ for arg in ['mean', {'value': 'mean'}, ['mean']]:
+ result_agg = df.resample('2D', loffset='2H').agg(arg)
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result_how = df.resample('2D', how=arg, loffset='2H')
+ if isinstance(arg, list):
+ expected.columns = pd.MultiIndex.from_tuples([('value',
+ 'mean')])
+ # GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex
+ if isinstance(expected.index, TimedeltaIndex):
+ with tm.assertRaises(AssertionError):
+ assert_frame_equal(result_agg, expected)
+ assert_frame_equal(result_how, expected)
+ else:
+ assert_frame_equal(result_agg, expected)
+ assert_frame_equal(result_how, expected)
+
class TestDatetimeIndex(Base, tm.TestCase):
_multiprocess_can_split_ = True
| - [x] closes #13218 and #15002 (continuation of closed PR https://github.com/pydata/pandas/pull/13861)
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
_apply_loffset is called in resampler's downsample and groupby_and_agg methods, defined [here](https://github.com/pydata/pandas/blob/master/pandas/tseries/resample.py#L535) and [here](https://github.com/pydata/pandas/blob/master/pandas/tseries/resample.py#L545)
When a `str` is passed into `aggregate`, the above methods are called [here](https://github.com/pydata/pandas/blob/master/pandas/core/base.py#L431), and consequently the loffset is applied.
**However**, if anything other than a `str` is passed into `aggregate`, then a different path will be taken and **the loffset must be applied**. So a simple check in `aggregate` will apply the loffset correctly.
note: my last PR was closed, and I can't reopen it by a force push, hence the new one
| https://api.github.com/repos/pandas-dev/pandas/pulls/14213 | 2016-09-13T16:32:56Z | 2016-12-31T16:56:53Z | null | 2016-12-31T16:58:13Z |
DOC: fix incorrect example in unstack docstring (GH14206) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index de74b70cdfaac..e46d4c6b928a9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3805,28 +3805,29 @@ def unstack(self, level=-1):
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
- one a 1
- b 2
- two a 3
- b 4
+ one a 1.0
+ b 2.0
+ two a 3.0
+ b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
- one 1 2
- two 3 4
+ one 1.0 2.0
+ two 3.0 4.0
>>> s.unstack(level=0)
one two
- a 1 3
- b 2 4
+ a 1.0 3.0
+ b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
- one a 1.
- b 3.
- two a 2.
- b 4.
+ one a 1.0
+ b 2.0
+ two a 3.0
+ b 4.0
+ dtype: float64
Returns
-------
| - [x] closes #14206
The following PR closes #14206 and corrects the Docs as suggested in the issue thread.
@jorisvandenbossche Kindly review.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14211 | 2016-09-13T08:13:44Z | 2016-09-13T23:00:15Z | 2016-09-13T23:00:15Z | 2016-09-14T04:39:58Z |
WIP/API: add magic 'X' for selection | diff --git a/LICENSES/PANDAS_PLY_LICENSE b/LICENSES/PANDAS_PLY_LICENSE
new file mode 100644
index 0000000000000..eac7ca890d560
--- /dev/null
+++ b/LICENSES/PANDAS_PLY_LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 Coursera Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/pandas/api/tests/test_api.py b/pandas/api/tests/test_api.py
index d4d8b7e4e9747..10a90488020b6 100644
--- a/pandas/api/tests/test_api.py
+++ b/pandas/api/tests/test_api.py
@@ -81,7 +81,7 @@ class TestPDApi(Base, tm.TestCase):
'pivot', 'pivot_table', 'plot_params', 'qcut',
'scatter_matrix',
'show_versions', 'timedelta_range', 'unique',
- 'value_counts', 'wide_to_long']
+ 'value_counts', 'wide_to_long', 'X']
# top-level option funcs
funcs_option = ['reset_option', 'describe_option', 'get_option',
diff --git a/pandas/computation/api.py b/pandas/computation/api.py
index e5814e08c4bbe..a821a579ab6f0 100644
--- a/pandas/computation/api.py
+++ b/pandas/computation/api.py
@@ -2,3 +2,4 @@
from pandas.computation.eval import eval
from pandas.computation.expr import Expr
+from pandas.computation.delayed import X
diff --git a/pandas/computation/delayed.py b/pandas/computation/delayed.py
new file mode 100644
index 0000000000000..2640f04727d0e
--- /dev/null
+++ b/pandas/computation/delayed.py
@@ -0,0 +1,333 @@
+from __future__ import print_function
+from pandas.compat import iteritems
+from pandas import Series, DataFrame
+
+"""
+Delayed (rename?)
+
+delayed selection api through magic `X` variable
+"""
+
+# includes large portions of pandas_ply, see LICENSES
+
+_error_doc = """
+pandas `X` is a deferred object that cannot be passed into
+most functions. {case} which is invalid.
+To pass a deferred Series into a function, use the .pipe
+function, for example, X.a.pipe(np.log), instead np.log(X.a) """
+
+# numpy / pandas access
+_disallow_attr = [
+ '__array_struct__', '__array_interface__',
+ '_typ', '_data', 'columns', 'values',
+]
+
+_allowed_magic_methods = [
+ '__add__', '__div__', '__sub__', '__truediv__', '__mul__',
+ '__radd__', '__rdiv__', '__rsub__', '__rtruediv__', '__rmul__',
+ '__mod__', '__rmod__',
+ '__eq__', '__ge__', '__gt__', '__lt__', '__le__', '__ne__',
+ '__and__', '__or__', '__invert__' '__neg__', '__pos__',
+ '__rand__', '__ror__',
+ '__abs__', '__pow__',
+]
+# can leave most not implemented, but __iter__ is
+# needed, otherwise __getitem__ sastifies sequence protocol
+# and may iterate forever
+_blacklisted_magic_methods = [
+ '__iter__'
+]
+
+_accessors = [
+ 'cat', 'dt', 'str'
+]
+
+
+class Expression(object):
+ """
+ Expression is the (abstract) base class for symbolic expressions.
+ Symbolic expressions are encoded representations of Python expressions,
+ kept on ice until you are ready to evaluate them.
+
+ If an expression is complete, it will act as a 1-argument
+ function, taking a DataFrame whose context to evaluate
+ the expr in. If not complete, __call__ will create
+ a symbolic call node.
+ """
+
+ def _eval(self, context, **options):
+ """Evaluate a symbolic expression.
+
+ Args:
+ context: The context object for evaluation. Currently, this is a
+ dictionary mapping symbol names to values,
+ `**options`: Options for evaluation. Currently, the only option is
+ `log`, which results in some debug output during evaluation if
+ it is set to `True`.
+
+ Returns:
+ anything
+ """
+ raise NotImplementedError
+
+ def __repr__(self):
+ raise NotImplementedError
+
+ def __getattr__(self, name):
+ """Construct a symbolic representation of `getattr(self, name)`."""
+ if name in _disallow_attr:
+ msg = "The {0} attribuate was called on the object".format(name)
+ raise TypeError(_error_doc.format(case=msg))
+
+ # generally completeness alternates, for instance
+ # in the following expression, marking
+ # incomplete [i], complete [c]
+ # X . a . pipe(np.exp) . sum()
+ # [i] [c] [i] [c] [i][c]
+
+ # but, accessors may break this pattern, so
+ # as a special case do some introspection
+ # to check if what's being asked for is callable
+ if self._name in _accessors:
+ # cleaner way to do this?
+ acc = getattr(getattr(Series, self._name), name)
+ complete = True
+ if hasattr(acc, '__call__'):
+ complete = False
+ else:
+ complete = not self._complete
+ return GetAttr(self, name, complete)
+
+ def __getitem__(self, name):
+ return GetItem(self, name, complete=True)
+
+ def __call__(self, *args, **kwargs):
+ if self._complete:
+ # selection lambda passed to pandas
+ if len(args) != 1:
+ msg = ("too many values passed into `X`, selection "
+ "likely malformed")
+ raise ValueError(msg)
+ df, = args
+ if not isinstance(df, DataFrame):
+ msg = ("`X` selection can only be evaluated in the context "
+ "of a DataFrame ")
+ raise ValueError(msg)
+ return self._eval({0: df})
+ # symbolic call
+ return Call(self, args=args, kwargs=kwargs)
+
+ # error trapping
+ def __array__(self, *args, **kwargs):
+ msg = "The object was attempted to be converted to a numpy array"
+ raise TypeError(_error_doc.format(case=msg))
+
+
+def _get_sym_magic_method(name):
+ def magic_method(self, *args, **kwargs):
+ return Call(GetAttr(self, name), args, kwargs)
+ return magic_method
+
+
+def _get_blacklisted_method(name):
+ def blacklisted_method(self, *args, **kwargs):
+ msg = "The {0} method was called".format(name)
+ raise TypeError(_error_doc.format(case=msg))
+ return blacklisted_method
+
+for name in _allowed_magic_methods:
+ setattr(Expression, name, _get_sym_magic_method(name))
+for name in _blacklisted_magic_methods:
+ setattr(Expression, name, _get_blacklisted_method(name))
+
+
+class Symbol(Expression):
+ """`Symbol(name)` is an atomic symbolic expression, labelled with an
+ arbitrary `name`."""
+
+ def __init__(self, name, complete=False):
+ self._name = name
+ self._complete = complete
+
+ def _eval(self, context, **options):
+ if options.get('log'):
+ print('Symbol._eval', repr(self))
+ result = context[self._name]
+ if options.get('log'):
+ print('Returning', repr(self), '=>', repr(result))
+ return result
+
+ def __repr__(self):
+ return 'Symbol(%s)' % repr(self._name)
+
+
+class GetAttr(Expression):
+ """`GetItem(obj, name)` is a symbolic expression representing the result of
+ `getattr(obj, name)`. (`obj` and `name` can themselves be symbolic.)"""
+
+ def __init__(self, obj, name, complete=True):
+ self._obj = obj
+ self._name = name
+ self._complete = complete
+
+ def _eval(self, context, **options):
+ if options.get('log'):
+ print('GetAttr._eval', repr(self))
+ evaled_obj = eval_if_symbolic(self._obj, context, **options)
+ result = getattr(evaled_obj, self._name)
+ if options.get('log'):
+ print('Returning', repr(self), '=>', repr(result))
+ return result
+
+ def __repr__(self):
+ return 'getattr(%s, %s)' % (repr(self._obj), repr(self._name))
+
+
+class GetItem(Expression):
+ """`GetAttr(obj, name)` is a symbolic expression representing the result of
+ `getattr(obj, name)`. (`obj` and `name` can themselves be symbolic.)"""
+
+ def __init__(self, obj, name, complete=True):
+ self._obj = obj
+ self._name = name
+ self._complete = complete
+
+ def _eval(self, context, **options):
+ if options.get('log'):
+ print('GetItem._eval', repr(self))
+ evaled_obj = eval_if_symbolic(self._obj, context, **options)
+ result = evaled_obj[self._name]
+ if options.get('log'):
+ print('Returning', repr(self), '=>', repr(result))
+ return result
+
+ def __repr__(self):
+ return 'getitem(%s, %s)' % (repr(self._obj), repr(self._name))
+
+
+class Call(Expression):
+ """`Call(func, args, kwargs)` is a symbolic expression representing the
+ result of `func(*args, **kwargs)`. (`func`, each member of the `args`
+ iterable, and each value in the `kwargs` dictionary can themselves be
+ symbolic)."""
+
+ def __init__(self, func, args=None, kwargs=None, complete=True):
+ self._func = func
+ if not args:
+ args = []
+ if not kwargs:
+ kwargs = {}
+ self._args = args
+ self._kwargs = kwargs
+ self._complete = True
+ self._name = None
+
+ def _eval(self, context, **options):
+ if options.get('log'):
+ print('Call._eval', repr(self))
+ evaled_func = eval_if_symbolic(self._func, context, **options)
+ evaled_args = [eval_if_symbolic(v, context, **options)
+ for v in self._args]
+ evaled_kwargs = dict((k, eval_if_symbolic(v, context, **options))
+ for k, v in iteritems(self._kwargs))
+ result = evaled_func(*evaled_args, **evaled_kwargs)
+ if options.get('log'):
+ print('Returning', repr(self), '=>', repr(result))
+ return result
+
+ def __repr__(self):
+ return '{func}(*{args}, **{kwargs})'.format(
+ func=repr(self._func),
+ args=repr(self._args),
+ kwargs=repr(self._kwargs))
+
+
+def eval_if_symbolic(obj, context, **options):
+ """Evaluate an object if it is a symbolic expression, or otherwise just
+ returns it back.
+
+ Args:
+ obj: Either a symbolic expression, or anything else (in which case this
+ is a noop).
+ context: Passed as an argument to `obj._eval` if `obj` is symbolic.
+ `**options`: Passed as arguments to `obj._eval` if `obj` is symbolic.
+
+ Returns:
+ anything
+
+ Examples:
+ >>> eval_if_symbolic(Symbol('x'), {'x': 10})
+ 10
+ >>> eval_if_symbolic(7, {'x': 10})
+ 7
+ """
+ return obj._eval(context, **options) if hasattr(obj, '_eval') else obj
+
+
+def to_callable(obj):
+ """Turn an object into a callable.
+
+ Args:
+ obj: This can be
+
+ * **a symbolic expression**, in which case the output callable
+ evaluates the expression with symbols taking values from the
+ callable's arguments (listed arguments named according to their
+ numerical index, keyword arguments named according to their
+ string keys),
+ * **a callable**, in which case the output callable is just the
+ input object, or
+ * **anything else**, in which case the output callable is a
+ constant function which always returns the input object.
+
+ Returns:
+ callable
+
+ Examples:
+ >>> to_callable(Symbol(0) + Symbol('x'))(3, x=4)
+ 7
+ >>> to_callable(lambda x: x + 1)(10)
+ 11
+ >>> to_callable(12)(3, x=4)
+ 12
+ """
+ if hasattr(obj, '_eval'):
+ return lambda *args, **kwargs: obj._eval(
+ dict(enumerate(args), **kwargs))
+ elif callable(obj):
+ return obj
+ else:
+ return lambda *args, **kwargs: obj
+
+
+# keep?
+def sym_call(func, *args, **kwargs):
+ """Construct a symbolic representation of `func(*args, **kwargs)`.
+
+ This is necessary because `func(symbolic)` will not (ordinarily) know to
+ construct a symbolic expression when it receives the symbolic
+ expression `symbolic` as a parameter (if `func` is not itself symbolic).
+ So instead, we write `sym_call(func, symbolic)`.
+
+ Tip: If the main argument of the function is a (symbolic) DataFrame, then
+ pandas' `pipe` method takes care of this problem without `sym_call`. For
+ instance, while `np.sqrt(X)` won't work, `X.pipe(np.sqrt)` will.
+
+ Args:
+ func: Function to call on evaluation (can be symbolic).
+ `*args`: Arguments to provide to `func` on evaluation (can be symbolic).
+ `**kwargs`: Keyword arguments to provide to `func` on evaluation (can be
+ symbolic).
+
+ Returns:
+ `ply.symbolic.Expression`
+
+ Example:
+ >>> sym_call(math.sqrt, Symbol('x'))._eval({'x': 16})
+ 4
+ """
+
+ return Call(func, args=args, kwargs=kwargs)
+
+X = Symbol(0)
+"""A Symbol for "the first argument" (for convenience)."""
diff --git a/pandas/computation/tests/test_delayed.py b/pandas/computation/tests/test_delayed.py
new file mode 100644
index 0000000000000..54c6d2bbddd10
--- /dev/null
+++ b/pandas/computation/tests/test_delayed.py
@@ -0,0 +1,21 @@
+import pandas as pd
+from pandas import X
+
+import pandas.util.testing as tm
+from pandas.util.testing import assert_frame_equal
+
+
+class TestDelayedApi(tm.TestCase):
+ def test_basics(self):
+ df = pd.DataFrame({'a': [1, 2, 3], 'b': [1.5, 2.5, 3.4],
+ 'c': ['abc', 'def', 'efg'],
+ 'd': pd.to_datetime(['2014-01-01',
+ '2014-01-02', '2014-01-03'])})
+ assert_frame_equal(df[df['a'] > 1], df[X.a > 1])
+ assert_frame_equal(df[df['a'] == 1], df[X.a == 1])
+ assert_frame_equal(df.assign(e=lambda x: x['b'] + 1),
+ df.assign(e=X.b + 1))
+ assert_frame_equal(df.assign(e=lambda x: x['d'].dt.day),
+ df.assign(e=X.d.dt.day))
+ assert_frame_equal(df.assign(e=lambda x: x['c'].str.upper()),
+ df.assign(e=X.c.str.upper()))
| - [ ] could close #13133
- [ ] tests added / passed
- [ ] passes `git diff upstream/master | flake8 --diff`
- [ ] whatsnew entry
This is very WIP, but wanted to put it up and show the general direction. This adds essentially a modified version of `pandas_ply` that produces plain callables that can be passed to the existing `[]`/`assign` methods. Short demo below.
One thing that's tricky is figuring out when an expression is "complete." `pandas_ply` and `dplython` don't have to do this because they use a special method to instantiate the selection, but I'd prefer not to do this if possible, so this doesn't touch any pandas internals. There's one example below (`X.c.str.upper()`) that shows where the current heuristic is failing.
cc @shoyer, @jreback, @joshuahhh @dodger487, welcome any thoughts
```
df = pd.DataFrame({'a':[1,2,3], 'b':[1.5, 2.5, 3.4],
'c':['abc', 'def', 'efg'],
'd':pd.to_datetime(['2014-01-01', '2014-01-02', '2014-01-03'])})
from pandas import X
df[X.a > 1]
Out[3]:
a b c d
1 2 2.5 def 2014-01-02
2 3 3.4 efg 2014-01-03
df[X.d.dt.day == 2]
Out[4]:
a b c d
1 2 2.5 def 2014-01-02
df.assign(e=X.a+1)
Out[5]:
a b c d e
0 1 1.5 abc 2014-01-01 2
1 2 2.5 def 2014-01-02 3
2 3 3.4 efg 2014-01-03 4
df.assign(e=X.b.pipe(np.exp))
Out[6]:
a b c d e
0 1 1.5 abc 2014-01-01 4.481689
1 2 2.5 def 2014-01-02 12.182494
2 3 3.4 efg 2014-01-03 29.964100
# this should work, but doesn't
df.assign(e=X.c.str.upper())
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-65b030f82188> in <module>()
----> 1 df.assign(e=X.c.str.upper())
# this can't work, but need to give a good error msg
df.assign(e=np.log(X.a))
---------------------------------------------------------------------------
TypeError:
pandas `X` is a deferred object that cannot be passed into
functions. The object was attempted to be converted to a numpy array which is invalid.
To pass a deferred Series into a function, use the .pipe
function, for example, X.a.pipe(np.log), instead np.log(df.a)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14209 | 2016-09-13T01:51:32Z | 2016-10-30T14:59:50Z | null | 2016-10-30T14:59:50Z |
ENH: Add divmod to series. | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 1f670fb7fb593..19318aad3d53d 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -188,6 +188,32 @@ And similarly for ``axis="items"`` and ``axis="minor"``.
match the broadcasting behavior of Panel. Though it would require a
transition period so users can change their code...
+Series and Index also support the :func:`divmod` builtin. This function takes
+the floor division and modulo operation at the same time returning a two-tuple
+of the same type as the left hand side. For example:
+
+.. ipython:: python
+
+ s = pd.Series(np.arange(10))
+ s
+ div, rem = divmod(s, 3)
+ div
+ rem
+
+ idx = pd.Index(np.arange(10))
+ idx
+ div, rem = divmod(idx, 3)
+ div
+ rem
+
+We can also do elementwise :func:`divmod`:
+
+.. ipython:: python
+
+ div, rem = divmod(s, [2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
+ div
+ rem
+
Missing data / operations with fill values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index f3a6736ff9920..ffb6e72019602 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1328,6 +1328,9 @@ Other API Changes
- ``pd.read_csv()`` in the C engine will now issue a ``ParserWarning`` or raise a ``ValueError`` when ``sep`` encoded is more than one character long (:issue:`14065`)
- ``DataFrame.values`` will now return ``float64`` with a ``DataFrame`` of mixed ``int64`` and ``uint64`` dtypes, conforming to ``np.find_common_type`` (:issue:`10364`, :issue:`13917`)
- ``pd.read_stata()`` can now handle some format 111 files, which are produced by SAS when generating Stata dta files (:issue:`11526`)
+- ``Series`` and ``Index`` now support ``divmod`` which will return a tuple of
+ series or indices. This behaves like a standard binary operator with regards
+ to broadcasting rules (:issue:`14208`).
.. _whatsnew_0190.deprecations:
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index b81d62c3cda18..237b9394dfc25 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -39,7 +39,8 @@
def _create_methods(arith_method, comp_method, bool_method,
- use_numexpr, special=False, default_axis='columns'):
+ use_numexpr, special=False, default_axis='columns',
+ have_divmod=False):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
@@ -127,6 +128,15 @@ def names(x):
names('ror_'), op('|')),
rxor=bool_method(lambda x, y: operator.xor(y, x),
names('rxor'), op('^'))))
+ if have_divmod:
+ # divmod doesn't have an op that is supported by numexpr
+ new_methods['divmod'] = arith_method(
+ divmod,
+ names('divmod'),
+ None,
+ default_axis=default_axis,
+ construct_result=_construct_divmod_result,
+ )
new_methods = dict((names(k), v) for k, v in new_methods.items())
return new_methods
@@ -156,7 +166,7 @@ def add_methods(cls, new_methods, force, select, exclude):
def add_special_arithmetic_methods(cls, arith_method=None,
comp_method=None, bool_method=None,
use_numexpr=True, force=False, select=None,
- exclude=None):
+ exclude=None, have_divmod=False):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
@@ -177,6 +187,9 @@ def add_special_arithmetic_methods(cls, arith_method=None,
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
+ have_divmod : bool, (optional)
+ should a divmod method be added? this method is special because it
+ returns a tuple of cls instead of a single element of type cls
"""
# in frame, special methods have default_axis = None, comp methods use
@@ -184,7 +197,7 @@ def add_special_arithmetic_methods(cls, arith_method=None,
new_methods = _create_methods(arith_method, comp_method,
bool_method, use_numexpr, default_axis=None,
- special=True)
+ special=True, have_divmod=have_divmod)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed
@@ -618,8 +631,22 @@ def _align_method_SERIES(left, right, align_asobject=False):
return left, right
+def _construct_result(left, result, index, name, dtype):
+ return left._constructor(result, index=index, name=name, dtype=dtype)
+
+
+def _construct_divmod_result(left, result, index, name, dtype):
+ """divmod returns a tuple of like indexed series instead of a single series.
+ """
+ constructor = left._constructor
+ return (
+ constructor(result[0], index=index, name=name, dtype=dtype),
+ constructor(result[1], index=index, name=name, dtype=dtype),
+ )
+
+
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None,
- **eval_kwargs):
+ construct_result=_construct_result, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
@@ -692,8 +719,14 @@ def wrapper(left, right, name=name, na_op=na_op):
lvalues = lvalues.values
result = wrap_results(safe_na_op(lvalues, rvalues))
- return left._constructor(result, index=left.index,
- name=name, dtype=dtype)
+ return construct_result(
+ left,
+ result,
+ index=left.index,
+ name=name,
+ dtype=dtype,
+ )
+
return wrapper
@@ -933,6 +966,10 @@ def wrapper(self, other):
'desc': 'Integer division',
'reversed': False,
'reverse': 'rfloordiv'},
+ 'divmod': {'op': 'divmod',
+ 'desc': 'Integer division and modulo',
+ 'reversed': False,
+ 'reverse': None},
'eq': {'op': '==',
'desc': 'Equal to',
@@ -1033,7 +1070,8 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
series_special_funcs = dict(arith_method=_arith_method_SERIES,
comp_method=_comp_method_SERIES,
- bool_method=_bool_method_SERIES)
+ bool_method=_bool_method_SERIES,
+ have_divmod=True)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index d4ca18a6713b5..f430305f5cb91 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -3426,7 +3426,7 @@ def _validate_for_numeric_binop(self, other, op, opstr):
def _add_numeric_methods_binary(cls):
""" add in numeric methods """
- def _make_evaluate_binop(op, opstr, reversed=False):
+ def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index):
def _evaluate_numeric_binop(self, other):
from pandas.tseries.offsets import DateOffset
@@ -3448,7 +3448,7 @@ def _evaluate_numeric_binop(self, other):
attrs = self._maybe_update_attributes(attrs)
with np.errstate(all='ignore'):
result = op(values, other)
- return Index(result, **attrs)
+ return constructor(result, **attrs)
return _evaluate_numeric_binop
@@ -3478,6 +3478,15 @@ def _evaluate_numeric_binop(self, other):
cls.__rdiv__ = _make_evaluate_binop(
operator.div, '__div__', reversed=True)
+ cls.__divmod__ = _make_evaluate_binop(
+ divmod,
+ '__divmod__',
+ constructor=lambda result, **attrs: (
+ Index(result[0], **attrs),
+ Index(result[1], **attrs),
+ ),
+ )
+
@classmethod
def _add_numeric_methods_unary(cls):
""" add in numeric unary methods """
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index d3a89b301ae46..51d8c95f9d783 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -73,6 +73,30 @@ def test_numeric_compat(self):
self.assertRaises(ValueError, lambda: idx * idx[0:3])
self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
+ result = divmod(idx, 2)
+ with np.errstate(all='ignore'):
+ div, mod = divmod(idx.values, 2)
+ expected = Index(div), Index(mod)
+ for r, e in zip(result, expected):
+ tm.assert_index_equal(r, e)
+
+ result = divmod(idx, np.full_like(idx.values, 2))
+ with np.errstate(all='ignore'):
+ div, mod = divmod(idx.values, np.full_like(idx.values, 2))
+ expected = Index(div), Index(mod)
+ for r, e in zip(result, expected):
+ tm.assert_index_equal(r, e)
+
+ result = divmod(idx, Series(np.full_like(idx.values, 2)))
+ with np.errstate(all='ignore'):
+ div, mod = divmod(
+ idx.values,
+ np.full_like(idx.values, 2),
+ )
+ expected = Index(div), Index(mod)
+ for r, e in zip(result, expected):
+ tm.assert_index_equal(r, e)
+
def test_explicit_conversions(self):
# GH 8608
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 197311868b768..24c26276ea24d 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1,6 +1,7 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+from collections import Iterable
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
@@ -19,7 +20,7 @@
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
- assert_frame_equal)
+ assert_frame_equal, assert_index_equal)
import pandas.util.testing as tm
from .common import TestData
@@ -185,6 +186,34 @@ def check_comparators(series, other, check_dtype=True):
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
+ def test_divmod(self):
+ def check(series, other):
+ results = divmod(series, other)
+ if isinstance(other, Iterable) and len(series) != len(other):
+ # if the lengths don't match, this is the test where we use
+ # `self.ts[::2]`. Pad every other value in `other_np` with nan.
+ other_np = []
+ for n in other:
+ other_np.append(n)
+ other_np.append(np.nan)
+ else:
+ other_np = other
+ other_np = np.asarray(other_np)
+ with np.errstate(all='ignore'):
+ expecteds = divmod(series.values, np.asarray(other_np))
+
+ for result, expected in zip(results, expecteds):
+ # check the values, name, and index separatly
+ assert_almost_equal(np.asarray(result), expected)
+
+ self.assertEqual(result.name, series.name)
+ assert_index_equal(result.index, series.index)
+
+ check(self.ts, self.ts * 2)
+ check(self.ts, self.ts * 0)
+ check(self.ts, self.ts[::2])
+ check(self.ts, 5)
+
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
| - [x] closes #8174
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
edit: in the issue it said this is uncommon; however, I recently ran into an issue where I was doing arithmetic with fiscal quarters and using divmod seemed more natural.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14208 | 2016-09-12T18:58:01Z | 2016-09-19T21:07:02Z | 2016-09-19T21:07:02Z | 2016-09-19T21:09:29Z |
In gbq.to_gbq allow the DataFrame column order to differ from schema | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 96ec624f4fd3c..d436fa52918d3 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4579,8 +4579,7 @@ a ``TableCreationError`` if the destination table already exists.
If the ``if_exists`` argument is set to ``'append'``, the destination dataframe will
be written to the table using the defined table schema and column types. The
- dataframe must match the destination table in column order, structure, and
- data types.
+ dataframe must match the destination table in structure and data types.
If the ``if_exists`` argument is set to ``'replace'``, and the existing table has a
different schema, a delay of 2 minutes will be forced to ensure that the new schema
has propagated in the Google environment. See
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index f3a6736ff9920..3fcc848f4c225 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -397,6 +397,7 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci
Google BigQuery Enhancements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :func:`pandas.io.gbq.read_gbq` method has gained the ``dialect`` argument to allow users to specify whether to use BigQuery's legacy SQL or BigQuery's standard SQL. See the :ref:`docs <io.bigquery_reader>` for more details (:issue:`13615`).
+The :func:`pandas.io.gbq.to_gbq` method now allows the DataFrame column order to differ from the destination table schema (:issue:`11359`).
.. _whatsnew_0190.errstate:
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 8f23e82daf2e3..d6f8660f20ef6 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -547,12 +547,17 @@ def verify_schema(self, dataset_id, table_id, schema):
from apiclient.errors import HttpError
try:
- return (self.service.tables().get(
+ remote_schema = self.service.tables().get(
projectId=self.project_id,
datasetId=dataset_id,
- tableId=table_id
- ).execute()['schema']) == schema
+ tableId=table_id).execute()['schema']
+ fields_remote = set([json.dumps(field_remote)
+ for field_remote in remote_schema['fields']])
+ fields_local = set(json.dumps(field_local)
+ for field_local in schema['fields'])
+
+ return fields_remote == fields_local
except HttpError as ex:
self.process_http_error(ex)
@@ -819,10 +824,9 @@ def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
dataset_id, table_id, table_schema)
elif if_exists == 'append':
if not connector.verify_schema(dataset_id, table_id, table_schema):
- raise InvalidSchema("Please verify that the column order, "
- "structure and data types in the "
- "DataFrame match the schema of the "
- "destination table.")
+ raise InvalidSchema("Please verify that the structure and "
+ "data types in the DataFrame match the "
+ "schema of the destination table.")
else:
table.create(table_id, table_schema)
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py
index 921fd824d6ffd..0ea4b5204e150 100644
--- a/pandas/io/tests/test_gbq.py
+++ b/pandas/io/tests/test_gbq.py
@@ -743,6 +743,8 @@ def setUp(self):
private_key=_get_private_key_path())
self.table = gbq._Table(_get_project_id(), DATASET_ID + "1",
private_key=_get_private_key_path())
+ self.sut = gbq.GbqConnector(_get_project_id(),
+ private_key=_get_private_key_path())
@classmethod
def tearDownClass(cls):
@@ -906,6 +908,69 @@ def test_list_table(self):
'Expected table list to contain table {0}'
.format(destination_table))
+ def test_verify_schema_allows_flexible_column_order(self):
+ destination_table = TABLE_ID + "10"
+ test_schema_1 = {'fields': [{'name': 'A', 'type': 'FLOAT'},
+ {'name': 'B', 'type': 'FLOAT'},
+ {'name': 'C', 'type': 'STRING'},
+ {'name': 'D', 'type': 'TIMESTAMP'}]}
+ test_schema_2 = {'fields': [{'name': 'A', 'type': 'FLOAT'},
+ {'name': 'C', 'type': 'STRING'},
+ {'name': 'B', 'type': 'FLOAT'},
+ {'name': 'D', 'type': 'TIMESTAMP'}]}
+
+ self.table.create(destination_table, test_schema_1)
+ self.assertTrue(self.sut.verify_schema(
+ DATASET_ID + "1", destination_table, test_schema_2),
+ 'Expected schema to match')
+
+ def test_verify_schema_fails_different_data_type(self):
+ destination_table = TABLE_ID + "11"
+ test_schema_1 = {'fields': [{'name': 'A', 'type': 'FLOAT'},
+ {'name': 'B', 'type': 'FLOAT'},
+ {'name': 'C', 'type': 'STRING'},
+ {'name': 'D', 'type': 'TIMESTAMP'}]}
+ test_schema_2 = {'fields': [{'name': 'A', 'type': 'FLOAT'},
+ {'name': 'B', 'type': 'STRING'},
+ {'name': 'C', 'type': 'STRING'},
+ {'name': 'D', 'type': 'TIMESTAMP'}]}
+
+ self.table.create(destination_table, test_schema_1)
+ self.assertFalse(self.sut.verify_schema(
+ DATASET_ID + "1", destination_table, test_schema_2),
+ 'Expected different schema')
+
+ def test_verify_schema_fails_different_structure(self):
+ destination_table = TABLE_ID + "12"
+ test_schema_1 = {'fields': [{'name': 'A', 'type': 'FLOAT'},
+ {'name': 'B', 'type': 'FLOAT'},
+ {'name': 'C', 'type': 'STRING'},
+ {'name': 'D', 'type': 'TIMESTAMP'}]}
+ test_schema_2 = {'fields': [{'name': 'A', 'type': 'FLOAT'},
+ {'name': 'B2', 'type': 'FLOAT'},
+ {'name': 'C', 'type': 'STRING'},
+ {'name': 'D', 'type': 'TIMESTAMP'}]}
+
+ self.table.create(destination_table, test_schema_1)
+ self.assertFalse(self.sut.verify_schema(
+ DATASET_ID + "1", destination_table, test_schema_2),
+ 'Expected different schema')
+
+ def test_upload_data_flexible_column_order(self):
+ destination_table = DESTINATION_TABLE + "13"
+
+ test_size = 10
+ df = make_mixed_dataframe_v2(test_size)
+
+ # Initialize table with sample data
+ gbq.to_gbq(df, destination_table, _get_project_id(), chunksize=10000,
+ private_key=_get_private_key_path())
+
+ df_columns_reversed = df[df.columns[::-1]]
+
+ gbq.to_gbq(df_columns_reversed, destination_table, _get_project_id(),
+ if_exists='append', private_key=_get_private_key_path())
+
def test_list_dataset(self):
dataset_id = DATASET_ID + "1"
self.assertTrue(dataset_id in self.dataset.datasets(),
| - [x] closes #11359
- [x] tests added / passed. [Link to Travis-CI build with my gbq credentials](https://travis-ci.org/parthea/pandas/builds/159134242)
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14202 | 2016-09-11T16:31:45Z | 2016-09-13T10:24:03Z | null | 2016-09-13T10:24:04Z |
DOC: add source links to api docs | diff --git a/doc/source/conf.py b/doc/source/conf.py
index a1b71f0279c7a..fd3a2493a53e8 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -13,6 +13,7 @@
import sys
import os
import re
+import inspect
from pandas.compat import u, PY3
# If extensions (or modules to document with autodoc) are in another directory,
@@ -47,6 +48,7 @@
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
+ 'sphinx.ext.linkcode',
]
@@ -424,6 +426,55 @@ def get_items(self, names):
return items
+# based on numpy doc/source/conf.py
+def linkcode_resolve(domain, info):
+ """
+ Determine the URL corresponding to Python object
+ """
+ if domain != 'py':
+ return None
+
+ modname = info['module']
+ fullname = info['fullname']
+
+ submod = sys.modules.get(modname)
+ if submod is None:
+ return None
+
+ obj = submod
+ for part in fullname.split('.'):
+ try:
+ obj = getattr(obj, part)
+ except:
+ return None
+
+ try:
+ fn = inspect.getsourcefile(obj)
+ except:
+ fn = None
+ if not fn:
+ return None
+
+ try:
+ source, lineno = inspect.getsourcelines(obj)
+ except:
+ lineno = None
+
+ if lineno:
+ linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
+ else:
+ linespec = ""
+
+ fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
+
+ if '+' in pandas.__version__:
+ return "http://github.com/pydata/pandas/blob/master/pandas/%s%s" % (
+ fn, linespec)
+ else:
+ return "http://github.com/pydata/pandas/blob/v%s/pandas/%s%s" % (
+ pandas.__version__, fn, linespec)
+
+
# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
| - [x] closes #14178
- [x] tests not needed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew not needed
cc @jorisvandenbossche - like you mentioned, this doesn't work for everything (properties, accessors, some generated methods)
| https://api.github.com/repos/pandas-dev/pandas/pulls/14200 | 2016-09-10T14:42:28Z | 2016-09-12T07:57:26Z | 2016-09-12T07:57:26Z | 2016-09-12T11:32:40Z |
BUG: union_categorical with Series and cat idx | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 59ddfe602c033..b1795cb37200c 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -695,6 +695,40 @@ The below raises ``TypeError`` because the categories are ordered and not identi
Out[3]:
TypeError: to union ordered Categoricals, all categories must be the same
+``union_categoricals`` also works with a ``CategoricalIndex``, or ``Series`` containing
+categorical data, but note that the resulting array will always be a plain ``Categorical``
+
+.. ipython:: python
+
+ a = pd.Series(["b", "c"], dtype='category')
+ b = pd.Series(["a", "b"], dtype='category')
+ union_categoricals([a, b])
+
+.. note::
+
+ ``union_categoricals`` may recode the integer codes for categories
+ when combining categoricals. This is likely what you want,
+ but if you are relying on the exact numbering of the categories, be
+ aware.
+
+ .. ipython:: python
+
+ c1 = pd.Categorical(["b", "c"])
+ c2 = pd.Categorical(["a", "b"])
+
+ c1
+ # "b" is coded to 0
+ c1.codes
+
+ c2
+ # "b" is coded to 1
+ c2.codes
+
+ c = union_categoricals([c1, c2])
+ c
+ # "b" is coded to 0 throughout, same as c1, different from c2
+ c.codes
+
.. _categorical.concat:
Concatenation
diff --git a/pandas/tools/tests/test_concat.py b/pandas/tools/tests/test_concat.py
index 8e20cfa83c405..f541de316661a 100644
--- a/pandas/tools/tests/test_concat.py
+++ b/pandas/tools/tests/test_concat.py
@@ -9,7 +9,7 @@
from pandas import (DataFrame, concat,
read_csv, isnull, Series, date_range,
Index, Panel, MultiIndex, Timestamp,
- DatetimeIndex, Categorical)
+ DatetimeIndex, Categorical, CategoricalIndex)
from pandas.types.concat import union_categoricals
from pandas.util import testing as tm
from pandas.util.testing import (assert_frame_equal,
@@ -1539,10 +1539,12 @@ def test_union_categorical(self):
]
for a, b, combined in data:
- result = union_categoricals([Categorical(a), Categorical(b)])
- expected = Categorical(combined)
- tm.assert_categorical_equal(result, expected,
- check_category_order=True)
+ for box in [Categorical, CategoricalIndex, Series]:
+ result = union_categoricals([box(Categorical(a)),
+ box(Categorical(b))])
+ expected = Categorical(combined)
+ tm.assert_categorical_equal(result, expected,
+ check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
@@ -1771,6 +1773,25 @@ def test_union_categoricals_sort_false(self):
categories=['b', 'a', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
+ def test_union_categorical_unwrap(self):
+ # GH 14173
+ c1 = Categorical(['a', 'b'])
+ c2 = pd.Series(['b', 'c'], dtype='category')
+ result = union_categoricals([c1, c2])
+ expected = Categorical(['a', 'b', 'b', 'c'])
+ tm.assert_categorical_equal(result, expected)
+
+ c2 = CategoricalIndex(c2)
+ result = union_categoricals([c1, c2])
+ tm.assert_categorical_equal(result, expected)
+
+ c1 = Series(c1)
+ result = union_categoricals([c1, c2])
+ tm.assert_categorical_equal(result, expected)
+
+ with tm.assertRaises(TypeError):
+ union_categoricals([c1, ['a', 'b', 'c']])
+
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
diff --git a/pandas/types/concat.py b/pandas/types/concat.py
index 8bdd71348a537..827eb160c452d 100644
--- a/pandas/types/concat.py
+++ b/pandas/types/concat.py
@@ -210,14 +210,15 @@ def _concat_asobject(to_concat):
def union_categoricals(to_union, sort_categories=False):
"""
- Combine list-like of Categoricals, unioning categories. All
+ Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
- to_union : list-like of Categoricals
+ to_union : list-like of Categorical, CategoricalIndex,
+ or Series with dtype='category'
sort_categories : boolean, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
@@ -236,11 +237,20 @@ def union_categoricals(to_union, sort_categories=False):
ValueError
Emmpty list of categoricals passed
"""
- from pandas import Index, Categorical
+ from pandas import Index, Categorical, CategoricalIndex, Series
if len(to_union) == 0:
raise ValueError('No Categoricals to union')
+ def _maybe_unwrap(x):
+ if isinstance(x, (CategoricalIndex, Series)):
+ return x.values
+ elif isinstance(x, Categorical):
+ return x
+ else:
+ raise TypeError("all components to combine must be Categorical")
+
+ to_union = [_maybe_unwrap(x) for x in to_union]
first = to_union[0]
if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype)
| - [x] closes #14173
- [ ] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry (not needed)
| https://api.github.com/repos/pandas-dev/pandas/pulls/14199 | 2016-09-10T13:31:36Z | 2016-09-10T14:30:49Z | null | 2016-09-28T10:52:57Z |
Fix: F999 dictionary key '2000q4' repeated with different values | diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 6cee45df2a63c..21cfe84f153fa 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -687,7 +687,6 @@ def test_parsers(self):
'00-Q4': datetime.datetime(2000, 10, 1),
'4Q-2000': datetime.datetime(2000, 10, 1),
'4Q-00': datetime.datetime(2000, 10, 1),
- '2000q4': datetime.datetime(2000, 10, 1),
'00q4': datetime.datetime(2000, 10, 1),
'2005': datetime.datetime(2005, 1, 1),
'2005-11': datetime.datetime(2005, 11, 1),
| Removal of duplicate line to fix lint error
```
pandas/tseries/tests/test_tslib.py:685:18: F999 dictionary key '2000q4' repeated with different values
pandas/tseries/tests/test_tslib.py:690:18: F999 dictionary key '2000q4' repeated with different values
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14198 | 2016-09-10T13:15:18Z | 2016-09-10T19:00:13Z | 2016-09-10T19:00:13Z | 2016-09-10T19:00:13Z |
Fix minor spacing issue | diff --git a/pandas/formats/format.py b/pandas/formats/format.py
index 4740dd25c419d..baa24f95a768c 100644
--- a/pandas/formats/format.py
+++ b/pandas/formats/format.py
@@ -604,8 +604,6 @@ def to_string(self):
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
- if not self.index:
- text = text.replace('\n ', '\n').strip()
self.buf.writelines(text)
if self.should_show_dimensions:
@@ -735,7 +733,7 @@ def space_format(x, y):
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
- str_columns = [[' ' + x if not self._get_formatter(i) and
+ str_columns = [[x if not self._get_formatter(i) and
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
@@ -1988,11 +1986,11 @@ def _format(x):
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
- fmt_values.append(' %s' % _format(v))
+ fmt_values.append('%s' % _format(v))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
- fmt_values.append(' %s' % _format(v))
+ fmt_values.append('%s' % _format(v))
return fmt_values
@@ -2134,7 +2132,7 @@ def _format_strings(self):
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
- formatter = self.formatter or (lambda x: '% d' % x)
+ formatter = self.formatter or (lambda x: '%d' % x)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
| Continuation of #13350
- [x] closes #13032
- [ ] tests added / passed
- [x] passes `pep8radius master --diff`
- [x] whatsnew entry - not necessary
See https://github.com/pydata/pandas/pull/13350 for more details, mucked up my pull request. Tests will need a lot of work.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14196 | 2016-09-10T02:34:40Z | 2016-10-27T14:54:00Z | null | 2016-10-27T14:54:07Z |
BUG: Categorical constructor not idempotent with ext dtype | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index d672b9b897fda..7e40d04c6b889 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1427,7 +1427,7 @@ Bug Fixes
- Bug in ``SeriesGroupBy.transform`` with datetime values and missing groups (:issue:`13191`)
- Bug where empty ``Series`` were incorrectly coerced in datetime-like numeric operations (:issue:`13844`)
-
+- Bug in ``Categorical`` constructor when passed a ``Categorical`` containing datetimes with timezones (:issue:`14190`)
- Bug in ``Series.str.extractall()`` with ``str`` index raises ``ValueError`` (:issue:`13156`)
- Bug in ``Series.str.extractall()`` with single group and quantifier (:issue:`13382`)
- Bug in ``DatetimeIndex`` and ``Period`` subtraction raises ``ValueError`` or ``AttributeError`` rather than ``TypeError`` (:issue:`13078`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 48054c5bd34fa..0a13c8936eeec 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -259,7 +259,7 @@ def __init__(self, values, categories=None, ordered=False,
ordered = values.ordered
if categories is None:
categories = values.categories
- values = values.__array__()
+ values = values.get_values()
elif isinstance(values, (ABCIndexClass, ABCSeries)):
pass
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index c4ddd2c0981d9..a494a0d53b123 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -362,6 +362,22 @@ def test_constructor_from_index_series_period(self):
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
+ def test_constructor_invariant(self):
+ # GH 14190
+ vals = [
+ np.array([1., 1.2, 1.8, np.nan]),
+ np.array([1, 2, 3], dtype='int64'),
+ ['a', 'b', 'c', np.nan],
+ [pd.Period('2014-01'), pd.Period('2014-02'), pd.NaT],
+ [pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-02'), pd.NaT],
+ [pd.Timestamp('2014-01-01', tz='US/Eastern'),
+ pd.Timestamp('2014-01-02', tz='US/Eastern'), pd.NaT],
+ ]
+ for val in vals:
+ c = Categorical(val)
+ c2 = Categorical(c)
+ tm.assert_categorical_equal(c, c2)
+
def test_from_codes(self):
# too few categories
| - [x] closes #14190
- [ ] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14191 | 2016-09-09T00:07:11Z | 2016-09-09T19:33:37Z | null | 2016-09-10T12:37:58Z |
DOC: minor typo in 0.19.0 whatsnew file | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 3f3ebcb6e5830..7e8e1b15654a0 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -296,8 +296,7 @@ Categorical Concatenation
b = pd.Categorical(["a", "b"])
union_categoricals([a, b])
-- ``concat`` and ``append`` now can concat ``category`` dtypes wifht different
-``categories`` as ``object`` dtype (:issue:`13524`)
+- ``concat`` and ``append`` now can concat ``category`` dtypes with different ``categories`` as ``object`` dtype (:issue:`13524`)
**Previous behavior**:
| https://api.github.com/repos/pandas-dev/pandas/pulls/14185 | 2016-09-08T14:36:38Z | 2016-09-08T15:02:13Z | 2016-09-08T15:02:13Z | 2016-09-08T15:02:19Z | |
ENH: Added multicolumn/multirow support for latex | diff --git a/doc/source/options.rst b/doc/source/options.rst
index 77cac6d495d13..10a13ed36df8d 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -273,151 +273,156 @@ Options are 'right', and 'left'.
Available Options
-----------------
-========================== ============ ==================================
-Option Default Function
-========================== ============ ==================================
-display.chop_threshold None If set to a float value, all float
- values smaller then the given
- threshold will be displayed as
- exactly 0 by repr and friends.
-display.colheader_justify right Controls the justification of
- column headers. used by DataFrameFormatter.
-display.column_space 12 No description available.
-display.date_dayfirst False When True, prints and parses dates
- with the day first, eg 20/01/2005
-display.date_yearfirst False When True, prints and parses dates
- with the year first, eg 2005/01/20
-display.encoding UTF-8 Defaults to the detected encoding
- of the console. Specifies the encoding
- to be used for strings returned by
- to_string, these are generally strings
- meant to be displayed on the console.
-display.expand_frame_repr True Whether to print out the full DataFrame
- repr for wide DataFrames across
- multiple lines, `max_columns` is
- still respected, but the output will
- wrap-around across multiple "pages"
- if its width exceeds `display.width`.
-display.float_format None The callable should accept a floating
- point number and return a string with
- the desired format of the number.
- This is used in some places like
- SeriesFormatter.
- See core.format.EngFormatter for an example.
-display.height 60 Deprecated. Use `display.max_rows` instead.
-display.large_repr truncate For DataFrames exceeding max_rows/max_cols,
- the repr (and HTML repr) can show
- a truncated table (the default from 0.13),
- or switch to the view from df.info()
- (the behaviour in earlier versions of pandas).
- allowable settings, ['truncate', 'info']
-display.latex.repr False Whether to produce a latex DataFrame
- representation for jupyter frontends
- that support it.
-display.latex.escape True Escapes special caracters in Dataframes, when
- using the to_latex method.
-display.latex.longtable False Specifies if the to_latex method of a Dataframe
- uses the longtable format.
-display.line_width 80 Deprecated. Use `display.width` instead.
-display.max_columns 20 max_rows and max_columns are used
- in __repr__() methods to decide if
- to_string() or info() is used to
- render an object to a string. In
- case python/IPython is running in
- a terminal this can be set to 0 and
- pandas will correctly auto-detect
- the width the terminal and swap to
- a smaller format in case all columns
- would not fit vertically. The IPython
- notebook, IPython qtconsole, or IDLE
- do not run in a terminal and hence
- it is not possible to do correct
- auto-detection. 'None' value means
- unlimited.
-display.max_colwidth 50 The maximum width in characters of
- a column in the repr of a pandas
- data structure. When the column overflows,
- a "..." placeholder is embedded in
- the output.
-display.max_info_columns 100 max_info_columns is used in DataFrame.info
- method to decide if per column information
- will be printed.
-display.max_info_rows 1690785 df.info() will usually show null-counts
- for each column. For large frames
- this can be quite slow. max_info_rows
- and max_info_cols limit this null
- check only to frames with smaller
- dimensions then specified.
-display.max_rows 60 This sets the maximum number of rows
- pandas should output when printing
- out various output. For example,
- this value determines whether the
- repr() for a dataframe prints out
- fully or just a summary repr.
- 'None' value means unlimited.
-display.max_seq_items 100 when pretty-printing a long sequence,
- no more then `max_seq_items` will
- be printed. If items are omitted,
- they will be denoted by the addition
- of "..." to the resulting string.
- If set to None, the number of items
- to be printed is unlimited.
-display.memory_usage True This specifies if the memory usage of
- a DataFrame should be displayed when the
- df.info() method is invoked.
-display.multi_sparse True "Sparsify" MultiIndex display (don't
- display repeated elements in outer
- levels within groups)
-display.notebook_repr_html True When True, IPython notebook will
- use html representation for
- pandas objects (if it is available).
-display.pprint_nest_depth 3 Controls the number of nested levels
- to process when pretty-printing
-display.precision 6 Floating point output precision in
- terms of number of places after the
- decimal, for regular formatting as well
- as scientific notation. Similar to
- numpy's ``precision`` print option
-display.show_dimensions truncate Whether to print out dimensions
- at the end of DataFrame repr.
- If 'truncate' is specified, only
- print out the dimensions if the
- frame is truncated (e.g. not display
- all rows and/or columns)
-display.width 80 Width of the display in characters.
- In case python/IPython is running in
- a terminal this can be set to None
- and pandas will correctly auto-detect
- the width. Note that the IPython notebook,
- IPython qtconsole, or IDLE do not run in a
- terminal and hence it is not possible
- to correctly detect the width.
-html.border 1 A ``border=value`` attribute is
- inserted in the ``<table>`` tag
- for the DataFrame HTML repr.
-io.excel.xls.writer xlwt The default Excel writer engine for
- 'xls' files.
-io.excel.xlsm.writer openpyxl The default Excel writer engine for
- 'xlsm' files. Available options:
- 'openpyxl' (the default).
-io.excel.xlsx.writer openpyxl The default Excel writer engine for
- 'xlsx' files.
-io.hdf.default_format None default format writing format, if
- None, then put will default to
- 'fixed' and append will default to
- 'table'
-io.hdf.dropna_table True drop ALL nan rows when appending
- to a table
-mode.chained_assignment warn Raise an exception, warn, or no
- action if trying to use chained
- assignment, The default is warn
-mode.sim_interactive False Whether to simulate interactive mode
- for purposes of testing
-mode.use_inf_as_null False True means treat None, NaN, -INF,
- INF as null (old way), False means
- None and NaN are null, but INF, -INF
- are not null (new way).
-========================== ============ ==================================
+=================================== ============ ==================================
+Option Default Function
+=================================== ============ ==================================
+display.chop_threshold None If set to a float value, all float
+ values smaller then the given
+ threshold will be displayed as
+ exactly 0 by repr and friends.
+display.colheader_justify right Controls the justification of
+ column headers. used by DataFrameFormatter.
+display.column_space 12 No description available.
+display.date_dayfirst False When True, prints and parses dates
+ with the day first, eg 20/01/2005
+display.date_yearfirst False When True, prints and parses dates
+ with the year first, eg 2005/01/20
+display.encoding UTF-8 Defaults to the detected encoding
+ of the console. Specifies the encoding
+ to be used for strings returned by
+ to_string, these are generally strings
+ meant to be displayed on the console.
+display.expand_frame_repr True Whether to print out the full DataFrame
+ repr for wide DataFrames across
+ multiple lines, `max_columns` is
+ still respected, but the output will
+ wrap-around across multiple "pages"
+ if its width exceeds `display.width`.
+display.float_format None The callable should accept a floating
+ point number and return a string with
+ the desired format of the number.
+ This is used in some places like
+ SeriesFormatter.
+ See core.format.EngFormatter for an example.
+display.height 60 Deprecated. Use `display.max_rows` instead.
+display.large_repr truncate For DataFrames exceeding max_rows/max_cols,
+ the repr (and HTML repr) can show
+ a truncated table (the default from 0.13),
+ or switch to the view from df.info()
+ (the behaviour in earlier versions of pandas).
+ allowable settings, ['truncate', 'info']
+display.latex.repr False Whether to produce a latex DataFrame
+ representation for jupyter frontends
+ that support it.
+display.latex.escape True Escapes special caracters in Dataframes, when
+ using the to_latex method.
+display.latex.longtable False Specifies if the to_latex method of a Dataframe
+ uses the longtable format.
+display.latex.multicolumn True Combines columns when using a MultiIndex
+display.latex.multicolumn_format 'l' Alignment of multicolumn labels
+display.latex.multirow False Combines rows when using a MultiIndex.
+ Centered instead of top-aligned,
+ separated by clines.
+display.line_width 80 Deprecated. Use `display.width` instead.
+display.max_columns 20 max_rows and max_columns are used
+ in __repr__() methods to decide if
+ to_string() or info() is used to
+ render an object to a string. In
+ case python/IPython is running in
+ a terminal this can be set to 0 and
+ pandas will correctly auto-detect
+ the width the terminal and swap to
+ a smaller format in case all columns
+ would not fit vertically. The IPython
+ notebook, IPython qtconsole, or IDLE
+ do not run in a terminal and hence
+ it is not possible to do correct
+ auto-detection. 'None' value means
+ unlimited.
+display.max_colwidth 50 The maximum width in characters of
+ a column in the repr of a pandas
+ data structure. When the column overflows,
+ a "..." placeholder is embedded in
+ the output.
+display.max_info_columns 100 max_info_columns is used in DataFrame.info
+ method to decide if per column information
+ will be printed.
+display.max_info_rows 1690785 df.info() will usually show null-counts
+ for each column. For large frames
+ this can be quite slow. max_info_rows
+ and max_info_cols limit this null
+ check only to frames with smaller
+ dimensions then specified.
+display.max_rows 60 This sets the maximum number of rows
+ pandas should output when printing
+ out various output. For example,
+ this value determines whether the
+ repr() for a dataframe prints out
+ fully or just a summary repr.
+ 'None' value means unlimited.
+display.max_seq_items 100 when pretty-printing a long sequence,
+ no more then `max_seq_items` will
+ be printed. If items are omitted,
+ they will be denoted by the addition
+ of "..." to the resulting string.
+ If set to None, the number of items
+ to be printed is unlimited.
+display.memory_usage True This specifies if the memory usage of
+ a DataFrame should be displayed when the
+ df.info() method is invoked.
+display.multi_sparse True "Sparsify" MultiIndex display (don't
+ display repeated elements in outer
+ levels within groups)
+display.notebook_repr_html True When True, IPython notebook will
+ use html representation for
+ pandas objects (if it is available).
+display.pprint_nest_depth 3 Controls the number of nested levels
+ to process when pretty-printing
+display.precision 6 Floating point output precision in
+ terms of number of places after the
+ decimal, for regular formatting as well
+ as scientific notation. Similar to
+ numpy's ``precision`` print option
+display.show_dimensions truncate Whether to print out dimensions
+ at the end of DataFrame repr.
+ If 'truncate' is specified, only
+ print out the dimensions if the
+ frame is truncated (e.g. not display
+ all rows and/or columns)
+display.width 80 Width of the display in characters.
+ In case python/IPython is running in
+ a terminal this can be set to None
+ and pandas will correctly auto-detect
+ the width. Note that the IPython notebook,
+ IPython qtconsole, or IDLE do not run in a
+ terminal and hence it is not possible
+ to correctly detect the width.
+html.border 1 A ``border=value`` attribute is
+ inserted in the ``<table>`` tag
+ for the DataFrame HTML repr.
+io.excel.xls.writer xlwt The default Excel writer engine for
+ 'xls' files.
+io.excel.xlsm.writer openpyxl The default Excel writer engine for
+ 'xlsm' files. Available options:
+ 'openpyxl' (the default).
+io.excel.xlsx.writer openpyxl The default Excel writer engine for
+ 'xlsx' files.
+io.hdf.default_format None default format writing format, if
+ None, then put will default to
+ 'fixed' and append will default to
+ 'table'
+io.hdf.dropna_table True drop ALL nan rows when appending
+ to a table
+mode.chained_assignment warn Raise an exception, warn, or no
+ action if trying to use chained
+ assignment, The default is warn
+mode.sim_interactive False Whether to simulate interactive mode
+ for purposes of testing
+mode.use_inf_as_null False True means treat None, NaN, -INF,
+ INF as null (old way), False means
+ None and NaN are null, but INF, -INF
+ are not null (new way).
+=================================== ============ ==================================
.. _basics.console_output:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index dca4f890e496b..0991f3873b06f 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -182,6 +182,7 @@ Other enhancements
- ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs <timedeltas.isoformat>` (:issue:`15136`)
- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`)
- ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`)
+- The ``.to_latex()`` method will now accept ``multicolumn`` and ``multirow`` arguments to use the accompanying LaTeX enhancements
- ``pd.merge_asof()`` gained the option ``direction='backward'|'forward'|'nearest'`` (:issue:`14887`)
- ``Series/DataFrame.asfreq()`` have gained a ``fill_value`` parameter, to fill missing values (:issue:`3715`).
- ``Series/DataFrame.resample.asfreq`` have gained a ``fill_value`` parameter, to fill missing values during resampling (:issue:`3715`).
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index d3db633f3aa04..89616890e1de1 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -239,14 +239,35 @@
: bool
This specifies if the to_latex method of a Dataframe uses escapes special
characters.
- method. Valid values: False,True
+ Valid values: False,True
"""
pc_latex_longtable = """
:bool
This specifies if the to_latex method of a Dataframe uses the longtable
format.
- method. Valid values: False,True
+ Valid values: False,True
+"""
+
+pc_latex_multicolumn = """
+: bool
+ This specifies if the to_latex method of a Dataframe uses multicolumns
+ to pretty-print MultiIndex columns.
+ Valid values: False,True
+"""
+
+pc_latex_multicolumn_format = """
+: string
+ This specifies the format for multicolumn headers.
+ Can be surrounded with '|'.
+ Valid values: 'l', 'c', 'r', 'p{<width>}'
+"""
+
+pc_latex_multirow = """
+: bool
+ This specifies if the to_latex method of a Dataframe uses multirows
+ to pretty-print MultiIndex rows.
+ Valid values: False,True
"""
style_backup = dict()
@@ -339,6 +360,12 @@ def mpl_style_cb(key):
validator=is_bool)
cf.register_option('latex.longtable', False, pc_latex_longtable,
validator=is_bool)
+ cf.register_option('latex.multicolumn', True, pc_latex_multicolumn,
+ validator=is_bool)
+ cf.register_option('latex.multicolumn_format', 'l', pc_latex_multicolumn,
+ validator=is_text)
+ cf.register_option('latex.multirow', False, pc_latex_multirow,
+ validator=is_bool)
cf.deprecate_option('display.line_width',
msg=pc_line_width_deprecation_warning,
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 26a0a91094e7d..b3e43edc3eb55 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1614,10 +1614,11 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=True,
column_format=None, longtable=None, escape=None,
- encoding=None, decimal='.'):
- """
+ encoding=None, decimal='.', multicolumn=None,
+ multicolumn_format=None, multirow=None):
+ r"""
Render a DataFrame to a tabular environment table. You can splice
- this into a LaTeX document. Requires \\usepackage{booktabs}.
+ this into a LaTeX document. Requires \usepackage{booktabs}.
`to_latex`-specific options:
@@ -1628,27 +1629,54 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
- default: False
+ Default: False.
Use a longtable environment instead of tabular. Requires adding
- a \\usepackage{longtable} to your LaTeX preamble.
+ a \usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
- default: True
+ Default: True.
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
- Character recognized as decimal separator, e.g. ',' in Europe
+ Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
+ multicolumn : boolean, default True
+ Use \multicolumn to enhance MultiIndex columns.
+ The default will be read from the config module.
+
+ .. versionadded:: 0.20.0
+
+ multicolumn_format : str, default 'l'
+ The alignment for multicolumns, similar to `column_format`
+ The default will be read from the config module.
+
+ .. versionadded:: 0.20.0
+
+ multirow : boolean, default False
+ Use \multirow to enhance MultiIndex rows.
+ Requires adding a \usepackage{multirow} to your LaTeX preamble.
+ Will print centered labels (instead of top-aligned)
+ across the contained rows, separating groups via clines.
+ The default will be read from the pandas config module.
+
+ .. versionadded:: 0.20.0
+
"""
# Get defaults from the pandas config
if longtable is None:
longtable = get_option("display.latex.longtable")
if escape is None:
escape = get_option("display.latex.escape")
+ if multicolumn is None:
+ multicolumn = get_option("display.latex.multicolumn")
+ if multicolumn_format is None:
+ multicolumn_format = get_option("display.latex.multicolumn_format")
+ if multirow is None:
+ multirow = get_option("display.latex.multirow")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
@@ -1660,7 +1688,9 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
- encoding=encoding)
+ encoding=encoding, multicolumn=multicolumn,
+ multicolumn_format=multicolumn_format,
+ multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
diff --git a/pandas/formats/format.py b/pandas/formats/format.py
index 4c081770e0125..9dde3b0001c31 100644
--- a/pandas/formats/format.py
+++ b/pandas/formats/format.py
@@ -650,13 +650,17 @@ def _join_multiline(self, *strcols):
st = ed
return '\n\n'.join(str_lst)
- def to_latex(self, column_format=None, longtable=False, encoding=None):
+ def to_latex(self, column_format=None, longtable=False, encoding=None,
+ multicolumn=False, multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
latex_renderer = LatexFormatter(self, column_format=column_format,
- longtable=longtable)
+ longtable=longtable,
+ multicolumn=multicolumn,
+ multicolumn_format=multicolumn_format,
+ multirow=multirow)
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
@@ -824,11 +828,15 @@ class LatexFormatter(TableFormatter):
HTMLFormatter
"""
- def __init__(self, formatter, column_format=None, longtable=False):
+ def __init__(self, formatter, column_format=None, longtable=False,
+ multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.column_format = column_format
self.longtable = longtable
+ self.multicolumn = multicolumn
+ self.multicolumn_format = multicolumn_format
+ self.multirow = multirow
def write_result(self, buf):
"""
@@ -850,14 +858,21 @@ def get_col_type(dtype):
else:
return 'l'
+ # reestablish the MultiIndex that has been joined by _to_str_column
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.index.names)
+ cname = any(self.frame.columns.names)
+ lastcol = self.frame.index.nlevels - 1
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format()
blank = ' ' * len(lev2[0])
- lev3 = [blank] * clevels
+ # display column names in last index-column
+ if cname and i == lastcol:
+ lev3 = [x if x else '{}' for x in self.frame.columns.names]
+ else:
+ lev3 = [blank] * clevels
if name:
lev3.append(lev.name)
for level_idx, group in itertools.groupby(
@@ -885,10 +900,15 @@ def get_col_type(dtype):
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
- nlevels = self.frame.columns.nlevels
+ ilevels = self.frame.index.nlevels
+ clevels = self.frame.columns.nlevels
+ nlevels = clevels
if any(self.frame.index.names):
nlevels += 1
- for i, row in enumerate(zip(*strcols)):
+ strrows = list(zip(*strcols))
+ self.clinebuf = []
+
+ for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
@@ -910,8 +930,17 @@ def get_col_type(dtype):
if x else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
+ if i < clevels and self.fmt.header and self.multicolumn:
+ # sum up columns to multicolumns
+ crow = self._format_multicolumn(crow, ilevels)
+ if (i >= nlevels and self.fmt.index and self.multirow and
+ ilevels > 1):
+ # sum up rows to multirows
+ crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
+ if self.multirow and i < len(strrows) - 1:
+ self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
@@ -919,6 +948,80 @@ def get_col_type(dtype):
else:
buf.write('\\end{longtable}\n')
+ def _format_multicolumn(self, row, ilevels):
+ """
+ Combine columns belonging to a group to a single multicolumn entry
+ according to self.multicolumn_format
+
+ e.g.:
+ a & & & b & c &
+ will become
+ \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
+ """
+ row2 = list(row[:ilevels])
+ ncol = 1
+ coltext = ''
+
+ def append_col():
+ # write multicolumn if needed
+ if ncol > 1:
+ row2.append('\\multicolumn{{{0:d}}}{{{1:s}}}{{{2:s}}}'
+ .format(ncol, self.multicolumn_format,
+ coltext.strip()))
+ # don't modify where not needed
+ else:
+ row2.append(coltext)
+ for c in row[ilevels:]:
+ # if next col has text, write the previous
+ if c.strip():
+ if coltext:
+ append_col()
+ coltext = c
+ ncol = 1
+ # if not, add it to the previous multicolumn
+ else:
+ ncol += 1
+ # write last column name
+ if coltext:
+ append_col()
+ return row2
+
+ def _format_multirow(self, row, ilevels, i, rows):
+ """
+ Check following rows, whether row should be a multirow
+
+ e.g.: becomes:
+ a & 0 & \multirow{2}{*}{a} & 0 &
+ & 1 & & 1 &
+ b & 0 & \cline{1-2}
+ b & 0 &
+ """
+ for j in range(ilevels):
+ if row[j].strip():
+ nrow = 1
+ for r in rows[i + 1:]:
+ if not r[j].strip():
+ nrow += 1
+ else:
+ break
+ if nrow > 1:
+ # overwrite non-multirow entry
+ row[j] = '\\multirow{{{0:d}}}{{*}}{{{1:s}}}'.format(
+ nrow, row[j].strip())
+ # save when to end the current block with \cline
+ self.clinebuf.append([i + nrow - 1, j + 1])
+ return row
+
+ def _print_cline(self, buf, i, icol):
+ """
+ Print clines after multirow-blocks are finished
+ """
+ for cl in self.clinebuf:
+ if cl[0] == i:
+ buf.write('\cline{{{0:d}-{1:d}}}\n'.format(cl[1], icol))
+ # remove entries that have been written to buffer
+ self.clinebuf = [x for x in self.clinebuf if x[0] != i]
+
class HTMLFormatter(TableFormatter):
diff --git a/pandas/tests/formats/test_to_latex.py b/pandas/tests/formats/test_to_latex.py
index 89e18e1cec06e..17e1e18f03dd6 100644
--- a/pandas/tests/formats/test_to_latex.py
+++ b/pandas/tests/formats/test_to_latex.py
@@ -168,6 +168,24 @@ def test_to_latex_multiindex(self):
assert result == expected
+ # GH 14184
+ df = df.T
+ df.columns.names = ['a', 'b']
+ result = df.to_latex()
+ expected = r"""\begin{tabular}{lrrrrr}
+\toprule
+a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+b & 0 & 1 & 0 & 1 & 0 \\
+\midrule
+0 & 0 & 4 & 0 & 4 & 0 \\
+1 & 1 & 5 & 1 & 5 & 1 \\
+2 & 2 & 6 & 2 & 6 & 2 \\
+3 & 3 & 7 & 3 & 7 & 3 \\
+\bottomrule
+\end{tabular}
+"""
+ assert result == expected
+
# GH 10660
df = pd.DataFrame({'a': [0, 0, 1, 1],
'b': list('abab'),
@@ -189,16 +207,95 @@ def test_to_latex_multiindex(self):
assert result == expected
result = df.groupby('a').describe().to_latex()
- expected = ('\\begin{tabular}{lrrrrrrrr}\n\\toprule\n{} & c & '
- ' & & & & & & '
- '\\\\\n{} & count & mean & std & min & 25\\% & '
- '50\\% & 75\\% & max \\\\\na & & & '
- ' & & & & & \\\\\n\\midrule\n0 '
- '& 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 '
- '& 2.0 \\\\\n1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 '
- '& 3.5 & 3.75 & 4.0 '
- '\\\\\n\\bottomrule\n\\end{tabular}\n')
+ expected = r"""\begin{tabular}{lrrrrrrrr}
+\toprule
+{} & \multicolumn{8}{l}{c} \\
+{} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
+a & & & & & & & & \\
+\midrule
+0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
+1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
+\bottomrule
+\end{tabular}
+"""
+
+ assert result == expected
+
+ def test_to_latex_multicolumnrow(self):
+ df = pd.DataFrame({
+ ('c1', 0): dict((x, x) for x in range(5)),
+ ('c1', 1): dict((x, x + 5) for x in range(5)),
+ ('c2', 0): dict((x, x) for x in range(5)),
+ ('c2', 1): dict((x, x + 5) for x in range(5)),
+ ('c3', 0): dict((x, x) for x in range(5))
+ })
+ result = df.to_latex()
+ expected = r"""\begin{tabular}{lrrrrr}
+\toprule
+{} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+{} & 0 & 1 & 0 & 1 & 0 \\
+\midrule
+0 & 0 & 5 & 0 & 5 & 0 \\
+1 & 1 & 6 & 1 & 6 & 1 \\
+2 & 2 & 7 & 2 & 7 & 2 \\
+3 & 3 & 8 & 3 & 8 & 3 \\
+4 & 4 & 9 & 4 & 9 & 4 \\
+\bottomrule
+\end{tabular}
+"""
+ assert result == expected
+ result = df.to_latex(multicolumn=False)
+ expected = r"""\begin{tabular}{lrrrrr}
+\toprule
+{} & c1 & & c2 & & c3 \\
+{} & 0 & 1 & 0 & 1 & 0 \\
+\midrule
+0 & 0 & 5 & 0 & 5 & 0 \\
+1 & 1 & 6 & 1 & 6 & 1 \\
+2 & 2 & 7 & 2 & 7 & 2 \\
+3 & 3 & 8 & 3 & 8 & 3 \\
+4 & 4 & 9 & 4 & 9 & 4 \\
+\bottomrule
+\end{tabular}
+"""
+ assert result == expected
+
+ result = df.T.to_latex(multirow=True)
+ expected = r"""\begin{tabular}{llrrrrr}
+\toprule
+ & & 0 & 1 & 2 & 3 & 4 \\
+\midrule
+\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+\cline{1-7}
+\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+\cline{1-7}
+c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+\bottomrule
+\end{tabular}
+"""
+ assert result == expected
+
+ df.index = df.T.index
+ result = df.T.to_latex(multirow=True, multicolumn=True,
+ multicolumn_format='c')
+ expected = r"""\begin{tabular}{llrrrrr}
+\toprule
+ & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
+ & & 0 & 1 & 0 & 1 & 0 \\
+\midrule
+\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+\cline{1-7}
+\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+\cline{1-7}
+c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+\bottomrule
+\end{tabular}
+"""
assert result == expected
def test_to_latex_escape(self):
| - [x] closes #13508
- [X] tests added / passed
- [X] passes `git diff upstream/master | flake8 --diff`
- [X] whatsnew entry
Print names of MultiIndex columns.
Added ``multicolumn`` and ``multirow`` flags to ``to_latex``
which trigger the corresponding features.
Multirow adds clines to visually separate sections.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14184 | 2016-09-08T12:14:10Z | 2017-03-03T09:16:46Z | 2017-03-03T09:16:46Z | 2017-03-03T09:17:22Z |
BUG: fix str.contains for series containing only nan values | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index a007500322ed4..e077b126ae975 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1567,3 +1567,4 @@ Bug Fixes
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
- ``PeriodIndex`` can now accept ``list`` and ``array`` which contain ``pd.NaT`` (:issue:`13430`)
+- Bug in ``str.contains()`` for series containing only nan values (:issue:`14171`)
\ No newline at end of file
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 695e917c76ba0..4aee6f72b1d53 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -81,3 +81,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index b49761367b9b5..3041b17b99b17 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -165,7 +165,8 @@ def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if na_mask:
mask = isnull(arr)
try:
- result = lib.map_infer_mask(arr, f, mask.view(np.uint8))
+ convert = not all(mask)
+ result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError):
def g(x):
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 92fa7b976eb0e..4019bbe20ea1a 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2439,6 +2439,26 @@ def test_more_contains(self):
True, False, False])
assert_series_equal(result, expected)
+ def test_contains_nan(self):
+ # PR #14171
+ s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
+
+ result = s.str.contains('foo', na=False)
+ expected = Series([False, False, False], dtype=np.bool_)
+ assert_series_equal(result, expected)
+
+ result = s.str.contains('foo', na=True)
+ expected = Series([True, True, True], dtype=np.bool_)
+ assert_series_equal(result, expected)
+
+ result = s.str.contains('foo', na="foo")
+ expected = Series(["foo", "foo", "foo"], dtype=np.object_)
+ assert_series_equal(result, expected)
+
+ result = s.str.contains('foo')
+ expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
+ assert_series_equal(result, expected)
+
def test_more_replace(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA, 'CABA',
| - [x] closes #14171
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14182 | 2016-09-08T09:54:44Z | 2016-09-09T22:29:16Z | null | 2016-09-09T22:29:29Z |
MAINT: Use __module__ in _DeprecatedModule. | diff --git a/pandas/core/api.py b/pandas/core/api.py
index c0f39e2ac4717..b5e1de2063c7e 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -31,14 +31,12 @@
# see gh-14094.
from pandas.util.depr_module import _DeprecatedModule
-_alts = ['pandas.tseries.tools', 'pandas.tseries.offsets',
- 'pandas.tseries.frequencies']
_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
'customBusinessMonthEnd', 'customBusinessMonthBegin',
'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
'byearEnd', 'week']
-datetools = _DeprecatedModule(deprmod='pandas.core.datetools', alts=_alts,
+datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
removals=_removals)
from pandas.core.config import (get_option, set_option, reset_option,
diff --git a/pandas/util/depr_module.py b/pandas/util/depr_module.py
index 7e03a000a50ec..736d2cdaab31c 100644
--- a/pandas/util/depr_module.py
+++ b/pandas/util/depr_module.py
@@ -13,18 +13,11 @@ class _DeprecatedModule(object):
Parameters
----------
deprmod : name of module to be deprecated.
- alts : alternative modules to be used to access objects or methods
- available in module.
removals : objects or methods in module that will no longer be
accessible once module is removed.
"""
- def __init__(self, deprmod, alts=None, removals=None):
+ def __init__(self, deprmod, removals=None):
self.deprmod = deprmod
-
- self.alts = alts
- if self.alts is not None:
- self.alts = frozenset(self.alts)
-
self.removals = removals
if self.removals is not None:
self.removals = frozenset(self.removals)
@@ -33,47 +26,39 @@ def __init__(self, deprmod, alts=None, removals=None):
self.self_dir = frozenset(dir(self.__class__))
def __dir__(self):
- _dir = object.__dir__(self)
-
- if self.removals is not None:
- _dir.extend(list(self.removals))
+ deprmodule = self._import_deprmod()
+ return dir(deprmodule)
- if self.alts is not None:
- for modname in self.alts:
- module = importlib.import_module(modname)
- _dir.extend(dir(module))
+ def __repr__(self):
+ deprmodule = self._import_deprmod()
+ return repr(deprmodule)
- return _dir
+ __str__ = __repr__
def __getattr__(self, name):
if name in self.self_dir:
return object.__getattribute__(self, name)
- if self.removals is not None and name in self.removals:
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', category=FutureWarning)
- module = importlib.import_module(self.deprmod)
+ deprmodule = self._import_deprmod()
+ obj = getattr(deprmodule, name)
+ if self.removals is not None and name in self.removals:
warnings.warn(
"{deprmod}.{name} is deprecated and will be removed in "
"a future version.".format(deprmod=self.deprmod, name=name),
FutureWarning, stacklevel=2)
+ else:
+ # The object is actually located in another module.
+ warnings.warn(
+ "{deprmod}.{name} is deprecated. Please use "
+ "{modname}.{name} instead.".format(
+ deprmod=self.deprmod, modname=obj.__module__, name=name),
+ FutureWarning, stacklevel=2)
- return object.__getattribute__(module, name)
-
- if self.alts is not None:
- for modname in self.alts:
- module = importlib.import_module(modname)
-
- if hasattr(module, name):
- warnings.warn(
- "{deprmod}.{name} is deprecated. Please use "
- "{modname}.{name} instead.".format(
- deprmod=self.deprmod, modname=modname, name=name),
- FutureWarning, stacklevel=2)
-
- return getattr(module, name)
+ return obj
- raise AttributeError("module '{deprmod}' has no attribute "
- "'{name}'".format(deprmod=self.deprmod,
- name=name))
+ def _import_deprmod(self):
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', category=FutureWarning)
+ deprmodule = importlib.import_module(self.deprmod)
+ return deprmodule
| Follow-up to #14105. Uses the `__module__` method to correctly determine the location of the alternative
module to use.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14181 | 2016-09-08T03:44:04Z | 2016-09-14T08:31:04Z | 2016-09-14T08:31:04Z | 2016-09-14T14:52:06Z |
DOC: clean-up 0.19.0 whatsnew file | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 9f468ae6785cb..a007500322ed4 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1,25 +1,28 @@
.. _whatsnew_0190:
-v0.19.0 (August ??, 2016)
--------------------------
+v0.19.0 (September ??, 2016)
+----------------------------
-This is a major release from 0.18.1 and includes a small number of API changes, several new features,
+This is a major release from 0.18.1 and includes a number of API changes, several new features,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
-.. warning::
-
- pandas >= 0.19.0 will no longer silence numpy ufunc warnings upon import, see :ref:`here <whatsnew_0190.errstate>`.
-
Highlights include:
- :func:`merge_asof` for asof-style time-series joining, see :ref:`here <whatsnew_0190.enhancements.asof_merge>`
- ``.rolling()`` are now time-series aware, see :ref:`here <whatsnew_0190.enhancements.rolling_ts>`
- :func:`read_csv` now supports parsing ``Categorical`` data, see :ref:`here <whatsnew_0190.enhancements.read_csv_categorical>`
- A function :func:`union_categorical` has been added for combining categoricals, see :ref:`here <whatsnew_0190.enhancements.union_categoricals>`
-- pandas development api, see :ref:`here <whatsnew_0190.dev_api>`
- ``PeriodIndex`` now has its own ``period`` dtype, and changed to be more consistent with other ``Index`` classes. See :ref:`here <whatsnew_0190.api.period>`
-- Sparse data structures now gained enhanced support of ``int`` and ``bool`` dtypes, see :ref:`here <whatsnew_0190.sparse>`
+- Sparse data structures gained enhanced support of ``int`` and ``bool`` dtypes, see :ref:`here <whatsnew_0190.sparse>`
+- Comparison operations with ``Series`` no longer ignores the index, see :ref:`here <whatsnew_0190.api.series_ops>` for an overview of the API changes.
+- Introduction of a pandas development API for utility functions, see :ref:`here <whatsnew_0190.dev_api>`.
+- Deprecation of ``Panel4D`` and ``PanelND``. We recommend to represent these types of n-dimensional data with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
+- Removal of the previously deprecated modules ``pandas.io.data``, ``pandas.io.wb``, ``pandas.tools.rplot``.
+
+.. warning::
+
+ pandas >= 0.19.0 will no longer silence numpy ufunc warnings upon import, see :ref:`here <whatsnew_0190.errstate>`.
.. contents:: What's new in v0.19.0
:local:
@@ -35,7 +38,7 @@ New features
pandas development API
^^^^^^^^^^^^^^^^^^^^^^
-As part of making pandas APi more uniform and accessible in the future, we have created a standard
+As part of making pandas API more uniform and accessible in the future, we have created a standard
sub-package of pandas, ``pandas.api`` to hold public API's. We are starting by exposing type
introspection functions in ``pandas.api.types``. More sub-packages and officially sanctioned API's
will be published in future versions of pandas (:issue:`13147`, :issue:`13634`)
@@ -215,12 +218,12 @@ default of the index) in a DataFrame.
:ref:`Duplicate column names <io.dupe_names>` are now supported in :func:`read_csv` whether
they are in the file or passed in as the ``names`` parameter (:issue:`7160`, :issue:`9424`)
-.. ipython :: python
+.. ipython:: python
data = '0,1,2\n3,4,5'
names = ['a', 'b', 'a']
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -230,25 +233,25 @@ Previous Behavior:
0 2 1 2
1 5 4 5
-The first ``a`` column contains the same data as the second ``a`` column, when it should have
+The first ``a`` column contained the same data as the second ``a`` column, when it should have
contained the values ``[0, 3]``.
-New Behavior:
+**New behavior**:
-.. ipython :: python
+.. ipython:: python
- In [2]: pd.read_csv(StringIO(data), names=names)
+ pd.read_csv(StringIO(data), names=names)
.. _whatsnew_0190.enhancements.read_csv_categorical:
-:func:`read_csv` supports parsing ``Categorical`` directly
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``read_csv`` supports parsing ``Categorical`` directly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :func:`read_csv` function now supports parsing a ``Categorical`` column when
specified as a dtype (:issue:`10153`). Depending on the structure of the data,
this can result in a faster parse time and lower memory usage compared to
-converting to ``Categorical`` after parsing. See the io :ref:`docs here <io.categorical>`
+converting to ``Categorical`` after parsing. See the io :ref:`docs here <io.categorical>`.
.. ipython:: python
@@ -296,7 +299,7 @@ Categorical Concatenation
- ``concat`` and ``append`` now can concat ``category`` dtypes with different
``categories`` as ``object`` dtype (:issue:`13524`)
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -305,7 +308,7 @@ Previous Behavior:
In [3]: pd.concat([s1, s2])
ValueError: incompatible categories in categorical concat
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -407,12 +410,12 @@ After upgrading pandas, you may see *new* ``RuntimeWarnings`` being issued from
.. _whatsnew_0190.get_dummies_dtypes:
-get_dummies dtypes
-^^^^^^^^^^^^^^^^^^
+``get_dummies`` now returns integer dtypes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``pd.get_dummies`` function now returns dummy-encoded columns as small integers, rather than floats (:issue:`8725`). This should provide an improved memory footprint.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -424,22 +427,19 @@ Previous Behavior:
c float64
dtype: object
-New Behavior:
+**New behavior**:
.. ipython:: python
pd.get_dummies(['a', 'b', 'a', 'c']).dtypes
-.. _whatsnew_0190.enhancements.other:
-
-Other enhancements
-^^^^^^^^^^^^^^^^^^
+.. _whatsnew_0190.enhancements.to_numeric_downcast:
-- The ``.get_credentials()`` method of ``GbqConnector`` can now first try to fetch `the application default credentials <https://developers.google.com/identity/protocols/application-default-credentials>`__. See the :ref:`docs <io.bigquery_authentication>` for more details (:issue:`13577`).
+Downcast values to smallest possible dtype in ``to_numeric``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- The ``.tz_localize()`` method of ``DatetimeIndex`` and ``Timestamp`` has gained the ``errors`` keyword, so you can potentially coerce nonexistent timestamps to ``NaT``. The default behavior remains to raising a ``NonExistentTimeError`` (:issue:`13057`)
-- ``pd.to_numeric()`` now accepts a ``downcast`` parameter, which will downcast the data if possible to smallest specified numerical dtype (:issue:`13352`)
+``pd.to_numeric()`` now accepts a ``downcast`` parameter, which will downcast the data if possible to smallest specified numerical dtype (:issue:`13352`)
.. ipython:: python
@@ -447,6 +447,16 @@ Other enhancements
pd.to_numeric(s, downcast='unsigned')
pd.to_numeric(s, downcast='integer')
+
+.. _whatsnew_0190.enhancements.other:
+
+Other enhancements
+^^^^^^^^^^^^^^^^^^
+
+- The ``.get_credentials()`` method of ``GbqConnector`` can now first try to fetch `the application default credentials <https://developers.google.com/identity/protocols/application-default-credentials>`__. See the :ref:`docs <io.bigquery_authentication>` for more details (:issue:`13577`).
+
+- The ``.tz_localize()`` method of ``DatetimeIndex`` and ``Timestamp`` has gained the ``errors`` keyword, so you can potentially coerce nonexistent timestamps to ``NaT``. The default behavior remains to raising a ``NonExistentTimeError`` (:issue:`13057`)
+
- ``.to_hdf/read_hdf()`` now accept path objects (e.g. ``pathlib.Path``, ``py.path.local``) for the file path (:issue:`11773`)
- ``Timestamp`` can now accept positional and keyword parameters similar to :func:`datetime.datetime` (:issue:`10758`, :issue:`11630`)
@@ -471,13 +481,10 @@ Other enhancements
df.resample('M', on='date').sum()
df.resample('M', level='d').sum()
-- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the ``decimal`` option (:issue:`12933`)
-- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the ``na_filter`` option (:issue:`13321`)
-- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the ``memory_map`` option (:issue:`13381`)
+- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the
+ ``decimal`` (:issue:`12933`), ``na_filter`` (:issue:`13321`) and the ``memory_map`` option (:issue:`13381`).
- Consistent with the Python API, ``pd.read_csv()`` will now interpret ``+inf`` as positive infinity (:issue:`13274`)
-
- The ``pd.read_html()`` has gained support for the ``na_values``, ``converters``, ``keep_default_na`` options (:issue:`13461`)
-
- ``Categorical.astype()`` now accepts an optional boolean argument ``copy``, effective when dtype is categorical (:issue:`13209`)
- ``DataFrame`` has gained the ``.asof()`` method to return the last non-NaN values according to the selected subset (:issue:`13358`)
- The ``DataFrame`` constructor will now respect key ordering if a list of ``OrderedDict`` objects are passed in (:issue:`13304`)
@@ -504,43 +511,14 @@ Other enhancements
- :meth:`~DataFrame.to_html` now has a ``border`` argument to control the value in the opening ``<table>`` tag. The default is the value of the ``html.border`` option, which defaults to 1. This also affects the notebook HTML repr, but since Jupyter's CSS includes a border-width attribute, the visual effect is the same. (:issue:`11563`).
- Raise ``ImportError`` in the sql functions when ``sqlalchemy`` is not installed and a connection string is used (:issue:`11920`).
- Compatibility with matplotlib 2.0. Older versions of pandas should also work with matplotlib 2.0 (:issue:`13333`)
-
-.. _whatsnew_0190.api:
-
-
-API changes
-~~~~~~~~~~~
-
-
-- ``Timestamp.to_pydatetime`` will issue a ``UserWarning`` when ``warn=True``, and the instance has a non-zero number of nanoseconds, previously this would print a message to stdout. (:issue:`14101`)
-- Non-convertible dates in an excel date column will be returned without conversion and the column will be ``object`` dtype, rather than raising an exception (:issue:`10001`)
-- ``Series.unique()`` with datetime and timezone now returns return array of ``Timestamp`` with timezone (:issue:`13565`)
- ``Timestamp``, ``Period``, ``DatetimeIndex``, ``PeriodIndex`` and ``.dt`` accessor have gained a ``.is_leap_year`` property to check whether the date belongs to a leap year. (:issue:`13727`)
-- ``pd.Timedelta(None)`` is now accepted and will return ``NaT``, mirroring ``pd.Timestamp`` (:issue:`13687`)
-- ``Panel.to_sparse()`` will raise a ``NotImplementedError`` exception when called (:issue:`13778`)
-- ``Index.reshape()`` will raise a ``NotImplementedError`` exception when called (:issue:`12882`)
-- ``.filter()`` enforces mutual exclusion of the keyword arguments. (:issue:`12399`)
-- ``eval``'s upcasting rules for ``float32`` types have been updated to be more consistent with NumPy's rules. New behavior will not upcast to ``float64`` if you multiply a pandas ``float32`` object by a scalar float64. (:issue:`12388`)
-- An ``UnsupportedFunctionCall`` error is now raised if NumPy ufuncs like ``np.mean`` are called on groupby or resample objects (:issue:`12811`)
-- ``__setitem__`` will no longer apply a callable rhs as a function instead of storing it. Call ``where`` directly to get the previous behavior. (:issue:`13299`)
-- Calls to ``.sample()`` will respect the random seed set via ``numpy.random.seed(n)`` (:issue:`13161`)
-- ``Styler.apply`` is now more strict about the outputs your function must return. For ``axis=0`` or ``axis=1``, the output shape must be identical. For ``axis=None``, the output must be a DataFrame with identical columns and index labels. (:issue:`13222`)
-- ``Float64Index.astype(int)`` will now raise ``ValueError`` if ``Float64Index`` contains ``NaN`` values (:issue:`13149`)
-- ``TimedeltaIndex.astype(int)`` and ``DatetimeIndex.astype(int)`` will now return ``Int64Index`` instead of ``np.array`` (:issue:`13209`)
-- Passing ``Period`` with multiple frequencies to normal ``Index`` now returns ``Index`` with ``object`` dtype (:issue:`13664`)
-- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
-- ``PeriodIndex.fillna`` with ``Period`` has different freq now coerces to ``object`` dtype (:issue:`13664`)
-- Faceted boxplots from ``DataFrame.boxplot(by=col)`` now return a ``Series`` when ``return_type`` is not None. Previously these returned an ``OrderedDict``. Note that when ``return_type=None``, the default, these still return a 2-D NumPy array. (:issue:`12216`, :issue:`7096`)
- ``astype()`` will now accept a dict of column name to data types mapping as the ``dtype`` argument. (:issue:`12086`)
- The ``pd.read_json`` and ``DataFrame.to_json`` has gained support for reading and writing json lines with ``lines`` option see :ref:`Line delimited json <io.jsonl>` (:issue:`9180`)
-- ``pd.read_hdf`` will now raise a ``ValueError`` instead of ``KeyError``, if a mode other than ``r``, ``r+`` and ``a`` is supplied. (:issue:`13623`)
-- ``pd.read_csv()``, ``pd.read_table()``, and ``pd.read_hdf()`` raise the builtin ``FileNotFoundError`` exception for Python 3.x when called on a nonexistent file; this is back-ported as ``IOError`` in Python 2.x (:issue:`14086`)
-- More informative exceptions are passed through the csv parser. The exception type would now be the original exception type instead of ``CParserError``. (:issue:`13652`)
-- ``pd.read_csv()`` in the C engine will now issue a ``ParserWarning`` or raise a ``ValueError`` when ``sep`` encoded is more than one character long (:issue:`14065`)
-- ``DataFrame.values`` will now return ``float64`` with a ``DataFrame`` of mixed ``int64`` and ``uint64`` dtypes, conforming to ``np.find_common_type`` (:issue:`10364`, :issue:`13917`)
+.. _whatsnew_0190.api:
-.. _whatsnew_0190.api.tolist:
+API changes
+~~~~~~~~~~~
``Series.tolist()`` will now return Python types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -551,9 +529,8 @@ API changes
.. ipython:: python
s = pd.Series([1,2,3])
- type(s.tolist()[0])
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -561,7 +538,7 @@ Previous Behavior:
Out[7]:
<class 'numpy.int64'>
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -572,11 +549,11 @@ New Behavior:
``Series`` operators for different indexes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Following ``Series`` operators has been changed to make all operators consistent,
+Following ``Series`` operators have been changed to make all operators consistent,
including ``DataFrame`` (:issue:`1134`, :issue:`4581`, :issue:`13538`)
- ``Series`` comparison operators now raise ``ValueError`` when ``index`` are different.
-- ``Series`` logical operators align both ``index``.
+- ``Series`` logical operators align both ``index`` of left and right hand side.
.. warning::
Until 0.18.1, comparing ``Series`` with the same length, would succeed even if
@@ -607,7 +584,7 @@ Comparison operators raise ``ValueError`` when ``.index`` are different.
Previous Behavior (``Series``):
-``Series`` compares values ignoring ``.index`` as long as both lengthes are the same.
+``Series`` compared values ignoring the ``.index`` as long as both had the same length:
.. code-block:: ipython
@@ -618,7 +595,7 @@ Previous Behavior (``Series``):
C False
dtype: bool
-New Behavior (``Series``):
+**New behavior** (``Series``):
.. code-block:: ipython
@@ -627,13 +604,18 @@ New Behavior (``Series``):
ValueError: Can only compare identically-labeled Series objects
.. note::
+
To achieve the same result as previous versions (compare values based on locations ignoring ``.index``), compare both ``.values``.
.. ipython:: python
s1.values == s2.values
- If you want to compare ``Series`` aligning its ``.index``, see flexible comparison methods section below.
+ If you want to compare ``Series`` aligning its ``.index``, see flexible comparison methods section below:
+
+ .. ipython:: python
+
+ s1.eq(s2)
Current Behavior (``DataFrame``, no change):
@@ -646,9 +628,9 @@ Current Behavior (``DataFrame``, no change):
Logical operators
"""""""""""""""""
-Logical operators align both ``.index``.
+Logical operators align both ``.index`` of left and right hand side.
-Previous behavior (``Series``), only left hand side ``index`` is kept:
+Previous behavior (``Series``), only left hand side ``index`` was kept:
.. code-block:: ipython
@@ -661,7 +643,7 @@ Previous behavior (``Series``), only left hand side ``index`` is kept:
C False
dtype: bool
-New Behavior (``Series``):
+**New behavior** (``Series``):
.. ipython:: python
@@ -673,11 +655,11 @@ New Behavior (``Series``):
``Series`` logical operators fill a ``NaN`` result with ``False``.
.. note::
- To achieve the same result as previous versions (compare values based on locations ignoring ``.index``), compare both ``.values``.
+ To achieve the same result as previous versions (compare values based on only left hand side index), you can use ``reindex_like``:
.. ipython:: python
- s1.values & s2.values
+ s1 & s2.reindex_like(s1)
Current Behavior (``DataFrame``, no change):
@@ -714,7 +696,7 @@ A ``Series`` will now correctly promote its dtype for assignment with incompat v
s = pd.Series()
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -723,7 +705,7 @@ Previous Behavior:
In [3]: s["b"] = 3.0
TypeError: invalid type promotion
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -739,7 +721,7 @@ New Behavior:
Previously if ``.to_datetime()`` encountered mixed integers/floats and strings, but no datetimes with ``errors='coerce'`` it would convert all to ``NaT``.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -774,7 +756,7 @@ Merging will now preserve the dtype of the join keys (:issue:`8596`)
df2 = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]})
df2
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -791,7 +773,7 @@ Previous Behavior:
v1 float64
dtype: object
-New Behavior:
+**New behavior**:
We are able to preserve the join keys
@@ -820,7 +802,7 @@ Percentile identifiers in the index of a ``.describe()`` output will now be roun
s = pd.Series([0, 1, 2, 3, 4])
df = pd.DataFrame([0, 1, 2, 3, 4])
-Previous Behavior:
+**Previous behavior**:
The percentiles were rounded to at most one decimal place, which could raise ``ValueError`` for a data frame if the percentiles were duplicated.
@@ -847,7 +829,7 @@ The percentiles were rounded to at most one decimal place, which could raise ``V
...
ValueError: cannot reindex from a duplicate axis
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -868,10 +850,10 @@ Furthermore:
""""""""""""""""""""""""""""""""""""""""
``PeriodIndex`` now has its own ``period`` dtype. The ``period`` dtype is a
-pandas extension dtype like ``category`` or :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``). (:issue:`13941`).
+pandas extension dtype like ``category`` or the :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``). (:issue:`13941`).
As a consequence of this change, ``PeriodIndex`` no longer has an integer dtype:
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -886,7 +868,7 @@ Previous Behavior:
In [4]: pi.dtype
Out[4]: dtype('int64')
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -904,14 +886,14 @@ New Behavior:
Previously, ``Period`` has its own ``Period('NaT')`` representation different from ``pd.NaT``. Now ``Period('NaT')`` has been changed to return ``pd.NaT``. (:issue:`12759`, :issue:`13582`)
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
In [5]: pd.Period('NaT', freq='D')
Out[5]: Period('NaT', 'D')
-New Behavior:
+**New behavior**:
These result in ``pd.NaT`` without providing ``freq`` option.
@@ -921,9 +903,9 @@ These result in ``pd.NaT`` without providing ``freq`` option.
pd.Period(None)
-To be compat with ``Period`` addition and subtraction, ``pd.NaT`` now supports addition and subtraction with ``int``. Previously it raises ``ValueError``.
+To be compatible with ``Period`` addition and subtraction, ``pd.NaT`` now supports addition and subtraction with ``int``. Previously it raised ``ValueError``.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -931,7 +913,7 @@ Previous Behavior:
...
ValueError: Cannot add integral value to Timestamp without freq.
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -941,10 +923,10 @@ New Behavior:
``PeriodIndex.values`` now returns array of ``Period`` object
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-``.values`` is changed to return array of ``Period`` object, rather than array
-of ``int64`` (:issue:`13988`)
+``.values`` is changed to return an array of ``Period`` objects, rather than an array
+of integers (:issue:`13988`).
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -952,7 +934,7 @@ Previous Behavior:
In [7]: pi.values
array([492, 493])
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -982,7 +964,7 @@ Previous behavior:
FutureWarning: using '+' to provide set union with Indexes is deprecated, use '|' or .union()
Out[1]: Index(['a', 'b', 'c'], dtype='object')
-The same operation will now perform element-wise addition:
+**New behavior**: the same operation will now perform element-wise addition:
.. ipython:: python
@@ -1008,7 +990,7 @@ Previous behavior:
FutureWarning: using '-' to provide set differences with datetimelike Indexes is deprecated, use .difference()
Out[1]: DatetimeIndex(['2016-01-01'], dtype='datetime64[ns]', freq=None)
-New behavior:
+**New behavior**:
.. ipython:: python
@@ -1027,7 +1009,7 @@ New behavior:
idx1 = pd.Index([1, 2, 3, np.nan])
idx2 = pd.Index([0, 1, np.nan])
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -1037,7 +1019,7 @@ Previous Behavior:
In [4]: idx1.symmetric_difference(idx2)
Out[4]: Float64Index([0.0, nan, 2.0, 3.0], dtype='float64')
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -1050,12 +1032,11 @@ New Behavior:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``Index.unique()`` now returns unique values as an
-``Index`` of the appropriate ``dtype``. (:issue:`13395`)
-
+``Index`` of the appropriate ``dtype``. (:issue:`13395`).
Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex``,
``TimedeltaIndex`` and ``PeriodIndex`` returned ``Index`` to keep metadata like timezone.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -1063,11 +1044,12 @@ Previous Behavior:
Out[1]: array([1, 2, 3])
In [2]: pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz='Asia/Tokyo').unique()
- Out[2]: DatetimeIndex(['2011-01-01 00:00:00+09:00', '2011-01-02 00:00:00+09:00',
- '2011-01-03 00:00:00+09:00'],
- dtype='datetime64[ns, Asia/Tokyo]', freq=None)
+ Out[2]:
+ DatetimeIndex(['2011-01-01 00:00:00+09:00', '2011-01-02 00:00:00+09:00',
+ '2011-01-03 00:00:00+09:00'],
+ dtype='datetime64[ns, Asia/Tokyo]', freq=None)
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -1076,8 +1058,8 @@ New Behavior:
.. _whatsnew_0190.api.multiindex:
-``MultiIndex`` constructors preserve categorical dtypes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``MultiIndex`` constructors, ``groupby`` and ``set_index`` preserve categorical dtypes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``MultiIndex.from_arrays`` and ``MultiIndex.from_product`` will now preserve categorical dtype
in ``MultiIndex`` levels. (:issue:`13743`, :issue:`13854`)
@@ -1089,7 +1071,7 @@ in ``MultiIndex`` levels. (:issue:`13743`, :issue:`13854`)
midx = pd.MultiIndex.from_arrays([cat, lvl1])
midx
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -1099,7 +1081,7 @@ Previous Behavior:
In [5]: midx.get_level_values[0]
Out[5]: Index(['a', 'b'], dtype='object')
-New Behavior:
+**New behavior**: the single level is now a ``CategoricalIndex``:
.. ipython:: python
@@ -1115,7 +1097,7 @@ As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes
df_grouped = df.groupby(by=['A', 'C']).first()
df_set_idx = df.set_index(['A', 'C'])
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -1137,7 +1119,7 @@ Previous Behavior:
B int64
dtype: object
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -1152,8 +1134,8 @@ New Behavior:
``read_csv`` will progressively enumerate chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-When :func:`read_csv` is called with ``chunksize='n'`` and without specifying an index,
-each chunk used to have an independently generated index from `0`` to ``n-1``.
+When :func:`read_csv` is called with ``chunksize=n`` and without specifying an index,
+each chunk used to have an independently generated index from ``0`` to ``n-1``.
They are now given instead a progressive index, starting from ``0`` for the first chunk,
from ``n`` for the second, and so on, so that, when concatenated, they are identical to
the result of calling :func:`read_csv` without the ``chunksize=`` argument.
@@ -1163,7 +1145,7 @@ the result of calling :func:`read_csv` without the ``chunksize=`` argument.
data = 'A,B\n0,1\n2,3\n4,5\n6,7'
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -1175,7 +1157,7 @@ Previous Behavior:
0 4 5
1 6 7
-New Behavior:
+**New behavior**:
.. ipython :: python
@@ -1188,13 +1170,12 @@ Sparse Changes
These changes allow pandas to handle sparse data with more dtypes, and for work to make a smoother experience with data handling.
-
``int64`` and ``bool`` support enhancements
"""""""""""""""""""""""""""""""""""""""""""
-Sparse data structures now gained enhanced support of ``int64`` and ``bool`` ``dtype`` (:issue:`667`, :issue:`13849`)
+Sparse data structures now gained enhanced support of ``int64`` and ``bool`` ``dtype`` (:issue:`667`, :issue:`13849`).
-Previously, sparse data were ``float64`` dtype by default, even if all inputs were ``int`` or ``bool`` dtype. You had to specify ``dtype`` explicitly to create sparse data with ``int64`` dtype. Also, ``fill_value`` had to be specified explicitly becuase it's default was ``np.nan`` which doesn't appear in ``int64`` or ``bool`` data.
+Previously, sparse data were ``float64`` dtype by default, even if all inputs were of ``int`` or ``bool`` dtype. You had to specify ``dtype`` explicitly to create sparse data with ``int64`` dtype. Also, ``fill_value`` had to be specified explicitly because the default was ``np.nan`` which doesn't appear in ``int64`` or ``bool`` data.
.. code-block:: ipython
@@ -1221,9 +1202,9 @@ Previously, sparse data were ``float64`` dtype by default, even if all inputs we
IntIndex
Indices: array([0, 1], dtype=int32)
-As of v0.19.0, sparse data keeps the input dtype, and assign more appropriate ``fill_value`` default (``0`` for ``int64`` dtype, ``False`` for ``bool`` dtype).
+As of v0.19.0, sparse data keeps the input dtype, and uses more appropriate ``fill_value`` defaults (``0`` for ``int64`` dtype, ``False`` for ``bool`` dtype).
-.. ipython :: python
+.. ipython:: python
pd.SparseArray([1, 2, 0, 0], dtype=np.int64)
pd.SparseArray([True, False, False, False])
@@ -1235,29 +1216,29 @@ Operators now preserve dtypes
- Sparse data structure now can preserve ``dtype`` after arithmetic ops (:issue:`13848`)
-.. ipython:: python
+ .. ipython:: python
- s = pd.SparseSeries([0, 2, 0, 1], fill_value=0, dtype=np.int64)
- s.dtype
+ s = pd.SparseSeries([0, 2, 0, 1], fill_value=0, dtype=np.int64)
+ s.dtype
- s + 1
+ s + 1
- Sparse data structure now support ``astype`` to convert internal ``dtype`` (:issue:`13900`)
-.. ipython:: python
+ .. ipython:: python
- s = pd.SparseSeries([1., 0., 2., 0.], fill_value=0)
- s
- s.astype(np.int64)
+ s = pd.SparseSeries([1., 0., 2., 0.], fill_value=0)
+ s
+ s.astype(np.int64)
-``astype`` fails if data contains values which cannot be converted to specified ``dtype``.
-Note that the limitation is applied to ``fill_value`` which default is ``np.nan``.
+ ``astype`` fails if data contains values which cannot be converted to specified ``dtype``.
+ Note that the limitation is applied to ``fill_value`` which default is ``np.nan``.
-.. code-block:: ipython
+ .. code-block:: ipython
- In [7]: pd.SparseSeries([1., np.nan, 2., np.nan], fill_value=np.nan).astype(np.int64)
- Out[7]:
- ValueError: unable to coerce current fill_value nan to int64 dtype
+ In [7]: pd.SparseSeries([1., np.nan, 2., np.nan], fill_value=np.nan).astype(np.int64)
+ Out[7]:
+ ValueError: unable to coerce current fill_value nan to int64 dtype
Other sparse fixes
""""""""""""""""""
@@ -1301,7 +1282,7 @@ These types are the same on many platform, but for 64 bit python on Windows,
``np.int_`` is 32 bits, and ``np.intp`` is 64 bits. Changing this behavior improves performance for many
operations on that platform.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -1310,7 +1291,7 @@ Previous Behavior:
In [2]: i.get_indexer(['b', 'b', 'c']).dtype
Out[2]: dtype('int32')
-New Behavior:
+**New behavior**:
.. code-block:: ipython
@@ -1319,6 +1300,35 @@ New Behavior:
In [2]: i.get_indexer(['b', 'b', 'c']).dtype
Out[2]: dtype('int64')
+
+.. _whatsnew_0190.api.other:
+
+Other API Changes
+^^^^^^^^^^^^^^^^^
+
+- ``Timestamp.to_pydatetime`` will issue a ``UserWarning`` when ``warn=True``, and the instance has a non-zero number of nanoseconds, previously this would print a message to stdout. (:issue:`14101`)
+- Non-convertible dates in an excel date column will be returned without conversion and the column will be ``object`` dtype, rather than raising an exception (:issue:`10001`)
+- ``Series.unique()`` with datetime and timezone now returns return array of ``Timestamp`` with timezone (:issue:`13565`)
+- ``pd.Timedelta(None)`` is now accepted and will return ``NaT``, mirroring ``pd.Timestamp`` (:issue:`13687`)
+- ``Panel.to_sparse()`` will raise a ``NotImplementedError`` exception when called (:issue:`13778`)
+- ``Index.reshape()`` will raise a ``NotImplementedError`` exception when called (:issue:`12882`)
+- ``.filter()`` enforces mutual exclusion of the keyword arguments. (:issue:`12399`)
+- ``eval``'s upcasting rules for ``float32`` types have been updated to be more consistent with NumPy's rules. New behavior will not upcast to ``float64`` if you multiply a pandas ``float32`` object by a scalar float64. (:issue:`12388`)
+- An ``UnsupportedFunctionCall`` error is now raised if NumPy ufuncs like ``np.mean`` are called on groupby or resample objects (:issue:`12811`)
+- ``__setitem__`` will no longer apply a callable rhs as a function instead of storing it. Call ``where`` directly to get the previous behavior. (:issue:`13299`)
+- Calls to ``.sample()`` will respect the random seed set via ``numpy.random.seed(n)`` (:issue:`13161`)
+- ``Styler.apply`` is now more strict about the outputs your function must return. For ``axis=0`` or ``axis=1``, the output shape must be identical. For ``axis=None``, the output must be a DataFrame with identical columns and index labels. (:issue:`13222`)
+- ``Float64Index.astype(int)`` will now raise ``ValueError`` if ``Float64Index`` contains ``NaN`` values (:issue:`13149`)
+- ``TimedeltaIndex.astype(int)`` and ``DatetimeIndex.astype(int)`` will now return ``Int64Index`` instead of ``np.array`` (:issue:`13209`)
+- Passing ``Period`` with multiple frequencies to normal ``Index`` now returns ``Index`` with ``object`` dtype (:issue:`13664`)
+- ``PeriodIndex.fillna`` with ``Period`` has different freq now coerces to ``object`` dtype (:issue:`13664`)
+- Faceted boxplots from ``DataFrame.boxplot(by=col)`` now return a ``Series`` when ``return_type`` is not None. Previously these returned an ``OrderedDict``. Note that when ``return_type=None``, the default, these still return a 2-D NumPy array. (:issue:`12216`, :issue:`7096`)
+- ``pd.read_hdf`` will now raise a ``ValueError`` instead of ``KeyError``, if a mode other than ``r``, ``r+`` and ``a`` is supplied. (:issue:`13623`)
+- ``pd.read_csv()``, ``pd.read_table()``, and ``pd.read_hdf()`` raise the builtin ``FileNotFoundError`` exception for Python 3.x when called on a nonexistent file; this is back-ported as ``IOError`` in Python 2.x (:issue:`14086`)
+- More informative exceptions are passed through the csv parser. The exception type would now be the original exception type instead of ``CParserError``. (:issue:`13652`)
+- ``pd.read_csv()`` in the C engine will now issue a ``ParserWarning`` or raise a ``ValueError`` when ``sep`` encoded is more than one character long (:issue:`14065`)
+- ``DataFrame.values`` will now return ``float64`` with a ``DataFrame`` of mixed ``int64`` and ``uint64`` dtypes, conforming to ``np.find_common_type`` (:issue:`10364`, :issue:`13917`)
+
.. _whatsnew_0190.deprecations:
Deprecations
@@ -1326,10 +1336,10 @@ Deprecations
- ``Categorical.reshape`` has been deprecated and will be removed in a subsequent release (:issue:`12882`)
- ``Series.reshape`` has been deprecated and will be removed in a subsequent release (:issue:`12882`)
-- ``PeriodIndex.to_datetime`` has been deprecated in favour of ``PeriodIndex.to_timestamp`` (:issue:`8254`)
-- ``Timestamp.to_datetime`` has been deprecated in favour of ``Timestamp.to_pydatetime`` (:issue:`8254`)
+- ``PeriodIndex.to_datetime`` has been deprecated in favor of ``PeriodIndex.to_timestamp`` (:issue:`8254`)
+- ``Timestamp.to_datetime`` has been deprecated in favor of ``Timestamp.to_pydatetime`` (:issue:`8254`)
- ``pandas.core.datetools`` module has been deprecated and will be removed in a subsequent release (:issue:`14094`)
-- ``Index.to_datetime`` and ``DatetimeIndex.to_datetime`` have been deprecated in favour of ``pd.to_datetime`` (:issue:`8254`)
+- ``Index.to_datetime`` and ``DatetimeIndex.to_datetime`` have been deprecated in favor of ``pd.to_datetime`` (:issue:`8254`)
- ``SparseList`` has been deprecated and will be removed in a future version (:issue:`13784`)
- ``DataFrame.to_html()`` and ``DataFrame.to_latex()`` have dropped the ``colSpace`` parameter in favor of ``col_space`` (:issue:`13857`)
- ``DataFrame.to_sql()`` has deprecated the ``flavor`` parameter, as it is superfluous when SQLAlchemy is not installed (:issue:`13611`)
@@ -1350,6 +1360,7 @@ Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
- The ``SparsePanel`` class has been removed (:issue:`13778`)
- The ``pd.sandbox`` module has been removed in favor of the external library ``pandas-qt`` (:issue:`13670`)
- The ``pandas.io.data`` and ``pandas.io.wb`` modules are removed in favor of
@@ -1359,30 +1370,19 @@ Removal of prior version deprecations/changes
- ``DataFrame.to_csv()`` has dropped the ``engine`` parameter, as was deprecated in 0.17.1 (:issue:`11274`, :issue:`13419`)
- ``DataFrame.to_dict()`` has dropped the ``outtype`` parameter in favor of ``orient`` (:issue:`13627`, :issue:`8486`)
- ``pd.Categorical`` has dropped setting of the ``ordered`` attribute directly in favor of the ``set_ordered`` method (:issue:`13671`)
-- ``pd.Categorical`` has dropped the ``levels`` attribute in favour of ``categories`` (:issue:`8376`)
+- ``pd.Categorical`` has dropped the ``levels`` attribute in favor of ``categories`` (:issue:`8376`)
- ``DataFrame.to_sql()`` has dropped the ``mysql`` option for the ``flavor`` parameter (:issue:`13611`)
-- ``Panel.shift()`` has dropped the ``lags`` parameter in favour of ``periods`` (:issue:`14041`)
-- ``pd.Index`` has dropped the ``diff`` method in favour of ``difference`` (:issue:`13669`)
-
-- ``pd.DataFrame`` has dropped the ``to_wide`` method in favour of ``to_panel`` (:issue:`14039`)
+- ``Panel.shift()`` has dropped the ``lags`` parameter in favor of ``periods`` (:issue:`14041`)
+- ``pd.Index`` has dropped the ``diff`` method in favor of ``difference`` (:issue:`13669`)
+- ``pd.DataFrame`` has dropped the ``to_wide`` method in favor of ``to_panel`` (:issue:`14039`)
- ``Series.to_csv`` has dropped the ``nanRep`` parameter in favor of ``na_rep`` (:issue:`13804`)
- ``Series.xs``, ``DataFrame.xs``, ``Panel.xs``, ``Panel.major_xs``, and ``Panel.minor_xs`` have dropped the ``copy`` parameter (:issue:`13781`)
- ``str.split`` has dropped the ``return_type`` parameter in favor of ``expand`` (:issue:`13701`)
-- Removal of the legacy time rules (offset aliases), deprecated since 0.17.0 (this has been alias since 0.8.0) (:issue:`13590`, :issue:`13868`)
-
- Previous Behavior:
-
- .. code-block:: ipython
-
- In [2]: pd.date_range('2016-07-01', freq='W@MON', periods=3)
- pandas/tseries/frequencies.py:465: FutureWarning: Freq "W@MON" is deprecated, use "W-MON" as alternative.
- Out[2]: DatetimeIndex(['2016-07-04', '2016-07-11', '2016-07-18'], dtype='datetime64[ns]', freq='W-MON')
-
- Now legacy time rules raises ``ValueError``. For the list of currently supported offsets, see :ref:`here <timeseries.offset_aliases>`
-
+- Removal of the legacy time rules (offset aliases), deprecated since 0.17.0 (this has been alias since 0.8.0) (:issue:`13590`, :issue:`13868`). Now legacy time rules raises ``ValueError``. For the list of currently supported offsets, see :ref:`here <timeseries.offset_aliases>`.
- The default value for the ``return_type`` parameter for ``DataFrame.plot.box`` and ``DataFrame.boxplot`` changed from ``None`` to ``"axes"``. These methods will now return a matplotlib axes by default instead of a dictionary of artists. See :ref:`here <visualization.box.return>` (:issue:`6581`).
- The ``tquery`` and ``uquery`` functions in the ``pandas.io.sql`` module are removed (:issue:`5950`).
+
.. _whatsnew_0190.performance:
Performance Improvements
@@ -1390,8 +1390,7 @@ Performance Improvements
- Improved performance of sparse ``IntIndex.intersect`` (:issue:`13082`)
- Improved performance of sparse arithmetic with ``BlockIndex`` when the number of blocks are large, though recommended to use ``IntIndex`` in such cases (:issue:`13082`)
-- increased performance of ``DataFrame.quantile()`` as it now operates per-block (:issue:`11623`)
-
+- Improved performance of ``DataFrame.quantile()`` as it now operates per-block (:issue:`11623`)
- Improved performance of float64 hash table operations, fixing some very slow indexing and groupby operations in python 3 (:issue:`13166`, :issue:`13334`)
- Improved performance of ``DataFrameGroupBy.transform`` (:issue:`12737`)
- Improved performance of ``Index`` and ``Series`` ``.duplicated`` (:issue:`10235`)
@@ -1402,7 +1401,6 @@ Performance Improvements
- Improved performance of ``factorize`` of datetime with timezone (:issue:`13750`)
-
.. _whatsnew_0190.bug_fixes:
Bug Fixes
@@ -1568,3 +1566,4 @@ Bug Fixes
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
+- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
| WIP (it's actually not critical for rc, as we do link to the dev docs (which are updated after rc release) anyway)
| https://api.github.com/repos/pandas-dev/pandas/pulls/14176 | 2016-09-07T10:50:38Z | 2016-09-07T19:15:38Z | 2016-09-07T19:15:38Z | 2016-09-07T19:15:38Z |
Fix trivial typo in comment | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index f12ba8083f545..051cc8aa4d018 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -814,7 +814,7 @@ def apply(self, other):
if bd != 0:
skip_bd = BusinessDay(n=bd)
- # midnight busienss hour may not on BusinessDay
+ # midnight business hour may not on BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
| https://api.github.com/repos/pandas-dev/pandas/pulls/14174 | 2016-09-07T10:28:45Z | 2016-09-07T10:49:24Z | 2016-09-07T10:49:24Z | 2016-09-07T10:49:41Z | |
DOC: cleanup build warnings | diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index b5ad681426b15..6063e3e8bce45 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -935,134 +935,20 @@ method:
minor_axis=['a', 'b', 'c', 'd'])
panel.to_frame()
-
-.. _dsintro.panel4d:
-
-Panel4D (Experimental)
-----------------------
-
-.. warning::
-
- In 0.19.0 ``Panel4D`` is deprecated and will be removed in a future version. The recommended way to represent these types of n-dimensional data are with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas provides a :meth:`~Panel4D.to_xarray` method to automate this conversion.
-
-``Panel4D`` is a 4-Dimensional named container very much like a ``Panel``, but
-having 4 named dimensions. It is intended as a test bed for more N-Dimensional named
-containers.
-
- - **labels**: axis 0, each item corresponds to a Panel contained inside
- - **items**: axis 1, each item corresponds to a DataFrame contained inside
- - **major_axis**: axis 2, it is the **index** (rows) of each of the
- DataFrames
- - **minor_axis**: axis 3, it is the **columns** of each of the DataFrames
-
-``Panel4D`` is a sub-class of ``Panel``, so most methods that work on Panels are
-applicable to Panel4D. The following methods are disabled:
-
- - ``join , to_frame , to_excel , to_sparse , groupby``
-
-Construction of Panel4D works in a very similar manner to a ``Panel``
-
-From 4D ndarray with optional axis labels
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. ipython:: python
-
- p4d = pd.Panel4D(np.random.randn(2, 2, 5, 4),
- labels=['Label1','Label2'],
- items=['Item1', 'Item2'],
- major_axis=pd.date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
- p4d
-
-
-From dict of Panel objects
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. ipython:: python
-
- data = { 'Label1' : pd.Panel({ 'Item1' : pd.DataFrame(np.random.randn(4, 3)) }),
- 'Label2' : pd.Panel({ 'Item2' : pd.DataFrame(np.random.randn(4, 2)) }) }
- pd.Panel4D(data)
-
-Note that the values in the dict need only be **convertible to Panels**.
-Thus, they can be any of the other valid inputs to Panel as per above.
-
-Slicing
-~~~~~~~
-
-Slicing works in a similar manner to a Panel. ``[]`` slices the first dimension.
-``.ix`` allows you to slice arbitrarily and get back lower dimensional objects
-
-.. ipython:: python
-
- p4d['Label1']
-
-4D -> Panel
-
-.. ipython:: python
-
- p4d.ix[:,:,:,'A']
-
-4D -> DataFrame
-
-.. ipython:: python
-
- p4d.ix[:,:,0,'A']
-
-4D -> Series
-
-.. ipython:: python
-
- p4d.ix[:,0,0,'A']
-
-Transposing
-~~~~~~~~~~~
-
-A Panel4D can be rearranged using its ``transpose`` method (which does not make a
-copy by default unless the data are heterogeneous):
-
-.. ipython:: python
-
- p4d.transpose(3, 2, 1, 0)
-
.. _dsintro.panelnd:
+.. _dsintro.panel4d:
-PanelND (Experimental)
-----------------------
+Panel4D and PanelND (Deprecated)
+--------------------------------
.. warning::
- In 0.19.0 ``PanelND`` is deprecated and will be removed in a future version. The recommended way to represent these types of n-dimensional data are with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
+ In 0.19.0 ``Panel4D`` and ``PanelND`` are deprecated and will be removed in
+ a future version. The recommended way to represent these types of
+ n-dimensional data are with the
+ `xarray package <http://xarray.pydata.org/en/stable/>`__.
+ Pandas provides a :meth:`~Panel4D.to_xarray` method to automate
+ this conversion.
-PanelND is a module with a set of factory functions to enable a user to construct N-dimensional named
-containers like Panel4D, with a custom set of axis labels. Thus a domain-specific container can easily be
-created.
-
-The following creates a Panel5D. A new panel type object must be sliceable into a lower dimensional object.
-Here we slice to a Panel4D.
-
-.. ipython:: python
- :okwarning:
-
- from pandas.core import panelnd
- Panel5D = panelnd.create_nd_panel_factory(
- klass_name = 'Panel5D',
- orders = [ 'cool', 'labels','items','major_axis','minor_axis'],
- slices = { 'labels' : 'labels', 'items' : 'items',
- 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
- slicer = pd.Panel4D,
- aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
- stat_axis = 2)
-
- p5d = Panel5D(dict(C1 = p4d))
- p5d
-
- # print a slice of our 5D
- p5d.ix['C1',:,:,0:3,:]
-
- # transpose it
- p5d.transpose(1,2,3,4,0)
-
- # look at the shape & dim
- p5d.shape
- p5d.ndim
+See the `docs of a previous version <http://pandas.pydata.org/pandas-docs/version/0.18.1/dsintro.html#panel4d-experimental>`__
+for documentation on these objects.
diff --git a/doc/source/install.rst b/doc/source/install.rst
index f8ee0542ea17e..6295e6f6cbb68 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -255,6 +255,7 @@ Optional Dependencies
* `matplotlib <http://matplotlib.org/>`__: for plotting
* For Excel I/O:
+
* `xlrd/xlwt <http://www.python-excel.org/>`__: Excel reading (xlrd) and writing (xlwt)
* `openpyxl <http://packages.python.org/openpyxl/>`__: openpyxl version 1.6.1
or higher (but lower than 2.0.0), or version 2.2 or higher, for writing .xlsx files (xlrd >= 0.9.0)
@@ -296,8 +297,8 @@ Optional Dependencies
<html-gotchas>`. It explains issues surrounding the installation and
usage of the above three libraries
* You may need to install an older version of `BeautifulSoup4`_:
- - Versions 4.2.1, 4.1.3 and 4.0.2 have been confirmed for 64 and
- 32-bit Ubuntu/Debian
+ Versions 4.2.1, 4.1.3 and 4.0.2 have been confirmed for 64 and 32-bit
+ Ubuntu/Debian
* Additionally, if you're using `Anaconda`_ you should definitely
read :ref:`the gotchas about HTML parsing libraries <html-gotchas>`
diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst
index b6c5c15bc9081..d3f921f8762cc 100644
--- a/doc/source/sparse.rst
+++ b/doc/source/sparse.rst
@@ -9,7 +9,7 @@
import pandas as pd
import pandas.util.testing as tm
np.set_printoptions(precision=4, suppress=True)
- options.display.max_rows = 15
+ pd.options.display.max_rows = 15
**********************
Sparse data structures
@@ -90,38 +90,10 @@ can be converted back to a regular ndarray by calling ``to_dense``:
SparseList
----------
-.. note:: The ``SparseList`` class has been deprecated and will be removed in a future version.
+The ``SparseList`` class has been deprecated and will be removed in a future version.
+See the `docs of a previous version <http://pandas.pydata.org/pandas-docs/version/0.18.1/sparse.html#sparselist>`__
+for documentation on ``SparseList``.
-``SparseList`` is a list-like data structure for managing a dynamic collection
-of SparseArrays. To create one, simply call the ``SparseList`` constructor with
-a ``fill_value`` (defaulting to ``NaN``):
-
-.. ipython:: python
-
- spl = pd.SparseList()
- spl
-
-The two important methods are ``append`` and ``to_array``. ``append`` can
-accept scalar values or any 1-dimensional sequence:
-
-.. ipython:: python
- :suppress:
-
-.. ipython:: python
-
- spl.append(np.array([1., np.nan, np.nan, 2., 3.]))
- spl.append(5)
- spl.append(sparr)
- spl
-
-As you can see, all of the contents are stored internally as a list of
-memory-efficient ``SparseArray`` objects. Once you've accumulated all of the
-data, you can call ``to_array`` to get a single ``SparseArray`` with all the
-data:
-
-.. ipython:: python
-
- spl.to_array()
SparseIndex objects
-------------------
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 36e492df29983..7ab97c6af3583 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1219,7 +1219,7 @@ objects.
ts.shift(1)
The shift method accepts an ``freq`` argument which can accept a
-``DateOffset`` class or other ``timedelta``-like object or also a :ref:`offset alias <timeseries.alias>`:
+``DateOffset`` class or other ``timedelta``-like object or also a :ref:`offset alias <timeseries.offset_aliases>`:
.. ipython:: python
@@ -1494,7 +1494,7 @@ level of ``MultiIndex``, its name or location can be passed to the
.. ipython:: python
- df.resample(level='d').sum()
+ df.resample('M', level='d').sum()
.. _timeseries.periods:
@@ -1630,8 +1630,6 @@ Period Dtypes
``PeriodIndex`` has a custom ``period`` dtype. This is a pandas extension
dtype similar to the :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``).
-.. _timeseries.timezone_series:
-
The ``period`` dtype holds the ``freq`` attribute and is represented with
``period[freq]`` like ``period[D]`` or ``period[M]``, using :ref:`frequency strings <timeseries.offset_aliases>`.
diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt
index 84f2a77203c41..239d6c9c6e0d4 100644
--- a/doc/source/whatsnew/v0.14.1.txt
+++ b/doc/source/whatsnew/v0.14.1.txt
@@ -156,7 +156,7 @@ Experimental
~~~~~~~~~~~~
- ``pandas.io.data.Options`` has a new method, ``get_all_data`` method, and now consistently returns a
- multi-indexed ``DataFrame``, see :ref:`the docs <remote_data.yahoo_options>`. (:issue:`5602`)
+ multi-indexed ``DataFrame`` (:issue:`5602`)
- ``io.gbq.read_gbq`` and ``io.gbq.to_gbq`` were refactored to remove the
dependency on the Google ``bq.py`` command line client. This submodule
now uses ``httplib2`` and the Google ``apiclient`` and ``oauth2client`` API client
diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt
index a25e5a80b65fc..cd9298c74539a 100644
--- a/doc/source/whatsnew/v0.15.1.txt
+++ b/doc/source/whatsnew/v0.15.1.txt
@@ -185,8 +185,6 @@ API changes
2014-11-22 call AAPL141122C00110000 1.02
2014-11-28 call AAPL141128C00110000 1.32
- See the Options documentation in :ref:`Remote Data <remote_data.yahoo_options>`
-
.. _whatsnew_0151.datetime64_plotting:
- pandas now also registers the ``datetime64`` dtype in matplotlib's units registry
@@ -257,7 +255,7 @@ Enhancements
- Added support for 3-character ISO and non-standard country codes in :func:`io.wb.download()` (:issue:`8482`)
-- :ref:`World Bank data requests <remote_data.wb>` now will warn/raise based
+- World Bank data requests now will warn/raise based
on an ``errors`` argument, as well as a list of hard-coded country codes and
the World Bank's JSON response. In prior versions, the error messages
didn't look at the World Bank's JSON response. Problem-inducing input were
diff --git a/doc/source/whatsnew/v0.8.0.txt b/doc/source/whatsnew/v0.8.0.txt
index cf6ac7c1e6ad2..4136c108fba57 100644
--- a/doc/source/whatsnew/v0.8.0.txt
+++ b/doc/source/whatsnew/v0.8.0.txt
@@ -59,7 +59,7 @@ Time series changes and improvements
aggregation functions, and control over how the intervals and result labeling
are defined. A suite of high performance Cython/C-based resampling functions
(including Open-High-Low-Close) have also been implemented.
-- Revamp of :ref:`frequency aliases <timeseries.alias>` and support for
+- Revamp of :ref:`frequency aliases <timeseries.offset_aliases>` and support for
**frequency shortcuts** like '15min', or '1h30min'
- New :ref:`DatetimeIndex class <timeseries.datetimeindex>` supports both fixed
frequency and irregular time
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5a17401ea67b1..ea5dca32945e8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3998,7 +3998,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False):
converted : type of caller
To learn more about the frequency strings, please see `this link
- <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
+<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
from pandas.tseries.resample import asfreq
return asfreq(self, freq, method=method, how=how, normalize=normalize)
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 068cfee2b2aa2..8f23e82daf2e3 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -630,16 +630,20 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
https://developers.google.com/api-client-library/python/apis/bigquery/v2
Authentication to the Google BigQuery service is via OAuth 2.0.
+
- If "private_key" is not provided:
- By default "application default credentials" are used.
- .. versionadded:: 0.19.0
+ By default "application default credentials" are used.
+
+ .. versionadded:: 0.19.0
+
+ If default application credentials are not found or are restrictive,
+ user account credentials are used. In this case, you will be asked to
+ grant permissions for product name 'pandas GBQ'.
- If default application credentials are not found or are restrictive,
- user account credentials are used. In this case, you will be asked to
- grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
- Service account credentials will be used to authenticate.
+
+ Service account credentials will be used to authenticate.
Parameters
----------
@@ -747,16 +751,20 @@ def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
https://developers.google.com/api-client-library/python/apis/bigquery/v2
Authentication to the Google BigQuery service is via OAuth 2.0.
+
- If "private_key" is not provided:
- By default "application default credentials" are used.
- .. versionadded:: 0.19.0
+ By default "application default credentials" are used.
+
+ .. versionadded:: 0.19.0
+
+ If default application credentials are not found or are restrictive,
+ user account credentials are used. In this case, you will be asked to
+ grant permissions for product name 'pandas GBQ'.
- If default application credentials are not found or are restrictive,
- user account credentials are used. In this case, you will be asked to
- grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
- Service account credentials will be used to authenticate.
+
+ Service account credentials will be used to authenticate.
Parameters
----------
| https://api.github.com/repos/pandas-dev/pandas/pulls/14172 | 2016-09-07T09:50:46Z | 2016-09-07T13:57:22Z | 2016-09-07T13:57:22Z | 2016-09-07T13:57:22Z | |
API/DEPR: Remove +/- as setops for DatetimeIndex/PeriodIndex (GH9630) | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 2d93652ca91db..9345f11aca341 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -932,14 +932,16 @@ New Behavior:
Index ``+`` / ``-`` no longer used for set operations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Addition and subtraction of the base Index type (not the numeric subclasses)
+Addition and subtraction of the base Index type and of DatetimeIndex
+(not the numeric index types)
previously performed set operations (set union and difference). This
behaviour was already deprecated since 0.15.0 (in favor using the specific
``.union()`` and ``.difference()`` methods), and is now disabled. When
possible, ``+`` and ``-`` are now used for element-wise operations, for
-example for concatenating strings (:issue:`8227`, :issue:`14127`).
+example for concatenating strings or subtracting datetimes
+(:issue:`8227`, :issue:`14127`).
-Previous Behavior:
+Previous behavior:
.. code-block:: ipython
@@ -962,6 +964,23 @@ For example, the behaviour of adding two integer Indexes:
is unchanged. The base ``Index`` is now made consistent with this behaviour.
+Further, because of this change, it is now possible to subtract two
+DatetimeIndex objects resulting in a TimedeltaIndex:
+
+Previous behavior:
+
+.. code-block:: ipython
+
+ In [1]: pd.DatetimeIndex(['2016-01-01', '2016-01-02']) - pd.DatetimeIndex(['2016-01-02', '2016-01-03'])
+ FutureWarning: using '-' to provide set differences with datetimelike Indexes is deprecated, use .difference()
+ Out[1]: DatetimeIndex(['2016-01-01'], dtype='datetime64[ns]', freq=None)
+
+New behavior:
+
+.. ipython:: python
+
+ pd.DatetimeIndex(['2016-01-01', '2016-01-02']) - pd.DatetimeIndex(['2016-01-02', '2016-01-03'])
+
.. _whatsnew_0190.api.difference:
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 1690a9b229db2..3b676b894d355 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -2,7 +2,6 @@
Base and utility classes for tseries type pandas objects.
"""
-import warnings
from datetime import datetime, timedelta
from pandas import compat
@@ -628,10 +627,9 @@ def __add__(self, other):
raise TypeError("cannot add TimedeltaIndex and {typ}"
.format(typ=type(other)))
elif isinstance(other, Index):
- warnings.warn("using '+' to provide set union with "
- "datetimelike Indexes is deprecated, "
- "use .union()", FutureWarning, stacklevel=2)
- return self.union(other)
+ raise TypeError("cannot add {typ1} and {typ2}"
+ .format(typ1=type(self).__name__,
+ typ2=type(other).__name__))
elif isinstance(other, (DateOffset, timedelta, np.timedelta64,
tslib.Timedelta)):
return self._add_delta(other)
@@ -646,6 +644,7 @@ def __add__(self, other):
def __sub__(self, other):
from pandas.core.index import Index
+ from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
@@ -653,13 +652,14 @@ def __sub__(self, other):
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError("cannot subtract TimedeltaIndex and {typ}"
- .format(typ=type(other)))
+ .format(typ=type(other).__name__))
return self._add_delta(-other)
+ elif isinstance(other, DatetimeIndex):
+ return self._sub_datelike(other)
elif isinstance(other, Index):
- warnings.warn("using '-' to provide set differences with "
- "datetimelike Indexes is deprecated, "
- "use .difference()", FutureWarning, stacklevel=2)
- return self.difference(other)
+ raise TypeError("cannot subtract {typ1} and {typ2}"
+ .format(typ1=type(self).__name__,
+ typ2=type(other).__name__))
elif isinstance(other, (DateOffset, timedelta, np.timedelta64,
tslib.Timedelta)):
return self._add_delta(-other)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 351edf1b38352..e26a0548fdc78 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -731,19 +731,43 @@ def _add_datelike(self, other):
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
- other = Timestamp(other)
- if other is tslib.NaT:
- result = self._nat_new(box=False)
- # require tz compat
- elif not self._has_same_tz(other):
- raise TypeError("Timestamp subtraction must have the same "
- "timezones or no timezones")
+ if isinstance(other, DatetimeIndex):
+ # require tz compat
+ if not self._has_same_tz(other):
+ raise TypeError("DatetimeIndex subtraction must have the same "
+ "timezones or no timezones")
+ result = self._sub_datelike_dti(other)
+ elif isinstance(other, (tslib.Timestamp, datetime)):
+ other = Timestamp(other)
+ if other is tslib.NaT:
+ result = self._nat_new(box=False)
+ # require tz compat
+ elif not self._has_same_tz(other):
+ raise TypeError("Timestamp subtraction must have the same "
+ "timezones or no timezones")
+ else:
+ i8 = self.asi8
+ result = i8 - other.value
+ result = self._maybe_mask_results(result,
+ fill_value=tslib.iNaT)
else:
- i8 = self.asi8
- result = i8 - other.value
- result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
+ raise TypeError("cannot subtract DatetimeIndex and {typ}"
+ .format(typ=type(other).__name__))
return TimedeltaIndex(result, name=self.name, copy=False)
+ def _sub_datelike_dti(self, other):
+ """subtraction of two DatetimeIndexes"""
+ if not len(self) == len(other):
+ raise ValueError("cannot add indices of unequal length")
+
+ self_i8 = self.asi8
+ other_i8 = other.asi8
+ new_values = self_i8 - other_i8
+ if self.hasnans or other.hasnans:
+ mask = (self._isnan) | (other._isnan)
+ new_values[mask] = tslib.iNaT
+ return new_values.view('i8')
+
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 96ff74c819624..8a86fcba32ecb 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -360,7 +360,7 @@ def test_resolution(self):
tz=tz)
self.assertEqual(idx.resolution, expected)
- def test_add_iadd(self):
+ def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
@@ -378,17 +378,12 @@ def test_add_iadd(self):
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
- # GH9094
- with tm.assert_produces_warning(FutureWarning):
- result_add = rng + other
- result_union = rng.union(other)
- tm.assert_index_equal(result_add, expected)
+ result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
- # GH9094
- with tm.assert_produces_warning(FutureWarning):
- rng += other
- tm.assert_index_equal(rng, expected)
+
+ def test_add_iadd(self):
+ for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
@@ -421,7 +416,26 @@ def test_add_iadd(self):
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
- def test_sub_isub(self):
+ def test_add_dti_dti(self):
+ # previously performed setop (deprecated in 0.16.0), now raises
+ # TypeError (GH14164)
+
+ dti = date_range('20130101', periods=3)
+ dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
+
+ with tm.assertRaises(TypeError):
+ dti + dti
+
+ with tm.assertRaises(TypeError):
+ dti_tz + dti_tz
+
+ with tm.assertRaises(TypeError):
+ dti_tz + dti
+
+ with tm.assertRaises(TypeError):
+ dti + dti_tz
+
+ def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
@@ -439,9 +453,11 @@ def test_sub_isub(self):
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
- result_union = rng.difference(other)
+ result_diff = rng.difference(other)
+ tm.assert_index_equal(result_diff, expected)
- tm.assert_index_equal(result_union, expected)
+ def test_sub_isub(self):
+ for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
@@ -449,9 +465,10 @@ def test_sub_isub(self):
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
- result = rng - delta
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
+
+ result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
@@ -466,6 +483,47 @@ def test_sub_isub(self):
rng -= 1
tm.assert_index_equal(rng, expected)
+ def test_sub_dti_dti(self):
+ # previously performed setop (deprecated in 0.16.0), now changed to
+ # return subtraction -> TimeDeltaIndex (GH ...)
+
+ dti = date_range('20130101', periods=3)
+ dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
+ dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
+ expected = TimedeltaIndex([0, 0, 0])
+
+ result = dti - dti
+ tm.assert_index_equal(result, expected)
+
+ result = dti_tz - dti_tz
+ tm.assert_index_equal(result, expected)
+
+ with tm.assertRaises(TypeError):
+ dti_tz - dti
+
+ with tm.assertRaises(TypeError):
+ dti - dti_tz
+
+ with tm.assertRaises(TypeError):
+ dti_tz - dti_tz2
+
+ # isub
+ dti -= dti
+ tm.assert_index_equal(dti, expected)
+
+ # different length raises ValueError
+ dti1 = date_range('20130101', periods=3)
+ dti2 = date_range('20130101', periods=4)
+ with tm.assertRaises(ValueError):
+ dti1 - dti2
+
+ # NaN propagation
+ dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
+ dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
+ expected = TimedeltaIndex(['1 days', np.nan, np.nan])
+ result = dti2 - dti1
+ tm.assert_index_equal(result, expected)
+
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
@@ -1239,50 +1297,6 @@ def _check(result, expected):
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
- def test_dti_dti_deprecated_ops(self):
-
- # deprecated in 0.16.0 (GH9094)
- # change to return subtraction -> TimeDeltaIndex in 0.17.0
- # shoudl move to the appropriate sections above
-
- dti = date_range('20130101', periods=3)
- dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
-
- with tm.assert_produces_warning(FutureWarning):
- result = dti - dti
- expected = Index([])
- tm.assert_index_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result = dti + dti
- expected = dti
- tm.assert_index_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result = dti_tz - dti_tz
- expected = Index([])
- tm.assert_index_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result = dti_tz + dti_tz
- expected = dti_tz
- tm.assert_index_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result = dti_tz - dti
- expected = dti_tz
- tm.assert_index_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result = dti - dti_tz
- expected = dti
- tm.assert_index_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- self.assertRaises(TypeError, lambda: dti_tz + dti)
- with tm.assert_produces_warning(FutureWarning):
- self.assertRaises(TypeError, lambda: dti + dti_tz)
-
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
@@ -2005,7 +2019,7 @@ def test_resolution(self):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
- def test_add_iadd(self):
+ def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
@@ -2031,7 +2045,8 @@ def test_add_iadd(self):
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'
- '2000-01-01 09:08'], freq='T')
+ '2000-01-01 09:08'],
+ freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
@@ -2052,20 +2067,19 @@ def test_add_iadd(self):
expected6),
(rng7, other7, expected7)]:
- # GH9094
- with tm.assert_produces_warning(FutureWarning):
- result_add = rng + other
-
result_union = rng.union(other)
-
- tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
- # GH 6527
- # GH9094
- with tm.assert_produces_warning(FutureWarning):
- rng += other
- tm.assert_index_equal(rng, expected)
+ def test_add_iadd(self):
+ rng = pd.period_range('1/1/2000', freq='D', periods=5)
+ other = pd.period_range('1/6/2000', freq='D', periods=5)
+
+ # previously performed setop union, now raises TypeError (GH14164)
+ with tm.assertRaises(TypeError):
+ rng + other
+
+ with tm.assertRaises(TypeError):
+ rng += other
# offset
# DateOffset
@@ -2152,7 +2166,7 @@ def test_add_iadd(self):
rng += 1
tm.assert_index_equal(rng, expected)
- def test_sub_isub(self):
+ def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
@@ -2194,6 +2208,19 @@ def test_sub_isub(self):
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
+ def test_sub_isub(self):
+
+ # previously performed setop, now raises TypeError (GH14164)
+ # TODO needs to wait on #13077 for decision on result type
+ rng = pd.period_range('1/1/2000', freq='D', periods=5)
+ other = pd.period_range('1/6/2000', freq='D', periods=5)
+
+ with tm.assertRaises(TypeError):
+ rng - other
+
+ with tm.assertRaises(TypeError):
+ rng -= other
+
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
| xref #13777, deprecations put in place in #9630
| https://api.github.com/repos/pandas-dev/pandas/pulls/14164 | 2016-09-06T15:53:21Z | 2016-09-07T13:11:04Z | 2016-09-07T13:11:04Z | 2016-09-07T13:24:30Z |
ERR: improves message raised for bad names in usecols (GH14154) | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 1c12a145caf72..dd8565670a6bf 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -471,6 +471,7 @@ Other enhancements
- :meth:`~DataFrame.to_html` now has a ``border`` argument to control the value in the opening ``<table>`` tag. The default is the value of the ``html.border`` option, which defaults to 1. This also affects the notebook HTML repr, but since Jupyter's CSS includes a border-width attribute, the visual effect is the same. (:issue:`11563`).
- Raise ``ImportError`` in the sql functions when ``sqlalchemy`` is not installed and a connection string is used (:issue:`11920`).
- Compatibility with matplotlib 2.0. Older versions of pandas should also work with matplotlib 2.0 (:issue:`13333`)
+- When using the ``usecols`` argument in the ``read`` functions, specifying a column name that isn't found now generates a more helpful error message (:issue:`14154`)
.. _whatsnew_0190.api:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3bd8579d456d3..24b2a60afd90f 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -981,8 +981,7 @@ def _validate_usecols_arg(usecols):
if usecols is not None:
usecols_dtype = lib.infer_dtype(usecols)
- if usecols_dtype not in ('empty', 'integer',
- 'string', 'unicode'):
+ if usecols_dtype not in ('empty', 'integer', 'string', 'unicode'):
raise ValueError(msg)
return set(usecols)
@@ -1424,7 +1423,13 @@ def __init__(self, src, **kwds):
if (i in self.usecols or n in self.usecols)]
if len(self.names) < len(self.usecols):
- raise ValueError("Usecols do not match names.")
+ bad_cols = [n for n in self.usecols if n not in self.names]
+ if len(bad_cols) > 0:
+ raise ValueError(("%s specified in usecols but not found "
+ "in names.") % bad_cols)
+ else:
+ raise ValueError(("Number of usecols is greater than "
+ "number of names."))
self._set_noconvert_columns()
@@ -2185,16 +2190,21 @@ def _handle_usecols(self, columns, usecols_key):
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
- if any([isinstance(u, string_types) for u in self.usecols]):
+ if any([isinstance(c, string_types) for c in self.usecols]):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
+ bad_cols = [n for n in self.usecols if n not in usecols_key]
+ if len(bad_cols) > 0:
+ raise ValueError(("%s specified in usecols but not found "
+ "in names.") % bad_cols)
+
col_indices = []
- for u in self.usecols:
- if isinstance(u, string_types):
- col_indices.append(usecols_key.index(u))
+ for c in self.usecols:
+ if isinstance(c, string_types):
+ col_indices.append(usecols_key.index(c))
else:
- col_indices.append(u)
+ col_indices.append(c)
else:
col_indices = self.usecols
diff --git a/pandas/io/tests/parser/usecols.py b/pandas/io/tests/parser/usecols.py
index 16a19c50be960..efd23c8b3be54 100644
--- a/pandas/io/tests/parser/usecols.py
+++ b/pandas/io/tests/parser/usecols.py
@@ -83,6 +83,8 @@ def test_usecols(self):
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
+ self.assertRaises(ValueError, self.read_csv, StringIO(data),
+ names=['a', 'b'], usecols=['A'], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
| - [X] partial fix for #14154
- [X] tests added / passed
- [X] passes `git diff upstream/master | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14163 | 2016-09-06T13:59:11Z | 2017-02-27T16:04:20Z | null | 2023-05-11T01:14:05Z |
TST: Make encoded sep check more locale sensitive | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3bd8579d456d3..93c431531355a 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -800,17 +800,22 @@ def _clean_options(self, options, engine):
" different from '\s+' are"\
" interpreted as regex)"
engine = 'python'
-
- elif len(sep.encode(encoding)) > 1:
- if engine not in ('python', 'python-fwf'):
- fallback_reason = "the separator encoded in {encoding}"\
- " is > 1 char long, and the 'c' engine"\
- " does not support such separators".format(
- encoding=encoding)
- engine = 'python'
elif delim_whitespace:
if 'python' in engine:
result['delimiter'] = '\s+'
+ elif sep is not None:
+ encodeable = True
+ try:
+ if len(sep.encode(encoding)) > 1:
+ encodeable = False
+ except UnicodeDecodeError:
+ encodeable = False
+ if not encodeable and engine not in ('python', 'python-fwf'):
+ fallback_reason = "the separator encoded in {encoding}" \
+ " is > 1 char long, and the 'c' engine" \
+ " does not support such separators".format(
+ encoding=encoding)
+ engine = 'python'
if fallback_reason and engine_specified:
raise ValueError(fallback_reason)
diff --git a/pandas/io/tests/parser/test_unsupported.py b/pandas/io/tests/parser/test_unsupported.py
index 0bfb8b17349cf..ef8f7967193ff 100644
--- a/pandas/io/tests/parser/test_unsupported.py
+++ b/pandas/io/tests/parser/test_unsupported.py
@@ -60,10 +60,6 @@ def test_c_engine(self):
sep=None, delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, msg):
read_table(StringIO(data), engine='c', sep='\s')
-
- # GH 14120, skipping as failing when locale is set
- # with tm.assertRaisesRegexp(ValueError, msg):
- # read_table(StringIO(data), engine='c', sep='§')
with tm.assertRaisesRegexp(ValueError, msg):
read_table(StringIO(data), engine='c', skipfooter=1)
| Follow-up to #14120 to make the `sep` check more locale sensitive. Closes #14140.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14161 | 2016-09-06T02:08:59Z | 2016-09-08T22:12:53Z | 2016-09-08T22:12:53Z | 2016-09-09T02:03:05Z |
BUG : assignment of non-ns timedelta64 value (GH14155) | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 7f471904acf30..a391691a7550a 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1478,3 +1478,4 @@ Bug Fixes
- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
+- Bug in ```Series``` of ```np.timedelta64``` when setting a slice using an indexer (:issue:`14155`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index bb2d1a9d1b5d3..f2df949a90585 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1696,7 +1696,7 @@ def _try_coerce_args(self, values, other):
other = other.value
elif isinstance(other, np.timedelta64):
other_mask = isnull(other)
- other = other.view('i8')
+ other = Timedelta(other).value
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.ndarray):
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 64ebaa63cc10f..abeede16891ab 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1324,6 +1324,13 @@ def test_timedelta_assignment(self):
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
+ # GH 14155
+ s = Series(10 * [np.timedelta64(10, 'm')])
+ s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
+ expected = pd.Series(10 * [np.timedelta64(10, 'm')])
+ expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
+ tm.assert_series_equal(s, expected)
+
def test_underlying_data_conversion(self):
# GH 4080
| - [x] closes #14155
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14160 | 2016-09-06T01:51:49Z | 2016-09-08T10:22:35Z | null | 2016-09-08T10:22:40Z |
WIP/ENH: reading older Stata versions, #11526 | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 4aee6f72b1d53..e2e8ddcc47271 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -33,8 +33,6 @@ Other enhancements
-
-
.. _whatsnew_0200.api_breaking:
Backwards incompatible API changes
@@ -82,3 +80,5 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+`read_stata` can now handle some format 111 files, which are produced
+by SAS when generating Stata dta files (:issue:11526, :issue:14159)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 25f13048a73fd..985ea9c051505 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -34,8 +34,8 @@
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
- "113 (Stata 8/9), 114 (Stata 10/11), 115 (Stata 12), "
- "117 (Stata 13), or 118 (Stata 14)")
+ "111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
+ "115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
@@ -1183,7 +1183,7 @@ def _get_seek_variable_labels(self):
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
- if self.format_version not in [104, 105, 108, 113, 114, 115]:
+ if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
diff --git a/pandas/io/tests/data/stata7_111.dta b/pandas/io/tests/data/stata7_111.dta
new file mode 100644
index 0000000000000..e87fa3a72ff8e
Binary files /dev/null and b/pandas/io/tests/data/stata7_111.dta differ
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 7752fff5247c0..1849b32a4a7c8 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -82,6 +82,8 @@ def setUp(self):
self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
+ self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
+
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
@@ -1219,6 +1221,20 @@ def test_repeated_column_labels(self):
read_stata(self.dta23, convert_categoricals=True)
tm.assertTrue('wolof' in cm.exception)
+ def test_stata_111(self):
+ # 111 is an old version but still used by current versions of
+ # SAS when exporting to Stata format. We do not know of any
+ # on-line documentation for this version.
+ df = read_stata(self.dta24_111)
+ original = pd.DataFrame({'y': [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
+ 'x': [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
+ 'w': [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
+ 'z': ['a', 'b', 'c', 'd', 'e', '', 'g', 'h',
+ 'i', 'j']})
+ original = original[['y', 'x', 'w', 'z']]
+ tm.assert_frame_equal(original, df)
+
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| A simple file seems to read OK if we just treat it like other "old format" (version 104-115) files. However I do not currently have any documentation for this format. The only relevant resource I can find is the file `foreign/src/stataread.c` in the R `foreign` package sources:
https://cran.r-project.org/src/contrib/foreign_0.8-66.tar.gz
It suggests some special handling for version 111 but I haven't had time to figure out exactly what it is doing.
- [x] closes #11526
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14159 | 2016-09-05T19:43:57Z | 2016-09-11T13:55:39Z | null | 2016-09-11T13:55:44Z |
Fix typo (change 'n' to 'k' in get_dummies documentation) | diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 4dec8b4106126..fa5d16bd85e98 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -984,7 +984,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
.. versionadded:: 0.16.1
drop_first : bool, default False
- Whether to get k-1 dummies out of n categorical levels by removing the
+ Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
| Just changing an `n` to a `k`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14153 | 2016-09-05T12:19:02Z | 2016-09-05T14:16:31Z | 2016-09-05T14:16:31Z | 2016-09-05T14:16:35Z |
BUG/TST: Empty input arrays in cartesian_product and MultiIndex (#12258) | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 454ffc5e5c685..67beb468dce8a 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1471,6 +1471,7 @@ Bug Fixes
- Bug in ``MultiIndex`` slicing where extra elements were returned when level is non-unique (:issue:`12896`)
- Bug in ``.str.replace`` does not raise ``TypeError`` for invalid replacement (:issue:`13438`)
- Bug in ``MultiIndex.from_arrays`` which didn't check for input array lengths matching (:issue:`13599`)
+- Bug in ``cartesian_product`` and ``MultiIndex.from_product`` which may raise with empty input arrays (:issue:`12258`)
- Bug in ``pd.read_csv()`` which may cause a segfault or corruption when iterating in large chunks over a stream/file under rare circumstances (:issue:`13703`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 0a13c8936eeec..6b37a5e2cd202 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1979,13 +1979,16 @@ def _factorize_from_iterable(values):
Returns
-------
- codes : np.array
+ codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.indexes.category import CategoricalIndex
+ if not is_list_like(values):
+ raise TypeError("Input must be list-like")
+
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
@@ -2003,8 +2006,23 @@ def _factorize_from_iterable(values):
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
- See `_factorize_from_iterable` for more info.
*This is an internal function*
+
+ Parameters
+ ----------
+ iterables : list-like of list-likes
+
+ Returns
+ -------
+ codes_tuple : tuple of ndarrays
+ categories_tuple : tuple of Indexes
+
+ Notes
+ -----
+ See `_factorize_from_iterable` for more info.
"""
+ if len(iterables) == 0:
+ # For consistency, it should return a list of 2 tuples.
+ return [(), ()]
return lzip(*[_factorize_from_iterable(it) for it in iterables])
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 5248f0775d22f..92061eab61b78 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -691,6 +691,32 @@ def test_from_arrays_index_series_categorical(self):
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
+ def test_from_arrays_empty(self):
+ # 0 levels
+ with tm.assertRaisesRegexp(
+ ValueError, "Must pass non-zero number of levels/labels"):
+ MultiIndex.from_arrays(arrays=[])
+
+ # 1 level
+ result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
+ expected = Index([], name='A')
+ tm.assert_index_equal(result, expected)
+
+ # N levels
+ for N in [2, 3]:
+ arrays = [[]] * N
+ names = list('ABC')[:N]
+ result = MultiIndex.from_arrays(arrays=arrays, names=names)
+ expected = MultiIndex(levels=[np.array([])] * N, labels=[[]] * N,
+ names=names)
+ tm.assert_index_equal(result, expected)
+
+ def test_from_arrays_invalid_input(self):
+ invalid_inputs = [1, [1], [1, 2], [[1], 2],
+ 'a', ['a'], ['a', 'b'], [['a'], 'b']]
+ for i in invalid_inputs:
+ tm.assertRaises(TypeError, MultiIndex.from_arrays, arrays=i)
+
def test_from_arrays_different_lengths(self):
# GH13599
idx1 = [1, 2, 3]
@@ -723,6 +749,43 @@ def test_from_product(self):
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, names)
+ def test_from_product_empty(self):
+ # 0 levels
+ with tm.assertRaisesRegexp(
+ ValueError, "Must pass non-zero number of levels/labels"):
+ MultiIndex.from_product([])
+
+ # 1 level
+ result = MultiIndex.from_product([[]], names=['A'])
+ expected = pd.Float64Index([], name='A')
+ tm.assert_index_equal(result, expected)
+
+ # 2 levels
+ l1 = [[], ['foo', 'bar', 'baz'], []]
+ l2 = [[], [], ['a', 'b', 'c']]
+ names = ['A', 'B']
+ for first, second in zip(l1, l2):
+ result = MultiIndex.from_product([first, second], names=names)
+ expected = MultiIndex(levels=[np.array(first), np.array(second)],
+ labels=[[], []], names=names)
+ tm.assert_index_equal(result, expected)
+
+ # GH12258
+ names = ['A', 'B', 'C']
+ for N in range(4):
+ lvl2 = lrange(N)
+ result = MultiIndex.from_product([[], lvl2, []], names=names)
+ expected = MultiIndex(levels=[np.array(A)
+ for A in [[], lvl2, []]],
+ labels=[[], [], []], names=names)
+ tm.assert_index_equal(result, expected)
+
+ def test_from_product_invalid_input(self):
+ invalid_inputs = [1, [1], [1, 2], [[1], 2],
+ 'a', ['a'], ['a', 'b'], [['a'], 'b']]
+ for i in invalid_inputs:
+ tm.assertRaises(TypeError, MultiIndex.from_product, iterables=i)
+
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py
index d8a98bbb3fd27..8c16308d79a31 100644
--- a/pandas/tools/tests/test_util.py
+++ b/pandas/tools/tests/test_util.py
@@ -34,6 +34,29 @@ def test_datetimeindex(self):
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
+ def test_empty(self):
+ # product of empty factors
+ X = [[], [0, 1], []]
+ Y = [[], [], ['a', 'b', 'c']]
+ for x, y in zip(X, Y):
+ expected1 = np.array([], dtype=np.asarray(x).dtype)
+ expected2 = np.array([], dtype=np.asarray(y).dtype)
+ result1, result2 = cartesian_product([x, y])
+ tm.assert_numpy_array_equal(result1, expected1)
+ tm.assert_numpy_array_equal(result2, expected2)
+
+ # empty product (empty input):
+ result = cartesian_product([])
+ expected = []
+ tm.assert_equal(result, expected)
+
+ def test_invalid_input(self):
+ invalid_inputs = [1, [1], [1, 2], [[1], 2],
+ 'a', ['a'], ['a', 'b'], [['a'], 'b']]
+ msg = "Input must be a list-like of list-likes"
+ for X in invalid_inputs:
+ tm.assertRaisesRegexp(TypeError, msg, cartesian_product, X=X)
+
class TestLocaleUtils(tm.TestCase):
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index b8b28663387cc..fec56328c1721 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -4,6 +4,7 @@
from pandas.types.common import (is_number,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
+ is_list_like,
_ensure_object)
from pandas.types.cast import _possibly_downcast_to_dtype
@@ -24,13 +25,35 @@ def cartesian_product(X):
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
+ Parameters
+ ----------
+ X : list-like of list-likes
+
+ Returns
+ -------
+ product : list of ndarrays
+
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
+ See also
+ --------
+ itertools.product : Cartesian product of input iterables. Equivalent to
+ nested for-loops.
+ pandas.compat.product : An alias for itertools.product.
"""
+ msg = "Input must be a list-like of list-likes"
+ if not is_list_like(X):
+ raise TypeError(msg)
+ for x in X:
+ if not is_list_like(x):
+ raise TypeError(msg)
+
+ if len(X) == 0:
+ return []
lenX = np.fromiter((len(x) for x in X), dtype=int)
cumprodX = np.cumproduct(lenX)
@@ -38,7 +61,11 @@ def cartesian_product(X):
a = np.roll(cumprodX, 1)
a[0] = 1
- b = cumprodX[-1] / cumprodX
+ if cumprodX[-1] != 0:
+ b = cumprodX[-1] / cumprodX
+ else:
+ # if any factor is empty, the cartesian product is empty
+ b = np.zeros_like(cumprodX)
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
| - [x] closes #12258
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
This commit:
1. fixes logic (and division by 0) in `cartesian_product` when some input arrays are empty
2. adds tests for MultiIndex empty level construction with `.from_arrays` and `.from_product`
| https://api.github.com/repos/pandas-dev/pandas/pulls/14151 | 2016-09-05T04:46:36Z | 2016-09-27T10:36:44Z | null | 2016-10-08T02:53:24Z |
MAINT: flake8 *.pyx files | diff --git a/ci/lint.sh b/ci/lint.sh
index 61d74ae28377e..a866b04445f96 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -20,15 +20,7 @@ if [ "$LINT" ]; then
echo "Linting *.py DONE"
echo "Linting *.pyx"
- for path in 'window.pyx' "src/join.pyx"
- do
- echo "linting -> pandas/$path"
- flake8 pandas/$path --filename '*.pyx' --select=E501,E302,E203,E226,E111,E114,E221,E303,E128,E231,E126
- if [ $? -ne "0" ]; then
- RET=1
- fi
-
- done
+ flake8 pandas --filename '*.pyx' --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126
echo "Linting *.pyx DONE"
echo "Linting *.pxi.in"
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index d3e68ad2a5eee..de5c5fc661d4d 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -59,11 +59,11 @@ cdef:
int TIEBREAK_DENSE = 5
tiebreakers = {
- 'average' : TIEBREAK_AVERAGE,
- 'min' : TIEBREAK_MIN,
- 'max' : TIEBREAK_MAX,
- 'first' : TIEBREAK_FIRST,
- 'dense' : TIEBREAK_DENSE,
+ 'average': TIEBREAK_AVERAGE,
+ 'min': TIEBREAK_MIN,
+ 'max': TIEBREAK_MAX,
+ 'first': TIEBREAK_FIRST,
+ 'dense': TIEBREAK_DENSE,
}
@@ -489,7 +489,6 @@ def rank_1d_generic(object in_arr, bint retry=1, ties_method='average',
bint keep_na = 0
float count = 0.0
-
tiebreak = tiebreakers[ties_method]
keep_na = na_option == 'keep'
@@ -578,6 +577,7 @@ class Infinity(object):
__gt__ = lambda self, other: self is not other
__ge__ = lambda self, other: True
+
class NegInfinity(object):
""" provide a negative Infinity comparision method for ranking """
@@ -705,7 +705,6 @@ def rank_2d_generic(object in_arr, axis=0, ties_method='average',
# return result
-
cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil except -1:
cdef numeric t
@@ -747,11 +746,11 @@ cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k):
cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n):
cdef:
- Py_ssize_t i,j,l,m
+ Py_ssize_t i, j, l, m
double_t x, t
l = 0
- m = n-1
+ m = n -1
while (l<m):
x = a[k]
i = l
@@ -793,13 +792,13 @@ cpdef numeric median(numeric[:] arr):
def max_subseq(ndarray[double_t] arr):
cdef:
- Py_ssize_t i=0,s=0,e=0,T,n
+ Py_ssize_t i=0, s=0, e=0, T, n
double m, S
n = len(arr)
if len(arr) == 0:
- return (-1,-1,None)
+ return (-1, -1, None)
m = arr[0]
S = m
@@ -819,6 +818,7 @@ def max_subseq(ndarray[double_t] arr):
return (s, e, m)
+
def min_subseq(ndarray[double_t] arr):
cdef:
Py_ssize_t s, e
@@ -831,6 +831,7 @@ def min_subseq(ndarray[double_t] arr):
#----------------------------------------------------------------------
# Pairwise correlation/covariance
+
@cython.boundscheck(False)
@cython.wraparound(False)
def nancorr(ndarray[float64_t, ndim=2] mat, cov=False, minp=None):
@@ -890,6 +891,7 @@ def nancorr(ndarray[float64_t, ndim=2] mat, cov=False, minp=None):
#----------------------------------------------------------------------
# Pairwise Spearman correlation
+
@cython.boundscheck(False)
@cython.wraparound(False)
def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
@@ -953,6 +955,7 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
#----------------------------------------------------------------------
# group operations
+
@cython.wraparound(False)
@cython.boundscheck(False)
def is_lexsorted(list list_of_arrays):
@@ -967,16 +970,14 @@ def is_lexsorted(list list_of_arrays):
cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*))
for i from 0 <= i < nlevels:
- # vecs[i] = <int64_t *> (<ndarray> list_of_arrays[i]).data
-
arr = list_of_arrays[i]
- vecs[i] = <int64_t *> arr.data
- # assume uniqueness??
+ vecs[i] = <int64_t*> arr.data
+ # Assume uniqueness??
for i from 1 <= i < n:
for k from 0 <= k < nlevels:
cur = vecs[k][i]
- pre = vecs[k][i-1]
+ pre = vecs[k][i -1]
if cur == pre:
continue
elif cur > pre:
@@ -988,7 +989,8 @@ def is_lexsorted(list list_of_arrays):
@cython.boundscheck(False)
-def groupby_indices(dict ids, ndarray[int64_t] labels, ndarray[int64_t] counts):
+def groupby_indices(dict ids, ndarray[int64_t] labels,
+ ndarray[int64_t] counts):
"""
turn group_labels output into a combined indexer maping the labels to
indexers
@@ -1020,7 +1022,7 @@ def groupby_indices(dict ids, ndarray[int64_t] labels, ndarray[int64_t] counts):
for i from 0 <= i < len(counts):
arr = np.empty(counts[i], dtype=np.int64)
result[ids[i]] = arr
- vecs[i] = <int64_t *> arr.data
+ vecs[i] = <int64_t*> arr.data
for i from 0 <= i < n:
k = labels[i]
@@ -1036,6 +1038,7 @@ def groupby_indices(dict ids, ndarray[int64_t] labels, ndarray[int64_t] counts):
free(vecs)
return result
+
@cython.wraparound(False)
@cython.boundscheck(False)
def group_labels(ndarray[object] values):
@@ -1116,6 +1119,7 @@ def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups):
#----------------------------------------------------------------------
# first, nth, last
+
@cython.boundscheck(False)
@cython.wraparound(False)
def group_nth_object(ndarray[object, ndim=2] out,
@@ -1160,6 +1164,7 @@ def group_nth_object(ndarray[object, ndim=2] out,
else:
out[i, j] = resx[i, j]
+
@cython.boundscheck(False)
@cython.wraparound(False)
def group_nth_bin_object(ndarray[object, ndim=2] out,
@@ -1210,6 +1215,7 @@ def group_nth_bin_object(ndarray[object, ndim=2] out,
else:
out[i, j] = resx[i, j]
+
@cython.boundscheck(False)
@cython.wraparound(False)
def group_last_object(ndarray[object, ndim=2] out,
@@ -1252,6 +1258,7 @@ def group_last_object(ndarray[object, ndim=2] out,
else:
out[i, j] = resx[i, j]
+
@cython.boundscheck(False)
@cython.wraparound(False)
def group_last_bin_object(ndarray[object, ndim=2] out,
@@ -1326,7 +1333,6 @@ cdef inline float64_t _median_linear(float64_t* a, int n):
a = tmp
n -= na_count
-
if n % 2:
result = kth_smallest_c( a, n / 2, n)
else:
diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx
index af694c276b5b7..3bda3f49cb054 100644
--- a/pandas/hashtable.pyx
+++ b/pandas/hashtable.pyx
@@ -192,7 +192,7 @@ def mode_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask):
kh_destroy_pymap(table)
- return modes[:j+1]
+ return modes[:j + 1]
@cython.wraparound(False)
@@ -227,7 +227,7 @@ def mode_int64(int64_t[:] values):
kh_destroy_int64(table)
- return modes[:j+1]
+ return modes[:j + 1]
@cython.wraparound(False)
diff --git a/pandas/index.pyx b/pandas/index.pyx
index bc985100692fc..2935560a05b6b 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -54,7 +54,8 @@ cdef inline is_definitely_invalid_key(object val):
# we have a _data, means we are a NDFrame
return (PySlice_Check(val) or cnp.PyArray_Check(val)
- or PyList_Check(val) or hasattr(val,'_data'))
+ or PyList_Check(val) or hasattr(val, '_data'))
+
def get_value_at(ndarray arr, object loc):
if arr.descr.type_num == NPY_DATETIME:
@@ -63,6 +64,7 @@ def get_value_at(ndarray arr, object loc):
return Timedelta(util.get_value_at(arr, loc))
return util.get_value_at(arr, loc)
+
def set_value_at(ndarray arr, object loc, object val):
return util.set_value_at(arr, loc, val)
@@ -302,7 +304,7 @@ cdef class IndexEngine:
else:
n_alloc = n
- result = np.empty(n_alloc, dtype=np.int64)
+ result = np.empty(n_alloc, dtype=np.int64)
missing = np.empty(n_t, dtype=np.int64)
# form the set of the results (like ismember)
@@ -311,7 +313,7 @@ cdef class IndexEngine:
val = util.get_value_1d(values, i)
if val in stargets:
if val not in d:
- d[val] = []
+ d[val] = []
d[val].append(i)
for i in range(n_t):
@@ -322,20 +324,20 @@ cdef class IndexEngine:
if val in d:
for j in d[val]:
- # realloc if needed
- if count >= n_alloc:
- n_alloc += 10000
- result = np.resize(result, n_alloc)
+ # realloc if needed
+ if count >= n_alloc:
+ n_alloc += 10000
+ result = np.resize(result, n_alloc)
- result[count] = j
- count += 1
+ result[count] = j
+ count += 1
# value not found
else:
if count >= n_alloc:
- n_alloc += 10000
- result = np.resize(result, n_alloc)
+ n_alloc += 10000
+ result = np.resize(result, n_alloc)
result[count] = -1
count += 1
missing[count_missing] = i
@@ -479,9 +481,9 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1:
return mid + 1
_pad_functions = {
- 'object' : algos.pad_object,
- 'int64' : algos.pad_int64,
- 'float64' : algos.pad_float64
+ 'object': algos.pad_object,
+ 'int64': algos.pad_int64,
+ 'float64': algos.pad_float64
}
_backfill_functions = {
@@ -606,7 +608,7 @@ cdef class TimedeltaEngine(DatetimeEngine):
cpdef convert_scalar(ndarray arr, object value):
if arr.descr.type_num == NPY_DATETIME:
- if isinstance(value,np.ndarray):
+ if isinstance(value, np.ndarray):
pass
elif isinstance(value, Timestamp):
return value.value
@@ -615,7 +617,7 @@ cpdef convert_scalar(ndarray arr, object value):
else:
return Timestamp(value).value
elif arr.descr.type_num == NPY_TIMEDELTA:
- if isinstance(value,np.ndarray):
+ if isinstance(value, np.ndarray):
pass
elif isinstance(value, Timedelta):
return value.value
@@ -639,7 +641,8 @@ cdef inline _to_i8(object val):
return get_datetime64_value(val)
elif PyDateTime_Check(val):
tzinfo = getattr(val, 'tzinfo', None)
- ival = _pydatetime_to_dts(val, &dts) # Save the original date value so we can get the utcoffset from it.
+ # Save the original date value so we can get the utcoffset from it.
+ ival = _pydatetime_to_dts(val, &dts)
if tzinfo is not None and not _is_utc(tzinfo):
offset = tslib._get_utcoffset(tzinfo, val)
ival -= tslib._delta_to_nanoseconds(offset)
diff --git a/pandas/io/sas/saslib.pyx b/pandas/io/sas/saslib.pyx
index ac73ae37ca70e..a66d62ea41581 100644
--- a/pandas/io/sas/saslib.pyx
+++ b/pandas/io/sas/saslib.pyx
@@ -10,12 +10,14 @@ import sas_constants as const
# algorithm. It is partially documented here:
#
# https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
-cdef np.ndarray[uint8_t, ndim=1] rle_decompress(int result_length, np.ndarray[uint8_t, ndim=1] inbuff):
+cdef np.ndarray[uint8_t, ndim=1] rle_decompress(
+ int result_length, np.ndarray[uint8_t, ndim=1] inbuff):
cdef:
uint8_t control_byte, x
uint8_t [:] result = np.zeros(result_length, np.uint8)
- int rpos = 0, ipos = 0, i, nbytes, end_of_first_byte, length = len(inbuff)
+ int rpos = 0, ipos = 0, length = len(inbuff)
+ int i, nbytes, end_of_first_byte
while ipos < length:
control_byte = inbuff[ipos] & 0xF0
@@ -41,13 +43,13 @@ cdef np.ndarray[uint8_t, ndim=1] rle_decompress(int result_length, np.ndarray[ui
rpos += 1
ipos += 1
elif control_byte == 0x60:
- nbytes = end_of_first_byte*256 + <int>(inbuff[ipos]) + 17
+ nbytes = end_of_first_byte * 256 + <int>(inbuff[ipos]) + 17
ipos += 1
for i in range(nbytes):
result[rpos] = 0x20
rpos += 1
elif control_byte == 0x70:
- nbytes = end_of_first_byte*256 + <int>(inbuff[ipos]) + 17
+ nbytes = end_of_first_byte * 256 + <int>(inbuff[ipos]) + 17
ipos += 1
for i in range(nbytes):
result[rpos] = 0x00
@@ -109,8 +111,9 @@ cdef np.ndarray[uint8_t, ndim=1] rle_decompress(int result_length, np.ndarray[ui
# rdc_decompress decompresses data using the Ross Data Compression algorithm:
#
-# http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
-cdef np.ndarray[uint8_t, ndim=1] rdc_decompress(int result_length, np.ndarray[uint8_t, ndim=1] inbuff):
+# http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
+cdef np.ndarray[uint8_t, ndim=1] rdc_decompress(
+ int result_length, np.ndarray[uint8_t, ndim=1] inbuff):
cdef:
uint8_t cmd
@@ -124,7 +127,8 @@ cdef np.ndarray[uint8_t, ndim=1] rdc_decompress(int result_length, np.ndarray[ui
ii += 1
ctrl_mask = ctrl_mask >> 1
if ctrl_mask == 0:
- ctrl_bits = (<uint16_t>inbuff[ipos] << 8) + <uint16_t>inbuff[ipos + 1]
+ ctrl_bits = ((<uint16_t>inbuff[ipos] << 8) +
+ <uint16_t>inbuff[ipos + 1])
ipos += 2
ctrl_mask = 0x8000
@@ -219,7 +223,8 @@ cdef class Parser(object):
int subheader_pointer_length
int current_page_type
bint is_little_endian
- np.ndarray[uint8_t, ndim=1] (*decompress)(int result_length, np.ndarray[uint8_t, ndim=1] inbuff)
+ np.ndarray[uint8_t, ndim=1] (*decompress)(
+ int result_length, np.ndarray[uint8_t, ndim=1] inbuff)
object parser
def __init__(self, object parser):
@@ -252,7 +257,8 @@ cdef class Parser(object):
elif column_types[j] == b's':
self.column_types[j] = column_type_string
else:
- raise ValueError("unknown column type: %s" % self.parser.columns[j].ctype)
+ raise ValueError("unknown column type: "
+ "%s" % self.parser.columns[j].ctype)
# compression
if parser.compression == const.rle_compression:
@@ -279,7 +285,8 @@ cdef class Parser(object):
# update the parser
self.parser._current_row_on_page_index = self.current_row_on_page_index
- self.parser._current_row_in_chunk_index = self.current_row_in_chunk_index
+ self.parser._current_row_in_chunk_index =\
+ self.current_row_in_chunk_index
self.parser._current_row_in_file_index = self.current_row_in_file_index
cdef bint read_next_page(self):
@@ -299,13 +306,16 @@ cdef class Parser(object):
self.current_row_on_page_index = 0
self.current_page_type = self.parser._current_page_type
self.current_page_block_count = self.parser._current_page_block_count
- self.current_page_data_subheader_pointers_len = len(self.parser._current_page_data_subheader_pointers)
- self.current_page_subheaders_count = self.parser._current_page_subheaders_count
+ self.current_page_data_subheader_pointers_len = len(
+ self.parser._current_page_data_subheader_pointers)
+ self.current_page_subheaders_count =\
+ self.parser._current_page_subheaders_count
cdef bint readline(self):
cdef:
- int offset, bit_offset, align_correction, subheader_pointer_length, mn
+ int offset, bit_offset, align_correction
+ int subheader_pointer_length, mn
bint done, flag
bit_offset = self.bit_offset
@@ -321,7 +331,8 @@ cdef class Parser(object):
# Loop until a data row is read
while True:
if self.current_page_type == page_meta_type:
- flag = self.current_row_on_page_index >= self.current_page_data_subheader_pointers_len
+ flag = self.current_row_on_page_index >=\
+ self.current_page_data_subheader_pointers_len
if flag:
done = self.read_next_page()
if done:
@@ -330,10 +341,12 @@ cdef class Parser(object):
current_subheader_pointer = (
self.parser._current_page_data_subheader_pointers[
self.current_row_on_page_index])
- self.process_byte_array_with_data(current_subheader_pointer.offset,
- current_subheader_pointer.length)
+ self.process_byte_array_with_data(
+ current_subheader_pointer.offset,
+ current_subheader_pointer.length)
return False
- elif self.current_page_type == page_mix_types_0 or self.current_page_type == page_mix_types_1:
+ elif (self.current_page_type == page_mix_types_0 or
+ self.current_page_type == page_mix_types_1):
align_correction = (bit_offset + subheader_pointers_offset +
self.current_page_subheaders_count *
subheader_pointer_length)
@@ -345,18 +358,18 @@ cdef class Parser(object):
offset += self.current_row_on_page_index * self.row_length
self.process_byte_array_with_data(offset,
self.row_length)
- mn = min(self.parser.row_count, self.parser._mix_page_row_count)
+ mn = min(self.parser.row_count,
+ self.parser._mix_page_row_count)
if self.current_row_on_page_index == mn:
done = self.read_next_page()
if done:
return True
return False
elif self.current_page_type == page_data_type:
- self.process_byte_array_with_data(bit_offset +
- subheader_pointers_offset +
- self.current_row_on_page_index *
- self.row_length,
- self.row_length)
+ self.process_byte_array_with_data(
+ bit_offset + subheader_pointers_offset +
+ self.current_row_on_page_index * self.row_length,
+ self.row_length)
flag = (self.current_row_on_page_index ==
self.current_page_block_count)
if flag:
@@ -371,17 +384,18 @@ cdef class Parser(object):
cdef void process_byte_array_with_data(self, int offset, int length):
cdef:
- Py_ssize_t j
- int s, k, m, jb, js, current_row
- int64_t lngt, start, ct
- np.ndarray[uint8_t, ndim=1] source
- int64_t[:] column_types
- int64_t[:] lengths
- int64_t[:] offsets
- uint8_t[:, :] byte_chunk
- object[:, :] string_chunk
-
- source = np.frombuffer(self.cached_page[offset:offset+length], dtype=np.uint8)
+ Py_ssize_t j
+ int s, k, m, jb, js, current_row
+ int64_t lngt, start, ct
+ np.ndarray[uint8_t, ndim=1] source
+ int64_t[:] column_types
+ int64_t[:] lengths
+ int64_t[:] offsets
+ uint8_t[:, :] byte_chunk
+ object[:, :] string_chunk
+
+ source = np.frombuffer(
+ self.cached_page[offset:offset + length], dtype=np.uint8)
if self.decompress != NULL and (length < self.row_length):
source = self.decompress(self.row_length, source)
@@ -408,11 +422,12 @@ cdef class Parser(object):
else:
m = s
for k in range(lngt):
- byte_chunk[jb, m + k] = source[start + k]
+ byte_chunk[jb, m + k] = source[start + k]
jb += 1
elif column_types[j] == column_type_string:
# string
- string_chunk[js, current_row] = source[start:(start+lngt)].tostring().rstrip()
+ string_chunk[js, current_row] = source[start:(
+ start + lngt)].tostring().rstrip()
js += 1
self.current_row_on_page_index += 1
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 0473ae79adce5..e7672de5c835e 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -84,6 +84,7 @@ PyDateTime_IMPORT
import_array()
import_ufunc()
+
def values_from_object(object o):
""" return my values or the object if we are say an ndarray """
cdef f
@@ -159,6 +160,7 @@ def ismember(ndarray arr, set values):
return result.view(np.bool_)
+
def ismember_int64(ndarray[int64_t] arr, set values):
"""
Checks whether
@@ -184,6 +186,7 @@ def ismember_int64(ndarray[int64_t] arr, set values):
return result.view(np.bool_)
+
@cython.wraparound(False)
@cython.boundscheck(False)
def memory_usage_of_objects(ndarray[object, ndim=1] arr):
@@ -217,12 +220,15 @@ cdef inline int64_t gmtime(object date):
days = pydate(y, m, 1).toordinal() - _EPOCH_ORD + d - 1
return ((<int64_t> (((days * 24 + h) * 60 + mn))) * 60 + s) * 1000
+
cpdef object to_datetime(int64_t timestamp):
return pydatetime.utcfromtimestamp(timestamp / 1000.0)
+
cpdef object to_timestamp(object dt):
return gmtime(dt)
+
def array_to_timestamp(ndarray[object, ndim=1] arr):
cdef int i, n
cdef ndarray[int64_t, ndim=1] result
@@ -235,6 +241,7 @@ def array_to_timestamp(ndarray[object, ndim=1] arr):
return result
+
def time64_to_datetime(ndarray[int64_t, ndim=1] arr):
cdef int i, n
cdef ndarray[object, ndim=1] result
@@ -254,6 +261,7 @@ def time64_to_datetime(ndarray[int64_t, ndim=1] arr):
cdef double INF = <double> np.inf
cdef double NEGINF = -INF
+
cpdef checknull(object val):
if util.is_float_object(val) or util.is_complex_object(val):
return val != val # and val != INF and val != NEGINF
@@ -268,6 +276,7 @@ cpdef checknull(object val):
else:
return _checknull(val)
+
cpdef checknull_old(object val):
if util.is_float_object(val) or util.is_complex_object(val):
return val != val or val == INF or val == NEGINF
@@ -282,18 +291,21 @@ cpdef checknull_old(object val):
else:
return util._checknull(val)
+
cpdef isposinf_scalar(object val):
if util.is_float_object(val) and val == INF:
return True
else:
return False
+
cpdef isneginf_scalar(object val):
if util.is_float_object(val) and val == NEGINF:
return True
else:
return False
+
def isscalar(object val):
"""
Return True if given value is scalar.
@@ -356,6 +368,7 @@ def isnullobj(ndarray arr):
result[i] = _check_all_nulls(val)
return result.view(np.bool_)
+
@cython.wraparound(False)
@cython.boundscheck(False)
def isnullobj_old(ndarray arr):
@@ -372,6 +385,7 @@ def isnullobj_old(ndarray arr):
result[i] = val is NaT or util._checknull_old(val)
return result.view(np.bool_)
+
@cython.wraparound(False)
@cython.boundscheck(False)
def isnullobj2d(ndarray arr):
@@ -390,6 +404,7 @@ def isnullobj2d(ndarray arr):
result[i, j] = 1
return result.view(np.bool_)
+
@cython.wraparound(False)
@cython.boundscheck(False)
def isnullobj2d_old(ndarray arr):
@@ -413,8 +428,8 @@ def isnullobj2d_old(ndarray arr):
@cython.boundscheck(False)
cpdef ndarray[object] list_to_object_array(list obj):
"""
- Convert list to object ndarray. Seriously can\'t believe I had to write this
- function
+ Convert list to object ndarray. Seriously can\'t believe
+ I had to write this function.
"""
cdef:
Py_ssize_t i, n = len(obj)
@@ -447,6 +462,7 @@ def fast_unique(ndarray[object] values):
return uniques
+
@cython.wraparound(False)
@cython.boundscheck(False)
def fast_unique_multiple(list arrays):
@@ -473,6 +489,7 @@ def fast_unique_multiple(list arrays):
return uniques
+
@cython.wraparound(False)
@cython.boundscheck(False)
def fast_unique_multiple_list(list lists):
@@ -499,6 +516,7 @@ def fast_unique_multiple_list(list lists):
return uniques
+
@cython.wraparound(False)
@cython.boundscheck(False)
def fast_unique_multiple_list_gen(object gen, bint sort=True):
@@ -538,6 +556,7 @@ def fast_unique_multiple_list_gen(object gen, bint sort=True):
return uniques
+
@cython.wraparound(False)
@cython.boundscheck(False)
def dicts_to_array(list dicts, list columns):
@@ -563,6 +582,7 @@ def dicts_to_array(list dicts, list columns):
return result
+
def fast_zip(list ndarrays):
"""
For zipping multiple ndarrays into an ndarray of tuples
@@ -604,6 +624,7 @@ def fast_zip(list ndarrays):
return result
+
def get_reverse_indexer(ndarray[int64_t] indexer, Py_ssize_t length):
"""
Reverse indexing operation.
@@ -645,6 +666,7 @@ def has_infs_f4(ndarray[float32_t] arr):
return True
return False
+
def has_infs_f8(ndarray[float64_t] arr):
cdef:
Py_ssize_t i, n = len(arr)
@@ -659,6 +681,7 @@ def has_infs_f8(ndarray[float64_t] arr):
return True
return False
+
def convert_timestamps(ndarray values):
cdef:
object val, f, result
@@ -911,6 +934,7 @@ def scalar_binop(ndarray[object] values, object val, object op):
return maybe_convert_bool(result)
+
@cython.wraparound(False)
@cython.boundscheck(False)
def vec_binop(ndarray[object] left, ndarray[object] right, object op):
@@ -948,18 +972,19 @@ def astype_intsafe(ndarray[object] arr, new_dtype):
ndarray result
# on 32-bit, 1.6.2 numpy M8[ns] is a subdtype of integer, which is weird
- is_datelike = new_dtype in ['M8[ns]','m8[ns]']
+ is_datelike = new_dtype in ['M8[ns]', 'm8[ns]']
result = np.empty(n, dtype=new_dtype)
for i in range(n):
v = arr[i]
if is_datelike and checknull(v):
- result[i] = NPY_NAT
+ result[i] = NPY_NAT
else:
- util.set_value_at(result, i, v)
+ util.set_value_at(result, i, v)
return result
+
cpdef ndarray[object] astype_unicode(ndarray arr):
cdef:
Py_ssize_t i, n = arr.size
@@ -970,6 +995,7 @@ cpdef ndarray[object] astype_unicode(ndarray arr):
return result
+
cpdef ndarray[object] astype_str(ndarray arr):
cdef:
Py_ssize_t i, n = arr.size
@@ -980,6 +1006,7 @@ cpdef ndarray[object] astype_str(ndarray arr):
return result
+
def clean_index_list(list obj):
"""
Utility used in pandas.core.index._ensure_index
@@ -992,7 +1019,7 @@ def clean_index_list(list obj):
for i in range(n):
v = obj[i]
- if not (PyList_Check(v) or np.PyArray_Check(v) or hasattr(v,'_data')):
+ if not (PyList_Check(v) or np.PyArray_Check(v) or hasattr(v, '_data')):
all_arrays = 0
break
@@ -1002,7 +1029,7 @@ def clean_index_list(list obj):
converted = np.empty(n, dtype=object)
for i in range(n):
v = obj[i]
- if PyList_Check(v) or np.PyArray_Check(v) or hasattr(v,'_data'):
+ if PyList_Check(v) or np.PyArray_Check(v) or hasattr(v, '_data'):
converted[i] = tuple(v)
else:
converted[i] = v
@@ -1038,10 +1065,16 @@ cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr):
return m
+
@cython.boundscheck(False)
@cython.wraparound(False)
-def string_array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, object replace = None):
- """ replace the values in the array with replacement if they are nan_rep; return the same array """
+def string_array_replace_from_nan_rep(
+ ndarray[object, ndim=1] arr, object nan_rep,
+ object replace=None):
+ """
+ Replace the values in the array with 'replacement' if
+ they are 'nan_rep'. Return the same array.
+ """
cdef int length = arr.shape[0], i = 0
if replace is None:
@@ -1053,9 +1086,11 @@ def string_array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_re
return arr
+
@cython.boundscheck(False)
@cython.wraparound(False)
-def write_csv_rows(list data, ndarray data_index, int nlevels, ndarray cols, object writer):
+def write_csv_rows(list data, ndarray data_index,
+ int nlevels, ndarray cols, object writer):
cdef int N, j, i, ncols
cdef list rows
@@ -1066,7 +1101,7 @@ def write_csv_rows(list data, ndarray data_index, int nlevels, ndarray cols, obj
# pre-allocate rows
ncols = len(cols)
- rows = [[None]*(nlevels+ncols) for x in range(N)]
+ rows = [[None] * (nlevels + ncols) for x in range(N)]
j = -1
if nlevels == 1:
@@ -1074,18 +1109,18 @@ def write_csv_rows(list data, ndarray data_index, int nlevels, ndarray cols, obj
row = rows[j % N]
row[0] = data_index[j]
for i in range(ncols):
- row[1+i] = data[i][j]
+ row[1 + i] = data[i][j]
- if j >= N-1 and j % N == N-1:
+ if j >= N - 1 and j % N == N - 1:
writer.writerows(rows)
elif nlevels > 1:
for j in range(len(data_index)):
row = rows[j % N]
row[:nlevels] = list(data_index[j])
for i in range(ncols):
- row[nlevels+i] = data[i][j]
+ row[nlevels + i] = data[i][j]
- if j >= N-1 and j % N == N-1:
+ if j >= N - 1 and j % N == N - 1:
writer.writerows(rows)
else:
for j in range(len(data_index)):
@@ -1093,15 +1128,15 @@ def write_csv_rows(list data, ndarray data_index, int nlevels, ndarray cols, obj
for i in range(ncols):
row[i] = data[i][j]
- if j >= N-1 and j % N == N-1:
+ if j >= N - 1 and j % N == N - 1:
writer.writerows(rows)
- if j >= 0 and (j < N-1 or (j % N) != N-1 ):
- writer.writerows(rows[:((j+1) % N)])
+ if j >= 0 and (j < N - 1 or (j % N) != N - 1):
+ writer.writerows(rows[:((j + 1) % N)])
-#-------------------------------------------------------------------------------
-# Groupby-related functions
+#------------------------------------------------------------------------------
+# Groupby-related functions
@cython.boundscheck(False)
def arrmap(ndarray[object] index, object func):
cdef int length = index.shape[0]
@@ -1114,6 +1149,7 @@ def arrmap(ndarray[object] index, object func):
return result
+
@cython.wraparound(False)
@cython.boundscheck(False)
def is_lexsorted(list list_of_arrays):
@@ -1128,16 +1164,14 @@ def is_lexsorted(list list_of_arrays):
cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*))
for i from 0 <= i < nlevels:
- # vecs[i] = <int64_t *> (<ndarray> list_of_arrays[i]).data
-
arr = list_of_arrays[i]
vecs[i] = <int64_t *> arr.data
- # assume uniqueness??
+ # Assume uniqueness??
for i from 1 <= i < n:
for k from 0 <= k < nlevels:
cur = vecs[k][i]
- pre = vecs[k][i-1]
+ pre = vecs[k][i - 1]
if cur == pre:
continue
elif cur > pre:
@@ -1148,11 +1182,9 @@ def is_lexsorted(list list_of_arrays):
return True
-
# TODO: could do even better if we know something about the data. eg, index has
# 1-min data, binner has 5-min data, then bins are just strides in index. This
# is a general, O(max(len(values), len(binner))) method.
-
@cython.boundscheck(False)
@cython.wraparound(False)
def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner,
@@ -1182,18 +1214,18 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner,
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
- if values[lenidx-1] > binner[lenbin-1]:
+ if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
- j = 0 # index into values
+ j = 0 # index into values
bc = 0 # bin count
# linear scan
if right_closed:
for i in range(0, lenbin - 1):
- r_bin = binner[i+1]
+ r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and values[j] <= r_bin:
j += 1
@@ -1201,7 +1233,7 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner,
bc += 1
else:
for i in range(0, lenbin - 1):
- r_bin = binner[i+1]
+ r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and values[j] < r_bin:
j += 1
@@ -1216,8 +1248,6 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner,
return bins
-
-
@cython.boundscheck(False)
@cython.wraparound(False)
def row_bool_subset(ndarray[float64_t, ndim=2] values,
@@ -1239,6 +1269,7 @@ def row_bool_subset(ndarray[float64_t, ndim=2] values,
return out
+
@cython.boundscheck(False)
@cython.wraparound(False)
def row_bool_subset_object(ndarray[object, ndim=2] values,
@@ -1260,6 +1291,7 @@ def row_bool_subset_object(ndarray[object, ndim=2] values,
return out
+
@cython.boundscheck(False)
@cython.wraparound(False)
def get_level_sorter(ndarray[int64_t, ndim=1] label,
@@ -1282,6 +1314,7 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label,
return out
+
def group_count(ndarray[int64_t] values, Py_ssize_t size):
cdef:
Py_ssize_t i, n = len(values)
@@ -1292,6 +1325,7 @@ def group_count(ndarray[int64_t] values, Py_ssize_t size):
counts[values[i]] += 1
return counts
+
def lookup_values(ndarray[object] values, dict mapping):
cdef:
Py_ssize_t i, n = len(values)
@@ -1331,6 +1365,7 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
return counts
+
cdef class _PandasNull:
def __richcmp__(_PandasNull self, object other, int op):
@@ -1346,6 +1381,7 @@ cdef class _PandasNull:
pandas_null = _PandasNull()
+
def fast_zip_fillna(list ndarrays, fill_value=pandas_null):
"""
For zipping multiple ndarrays into an ndarray of tuples
@@ -1445,7 +1481,7 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys,
tup = PyTuple_New(k)
for j in range(k):
val = util.get_value_at(keys[j],
- sorted_labels[j][i-1])
+ sorted_labels[j][i - 1])
PyTuple_SET_ITEM(tup, j, val)
Py_INCREF(val)
@@ -1574,7 +1610,7 @@ cpdef slice indexer_as_slice(int64_t[:] vals):
return None
for i in range(2, n):
- if vals[i] < 0 or vals[i] - vals[i-1] != d:
+ if vals[i] < 0 or vals[i] - vals[i - 1] != d:
return None
start = vals[0]
@@ -1645,12 +1681,13 @@ cpdef slice_get_indices_ex(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX):
if slc is None:
raise TypeError("slc should be a slice")
- PySlice_GetIndicesEx(<PySliceObject*>slc, objlen,
+ PySlice_GetIndicesEx(<PySliceObject *>slc, objlen,
&start, &stop, &step, &length)
return start, stop, step, length
-cpdef Py_ssize_t slice_len(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except -1:
+cpdef Py_ssize_t slice_len(
+ slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except -1:
"""
Get length of a bounded slice.
@@ -1668,7 +1705,7 @@ cpdef Py_ssize_t slice_len(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except -
if slc is None:
raise TypeError("slc must be slice")
- PySlice_GetIndicesEx(<PySliceObject*>slc, objlen,
+ PySlice_GetIndicesEx(<PySliceObject *>slc, objlen,
&start, &stop, &step, &length)
return length
diff --git a/pandas/msgpack/_packer.pyx b/pandas/msgpack/_packer.pyx
index 5004b9e8e7262..008dbe5541d50 100644
--- a/pandas/msgpack/_packer.pyx
+++ b/pandas/msgpack/_packer.pyx
@@ -23,7 +23,8 @@ cdef extern from "../src/msgpack/pack.h":
int msgpack_pack_false(msgpack_packer* pk)
int msgpack_pack_long(msgpack_packer* pk, long d)
int msgpack_pack_long_long(msgpack_packer* pk, long long d)
- int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+ int msgpack_pack_unsigned_long_long(msgpack_packer* pk,
+ unsigned long long d)
int msgpack_pack_float(msgpack_packer* pk, float d)
int msgpack_pack_double(msgpack_packer* pk, double d)
int msgpack_pack_array(msgpack_packer* pk, size_t l)
@@ -58,8 +59,10 @@ cdef class Packer(object):
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
- Reset buffer after each pack and return it's content as `bytes`. (default: True).
- If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+ Reset buffer after each pack and return it's
+ content as `bytes`. (default: True).
+ If set this to false, use `bytes()` to get
+ content and `.reset()` to clear buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enable str8 type for unicode.
@@ -74,15 +77,16 @@ cdef class Packer(object):
cdef bint autoreset
def __cinit__(self):
- cdef int buf_size = 1024*1024
- self.pk.buf = <char*> malloc(buf_size);
+ cdef int buf_size = 1024 * 1024
+ self.pk.buf = <char*> malloc(buf_size)
if self.pk.buf == NULL:
raise MemoryError("Unable to allocate internal buffer.")
self.pk.buf_size = buf_size
self.pk.length = 0
- def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
- use_single_float=False, bint autoreset=1, bint use_bin_type=0):
+ def __init__(self, default=None, encoding='utf-8',
+ unicode_errors='strict', use_single_float=False,
+ bint autoreset=1, bint use_bin_type=0):
"""
"""
self.use_float = use_single_float
@@ -110,7 +114,8 @@ cdef class Packer(object):
def __dealloc__(self):
free(self.pk.buf);
- cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+ cdef int _pack(self, object o,
+ int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
cdef long long llval
cdef unsigned long long ullval
cdef long longval
@@ -147,14 +152,14 @@ cdef class Packer(object):
ret = msgpack_pack_long(&self.pk, longval)
elif PyFloat_Check(o):
if self.use_float:
- fval = o
- ret = msgpack_pack_float(&self.pk, fval)
+ fval = o
+ ret = msgpack_pack_float(&self.pk, fval)
else:
- dval = o
- ret = msgpack_pack_double(&self.pk, dval)
+ dval = o
+ ret = msgpack_pack_double(&self.pk, dval)
elif PyBytes_Check(o):
L = len(o)
- if L > (2**32)-1:
+ if L > (2**32) - 1:
raise ValueError("bytes is too large")
rawval = o
ret = msgpack_pack_bin(&self.pk, L)
@@ -162,10 +167,12 @@ cdef class Packer(object):
ret = msgpack_pack_raw_body(&self.pk, rawval, L)
elif PyUnicode_Check(o):
if not self.encoding:
- raise TypeError("Can't encode unicode string: no encoding is specified")
- o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ raise TypeError("Can't encode unicode string: "
+ "no encoding is specified")
+ o = PyUnicode_AsEncodedString(o, self.encoding,
+ self.unicode_errors)
L = len(o)
- if L > (2**32)-1:
+ if L > (2**32) - 1:
raise ValueError("dict is too large")
rawval = o
ret = msgpack_pack_raw(&self.pk, len(o))
@@ -174,43 +181,43 @@ cdef class Packer(object):
elif PyDict_CheckExact(o):
d = <dict>o
L = len(d)
- if L > (2**32)-1:
+ if L > (2**32) - 1:
raise ValueError("dict is too large")
ret = msgpack_pack_map(&self.pk, L)
if ret == 0:
for k, v in d.iteritems():
- ret = self._pack(k, nest_limit-1)
+ ret = self._pack(k, nest_limit - 1)
if ret != 0: break
- ret = self._pack(v, nest_limit-1)
+ ret = self._pack(v, nest_limit - 1)
if ret != 0: break
elif PyDict_Check(o):
L = len(o)
- if L > (2**32)-1:
+ if L > (2**32) - 1:
raise ValueError("dict is too large")
ret = msgpack_pack_map(&self.pk, L)
if ret == 0:
for k, v in o.items():
- ret = self._pack(k, nest_limit-1)
+ ret = self._pack(k, nest_limit - 1)
if ret != 0: break
- ret = self._pack(v, nest_limit-1)
+ ret = self._pack(v, nest_limit - 1)
if ret != 0: break
elif isinstance(o, ExtType):
# This should be before Tuple because ExtType is namedtuple.
longval = o.code
rawval = o.data
L = len(o.data)
- if L > (2**32)-1:
+ if L > (2**32) - 1:
raise ValueError("EXT data is too large")
ret = msgpack_pack_ext(&self.pk, longval, L)
ret = msgpack_pack_raw_body(&self.pk, rawval, L)
elif PyTuple_Check(o) or PyList_Check(o):
L = len(o)
- if L > (2**32)-1:
+ if L > (2**32) - 1:
raise ValueError("list is too large")
ret = msgpack_pack_array(&self.pk, L)
if ret == 0:
for v in o:
- ret = self._pack(v, nest_limit-1)
+ ret = self._pack(v, nest_limit - 1)
if ret != 0: break
elif not default_used and self._default:
o = self._default(o)
@@ -237,7 +244,7 @@ cdef class Packer(object):
msgpack_pack_raw_body(&self.pk, data, len(data))
def pack_array_header(self, size_t size):
- if size > (2**32-1):
+ if size > (2**32) - 1:
raise ValueError
cdef int ret = msgpack_pack_array(&self.pk, size)
if ret == -1:
@@ -250,7 +257,7 @@ cdef class Packer(object):
return buf
def pack_map_header(self, size_t size):
- if size > (2**32-1):
+ if size > (2**32) - 1:
raise ValueError
cdef int ret = msgpack_pack_map(&self.pk, size)
if ret == -1:
diff --git a/pandas/msgpack/_unpacker.pyx b/pandas/msgpack/_unpacker.pyx
index f68bf3369427c..6f23a24adde6c 100644
--- a/pandas/msgpack/_unpacker.pyx
+++ b/pandas/msgpack/_unpacker.pyx
@@ -4,18 +4,15 @@
from cpython cimport *
cdef extern from "Python.h":
ctypedef struct PyObject
- cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1
+ cdef int PyObject_AsReadBuffer(object o, const void** buff,
+ Py_ssize_t* buf_len) except -1
from libc.stdlib cimport *
from libc.string cimport *
from libc.limits cimport *
-from pandas.msgpack.exceptions import (
- BufferFull,
- OutOfData,
- UnpackValueError,
- ExtraData,
- )
+from pandas.msgpack.exceptions import (BufferFull, OutOfData,
+ UnpackValueError, ExtraData)
from pandas.msgpack import ExtType
@@ -65,7 +62,8 @@ cdef inline init_ctx(unpack_context *ctx,
ctx.user.max_ext_len = max_ext_len
if object_hook is not None and object_pairs_hook is not None:
- raise TypeError("object_pairs_hook and object_hook are mutually exclusive.")
+ raise TypeError("object_pairs_hook and object_hook "
+ "are mutually exclusive.")
if object_hook is not None:
if not PyCallable_Check(object_hook):
@@ -93,8 +91,11 @@ cdef inline init_ctx(unpack_context *ctx,
ctx.user.encoding = encoding
ctx.user.unicode_errors = unicode_errors
+
def default_read_extended_type(typecode, data):
- raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode)
+ raise NotImplementedError("Cannot decode extended type "
+ "with typecode=%d" % typecode)
+
def unpackb(object packed, object object_hook=None, object list_hook=None,
bint use_list=1, encoding=None, unicode_errors="strict",
@@ -139,7 +140,8 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
if ret == 1:
obj = unpack_data(&ctx)
if off < buf_len:
- raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
+ raise ExtraData(obj, PyBytes_FromStringAndSize(
+ buf + off, buf_len - off))
return obj
else:
raise UnpackValueError("Unpack failed: error = %d" % (ret,))
@@ -157,9 +159,9 @@ def unpack(object stream, object object_hook=None, object list_hook=None,
See :class:`Unpacker` for options.
"""
return unpackb(stream.read(), use_list=use_list,
- object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
- encoding=encoding, unicode_errors=unicode_errors,
- )
+ object_hook=object_hook,
+ object_pairs_hook=object_pairs_hook, list_hook=list_hook,
+ encoding=encoding, unicode_errors=unicode_errors)
cdef class Unpacker(object):
@@ -169,10 +171,12 @@ cdef class Unpacker(object):
:param file_like:
File-like object having `.read(n)` method.
- If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+ If specified, unpacker reads serialized data from it and
+ :meth:`feed()` is not usable.
:param int read_size:
- Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`)
+ Used as `file_like.read(read_size)`. (default:
+ `min(1024**2, max_buffer_size)`)
:param bool use_list:
If true, unpack msgpack array to Python list.
@@ -184,9 +188,8 @@ cdef class Unpacker(object):
(See also simplejson)
:param callable object_pairs_hook:
- When specified, it should be callable.
- Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
- (See also simplejson)
+ When specified, it should be callable. Unpacker calls it with a list
+ of key-value pairs after unpacking msgpack map. (See also simplejson)
:param str encoding:
Encoding used for decoding msgpack raw.
@@ -197,9 +200,10 @@ cdef class Unpacker(object):
(default: `'strict'`)
:param int max_buffer_size:
- Limits size of data waiting unpacked. 0 means system's INT_MAX (default).
- Raises `BufferFull` exception when it is insufficient.
- You shoud set this parameter when unpacking data from untrasted source.
+ Limits size of data waiting unpacked. 0 means system's
+ INT_MAX (default). Raises `BufferFull` exception when it
+ is insufficient. You shoud set this parameter when unpacking
+ data from untrasted source.
:param int max_str_len:
Limits max length of str. (default: 2**31-1)
@@ -250,9 +254,9 @@ cdef class Unpacker(object):
self.buf = NULL
def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
- object object_hook=None, object object_pairs_hook=None, object list_hook=None,
- encoding=None, unicode_errors='strict', int max_buffer_size=0,
- object ext_hook=ExtType,
+ object object_hook=None, object object_pairs_hook=None,
+ object list_hook=None, encoding=None, unicode_errors='strict',
+ int max_buffer_size=0, object ext_hook=ExtType,
Py_ssize_t max_str_len=2147483647, # 2**32-1
Py_ssize_t max_bin_len=2147483647,
Py_ssize_t max_array_len=2147483647,
@@ -274,7 +278,8 @@ cdef class Unpacker(object):
if not max_buffer_size:
max_buffer_size = INT_MAX
if read_size > max_buffer_size:
- raise ValueError("read_size should be less or equal to max_buffer_size")
+ raise ValueError("read_size should be less or "
+ "equal to max_buffer_size")
if not read_size:
read_size = min(max_buffer_size, 1024**2)
self.max_buffer_size = max_buffer_size
@@ -313,8 +318,8 @@ cdef class Unpacker(object):
"""Append `next_bytes` to internal buffer."""
cdef Py_buffer pybuff
if self.file_like is not None:
- raise AssertionError(
- "unpacker.feed() is not be able to use with `file_like`.")
+ raise AssertionError("unpacker.feed() is not be able "
+ "to use with `file_like`.")
PyObject_GetBuffer(next_bytes, &pybuff, PyBUF_SIMPLE)
try:
self.append_buffer(<char*>pybuff.buf, pybuff.len)
@@ -338,10 +343,10 @@ cdef class Unpacker(object):
head = 0
else:
# expand buffer.
- new_size = (tail-head) + _buf_len
+ new_size = (tail - head) + _buf_len
if new_size > self.max_buffer_size:
raise BufferFull
- new_size = min(new_size*2, self.max_buffer_size)
+ new_size = min(new_size * 2, self.max_buffer_size)
new_buf = <char*>malloc(new_size)
if new_buf == NULL:
# self.buf still holds old buffer and will be freed during
@@ -363,15 +368,16 @@ cdef class Unpacker(object):
cdef read_from_file(self):
next_bytes = self.file_like_read(
- min(self.read_size,
- self.max_buffer_size - (self.buf_tail - self.buf_head)
- ))
+ min(self.read_size,
+ self.max_buffer_size - (self.buf_tail - self.buf_head)))
if next_bytes:
- self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
+ self.append_buffer(PyBytes_AsString(next_bytes),
+ PyBytes_Size(next_bytes))
else:
self.file_like = None
- cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
+ cdef object _unpack(self, execute_fn execute,
+ object write_bytes, bint iter=0):
cdef int ret
cdef object obj
cdef size_t prev_head
@@ -389,7 +395,8 @@ cdef class Unpacker(object):
ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
if write_bytes is not None:
- write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
+ write_bytes(PyBytes_FromStringAndSize(
+ self.buf + prev_head, self.buf_head - prev_head))
if ret == 1:
obj = unpack_data(&self.ctx)
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 5d8ab7213a7b6..12525c7a9c587 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -106,7 +106,7 @@ cdef extern from "parser/tokenizer.h":
enum: ERROR_OVERFLOW
ctypedef void* (*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
- int *status)
+ int *status)
ctypedef int (*io_cleanup)(void *src)
ctypedef struct parser_t:
@@ -410,7 +410,6 @@ cdef class TextReader:
self._set_quoting(quotechar, quoting)
-
dtype_order = ['int64', 'float64', 'bool', 'object']
if quoting == QUOTE_NONNUMERIC:
# consistent with csv module semantics, cast all to float
@@ -517,7 +516,7 @@ cdef class TextReader:
# need to artifically skip the final line
# which is still a header line
header = list(header)
- header.append(header[-1]+1)
+ header.append(header[-1] + 1)
self.parser.header_start = header[0]
self.parser.header_end = header[-1]
@@ -663,7 +662,8 @@ cdef class TextReader:
if ptr == NULL:
if not os.path.exists(source):
- raise compat.FileNotFoundError('File %s does not exist' % source)
+ raise compat.FileNotFoundError(
+ 'File %s does not exist' % source)
raise IOError('Initializing from file failed')
self.parser.source = ptr
@@ -689,7 +689,7 @@ cdef class TextReader:
# header is now a list of lists, so field_count should use header[0]
cdef:
- size_t i, start, data_line, field_count, passed_count, hr, unnamed_count
+ size_t i, start, data_line, field_count, passed_count, hr, unnamed_count # noqa
char *word
object name
int status
@@ -716,10 +716,12 @@ cdef class TextReader:
# e.g., if header=3 and file only has 2 lines
elif self.parser.lines < hr + 1:
msg = self.orig_header
- if isinstance(msg,list):
- msg = "[%s], len of %d," % (','.join([ str(m) for m in msg ]),len(msg))
- raise CParserError('Passed header=%s but only %d lines in file'
- % (msg, self.parser.lines))
+ if isinstance(msg, list):
+ msg = "[%s], len of %d," % (
+ ','.join([ str(m) for m in msg ]), len(msg))
+ raise CParserError(
+ 'Passed header=%s but only %d lines in file'
+ % (msg, self.parser.lines))
else:
field_count = self.parser.line_fields[hr]
@@ -740,13 +742,14 @@ cdef class TextReader:
if name == '':
if self.has_mi_columns:
- name = 'Unnamed: %d_level_%d' % (i,level)
+ name = 'Unnamed: %d_level_%d' % (i, level)
else:
name = 'Unnamed: %d' % i
unnamed_count += 1
count = counts.get(name, 0)
- if count > 0 and self.mangle_dupe_cols and not self.has_mi_columns:
+ if (count > 0 and self.mangle_dupe_cols
+ and not self.has_mi_columns):
this_header.append('%s.%d' % (name, count))
else:
this_header.append(name)
@@ -754,12 +757,13 @@ cdef class TextReader:
if self.has_mi_columns:
- # if we have grabbed an extra line, but its not in our format
- # so save in the buffer, and create an blank extra line for the rest of the
- # parsing code
+ # If we have grabbed an extra line, but it's not in our
+ # format, save in the buffer, and create an blank extra
+ # line for the rest of the parsing code.
if hr == self.header[-1]:
lc = len(this_header)
- ic = len(self.index_col) if self.index_col is not None else 0
+ ic = (len(self.index_col) if self.index_col
+ is not None else 0)
if lc != unnamed_count and lc - ic > unnamed_count:
hr -= 1
self.parser_start -= 1
@@ -993,20 +997,15 @@ cdef class TextReader:
# if footer > 0:
# end -= footer
- #print >> sys.stderr, self.table_width
- #print >> sys.stderr, self.leading_cols
- #print >> sys.stderr, self.parser.lines
- #print >> sys.stderr, start
- #print >> sys.stderr, end
- #print >> sys.stderr, self.header
- #print >> sys.stderr, "index"
num_cols = -1
for i in range(self.parser.lines):
- num_cols = (num_cols < self.parser.line_fields[i]) * self.parser.line_fields[i] +\
+ num_cols = (num_cols < self.parser.line_fields[i]) * \
+ self.parser.line_fields[i] + \
(num_cols >= self.parser.line_fields[i]) * num_cols
if self.table_width - self.leading_cols > num_cols:
- raise CParserError("Too many columns specified: expected %s and found %s" %
+ raise CParserError(
+ "Too many columns specified: expected %s and found %s" %
(self.table_width - self.leading_cols, num_cols))
results = {}
@@ -1045,8 +1044,8 @@ cdef class TextReader:
continue
# Should return as the desired dtype (inferred or specified)
- col_res, na_count = self._convert_tokens(i, start, end, name,
- na_filter, na_hashset, na_flist)
+ col_res, na_count = self._convert_tokens(
+ i, start, end, name, na_filter, na_hashset, na_flist)
if na_filter:
self._free_na_set(na_hashset)
@@ -1054,8 +1053,10 @@ cdef class TextReader:
if upcast_na and na_count > 0:
col_res = _maybe_upcast(col_res)
- if issubclass(col_res.dtype.type, np.integer) and self.compact_ints:
- col_res = lib.downcast_int64(col_res, na_values, self.use_unsigned)
+ if issubclass(col_res.dtype.type,
+ np.integer) and self.compact_ints:
+ col_res = lib.downcast_int64(col_res, na_values,
+ self.use_unsigned)
if col_res is None:
raise CParserError('Unable to parse column %d' % i)
@@ -1087,10 +1088,12 @@ cdef class TextReader:
col_dtype = self.dtype
if col_dtype is not None:
- col_res, na_count = self._convert_with_dtype(col_dtype, i, start, end,
- na_filter, 1, na_hashset, na_flist)
+ col_res, na_count = self._convert_with_dtype(
+ col_dtype, i, start, end, na_filter,
+ 1, na_hashset, na_flist)
- # fallback on the parse (e.g. we requested int dtype, but its actually a float)
+ # Fallback on the parse (e.g. we requested int dtype,
+ # but its actually a float).
if col_res is not None:
return col_res, na_count
@@ -1104,7 +1107,8 @@ cdef class TextReader:
dt, i, start, end, na_filter, 0, na_hashset, na_flist)
except OverflowError:
col_res, na_count = self._convert_with_dtype(
- np.dtype('object'), i, start, end, na_filter, 0, na_hashset, na_flist)
+ np.dtype('object'), i, start, end, na_filter,
+ 0, na_hashset, na_flist)
if col_res is not None:
break
@@ -1113,7 +1117,7 @@ cdef class TextReader:
# only allow safe casts, eg. with a nan you cannot safely cast to int
if col_res is not None and col_dtype is not None:
try:
- col_res = col_res.astype(col_dtype,casting='safe')
+ col_res = col_res.astype(col_dtype, casting='safe')
except TypeError:
# float -> int conversions can fail the above
@@ -1121,12 +1125,13 @@ cdef class TextReader:
col_res_orig = col_res
col_res = col_res.astype(col_dtype)
if (col_res != col_res_orig).any():
- raise ValueError("cannot safely convert passed user dtype of "
- "{col_dtype} for {col_res} dtyped data in "
- "column {column}".format(col_dtype=col_dtype,
- col_res=col_res_orig.dtype.name,
- column=i))
-
+ raise ValueError(
+ "cannot safely convert passed user dtype of "
+ "{col_dtype} for {col_res} dtyped data in "
+ "column {column}".format(
+ col_dtype=col_dtype,
+ col_res=col_res_orig.dtype.name,
+ column=i))
return col_res, na_count
@@ -1137,8 +1142,8 @@ cdef class TextReader:
kh_str_t *na_hashset,
object na_flist):
if is_integer_dtype(dtype):
- result, na_count = _try_int64(self.parser, i, start, end, na_filter,
- na_hashset)
+ result, na_count = _try_int64(self.parser, i, start,
+ end, na_filter, na_hashset)
if user_dtype and na_count is not None:
if na_count > 0:
raise ValueError("Integer column has NA values in "
@@ -1175,15 +1180,16 @@ cdef class TextReader:
elif dtype.kind == 'U':
width = dtype.itemsize
if width > 0:
- raise TypeError("the dtype %s is not supported for parsing" % dtype)
+ raise TypeError("the dtype %s is not "
+ "supported for parsing" % dtype)
# unicode variable width
return self._string_convert(i, start, end, na_filter,
na_hashset)
elif is_categorical_dtype(dtype):
- codes, cats, na_count = _categorical_convert(self.parser, i, start,
- end, na_filter, na_hashset,
- self.c_encoding)
+ codes, cats, na_count = _categorical_convert(
+ self.parser, i, start, end, na_filter,
+ na_hashset, self.c_encoding)
# sort categories and recode if necessary
cats = Index(cats)
if not cats.is_monotonic_increasing:
@@ -1198,10 +1204,12 @@ cdef class TextReader:
return self._string_convert(i, start, end, na_filter,
na_hashset)
elif is_datetime64_dtype(dtype):
- raise TypeError("the dtype %s is not supported for parsing, "
- "pass this column using parse_dates instead" % dtype)
+ raise TypeError("the dtype %s is not supported "
+ "for parsing, pass this column "
+ "using parse_dates instead" % dtype)
else:
- raise TypeError("the dtype %s is not supported for parsing" % dtype)
+ raise TypeError("the dtype %s is not "
+ "supported for parsing" % dtype)
cdef _string_convert(self, Py_ssize_t i, int start, int end,
bint na_filter, kh_str_t *na_hashset):
@@ -1218,7 +1226,6 @@ cdef class TextReader:
return _string_box_factorize(self.parser, i, start, end,
na_filter, na_hashset)
-
def _get_converter(self, i, name):
if self.converters is None:
return None
@@ -1330,9 +1337,9 @@ def _maybe_upcast(arr):
return arr
cdef enum StringPath:
- CSTRING
- UTF8
- ENCODED
+ CSTRING
+ UTF8
+ ENCODED
# factored out logic to pick string converter
cdef inline StringPath _string_path(char *encoding):
@@ -1445,7 +1452,7 @@ cdef _string_box_utf8(parser_t *parser, int col,
pyval = PyUnicode_FromString(word)
k = kh_put_strbox(table, word, &ret)
- table.vals[k] = <PyObject*> pyval
+ table.vals[k] = <PyObject *> pyval
result[i] = pyval
@@ -1503,7 +1510,7 @@ cdef _string_box_decode(parser_t *parser, int col,
pyval = PyUnicode_Decode(word, size, encoding, errors)
k = kh_put_strbox(table, word, &ret)
- table.vals[k] = <PyObject*> pyval
+ table.vals[k] = <PyObject *> pyval
result[i] = pyval
@@ -1511,6 +1518,7 @@ cdef _string_box_decode(parser_t *parser, int col,
return result, na_count
+
@cython.boundscheck(False)
cdef _categorical_convert(parser_t *parser, int col,
int line_start, int line_end,
@@ -1570,7 +1578,8 @@ cdef _categorical_convert(parser_t *parser, int col,
for k in range(table.n_buckets):
if kh_exist_str(table, k):
size = strlen(table.keys[k])
- result[table.vals[k]] = PyUnicode_Decode(table.keys[k], size, encoding, errors)
+ result[table.vals[k]] = PyUnicode_Decode(
+ table.keys[k], size, encoding, errors)
elif path == UTF8:
for k in range(table.n_buckets):
if kh_exist_str(table, k):
@@ -1600,8 +1609,9 @@ cdef _to_fw_string(parser_t *parser, int col, int line_start,
return result
-cdef inline void _to_fw_string_nogil(parser_t *parser, int col, int line_start,
- int line_end, size_t width, char *data) nogil:
+cdef inline void _to_fw_string_nogil(parser_t *parser, int col,
+ int line_start, int line_end,
+ size_t width, char *data) nogil:
cdef:
Py_ssize_t i
coliter_t it
@@ -1639,17 +1649,20 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
na_fset = kset_float64_from_list(na_flist)
with nogil:
error = _try_double_nogil(parser, col, line_start, line_end,
- na_filter, na_hashset, use_na_flist, na_fset, NA, data, &na_count)
+ na_filter, na_hashset, use_na_flist,
+ na_fset, NA, data, &na_count)
kh_destroy_float64(na_fset)
if error != 0:
return None, None
return result, na_count
-cdef inline int _try_double_nogil(parser_t *parser, int col, int line_start, int line_end,
- bint na_filter, kh_str_t *na_hashset, bint use_na_flist,
+cdef inline int _try_double_nogil(parser_t *parser, int col,
+ int line_start, int line_end,
+ bint na_filter, kh_str_t *na_hashset,
+ bint use_na_flist,
const kh_float64_t *na_flist,
- double NA,
- double *data, int *na_count) nogil:
+ double NA, double *data,
+ int *na_count) nogil:
cdef:
int error,
size_t i
@@ -1674,15 +1687,17 @@ cdef inline int _try_double_nogil(parser_t *parser, int col, int line_start, int
na_count[0] += 1
data[0] = NA
else:
- data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci,
- parser.thousands, 1)
+ data[0] = parser.converter(word, &p_end, parser.decimal,
+ parser.sci, parser.thousands, 1)
if errno != 0 or p_end[0] or p_end == word:
- if strcasecmp(word, cinf) == 0 or strcasecmp(word, cposinf) == 0:
+ if (strcasecmp(word, cinf) == 0 or
+ strcasecmp(word, cposinf) == 0):
data[0] = INF
elif strcasecmp(word, cneginf) == 0:
data[0] = NEGINF
else:
- # Just return a non-zero value since the errno is never consumed.
+ # Just return a non-zero value since
+ # the errno is never consumed.
return 1
if use_na_flist:
k64 = kh_get_float64(na_flist, data[0])
@@ -1693,15 +1708,17 @@ cdef inline int _try_double_nogil(parser_t *parser, int col, int line_start, int
else:
for i in range(lines):
COLITER_NEXT(it, word)
- data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci,
- parser.thousands, 1)
+ data[0] = parser.converter(word, &p_end, parser.decimal,
+ parser.sci, parser.thousands, 1)
if errno != 0 or p_end[0] or p_end == word:
- if strcasecmp(word, cinf) == 0 or strcasecmp(word, cposinf) == 0:
+ if (strcasecmp(word, cinf) == 0 or
+ strcasecmp(word, cposinf) == 0):
data[0] = INF
elif strcasecmp(word, cneginf) == 0:
data[0] = NEGINF
else:
- # Just return a non-zero value since the errno is never consumed.
+ # Just return a non-zero value since
+ # the errno is never consumed.
return 1
data += 1
@@ -1724,7 +1741,8 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end,
data = <int64_t *> result.data
coliter_setup(&it, parser, col, line_start)
with nogil:
- error = _try_int64_nogil(parser, col, line_start, line_end, na_filter, na_hashset, NA, data, &na_count)
+ error = _try_int64_nogil(parser, col, line_start, line_end,
+ na_filter, na_hashset, NA, data, &na_count)
if error != 0:
if error == ERROR_OVERFLOW:
# Can't get the word variable
@@ -1733,9 +1751,10 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end,
return result, na_count
-cdef inline int _try_int64_nogil(parser_t *parser, int col, int line_start, int line_end,
- bint na_filter, const kh_str_t *na_hashset, int64_t NA, int64_t *data,
- int *na_count) nogil:
+cdef inline int _try_int64_nogil(parser_t *parser, int col, int line_start,
+ int line_end, bint na_filter,
+ const kh_str_t *na_hashset, int64_t NA,
+ int64_t *data, int *na_count) nogil:
cdef:
int error
size_t i
@@ -1785,14 +1804,18 @@ cdef _try_bool(parser_t *parser, int col, int line_start, int line_end,
data = <uint8_t *> result.data
with nogil:
- error = _try_bool_nogil(parser, col, line_start, line_end, na_filter, na_hashset, NA, data, &na_count)
+ error = _try_bool_nogil(parser, col, line_start,
+ line_end, na_filter,
+ na_hashset, NA, data,
+ &na_count)
if error != 0:
return None, None
return result.view(np.bool_), na_count
-cdef inline int _try_bool_nogil(parser_t *parser, int col, int line_start, int line_end,
- bint na_filter, const kh_str_t *na_hashset, uint8_t NA, uint8_t *data,
- int *na_count) nogil:
+cdef inline int _try_bool_nogil(parser_t *parser, int col, int line_start,
+ int line_end, bint na_filter,
+ const kh_str_t *na_hashset, uint8_t NA,
+ uint8_t *data, int *na_count) nogil:
cdef:
int error
size_t lines = line_end - line_start
@@ -1832,7 +1855,8 @@ cdef inline int _try_bool_nogil(parser_t *parser, int col, int line_start, int l
cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
bint na_filter, const kh_str_t *na_hashset,
- const kh_str_t *true_hashset, const kh_str_t *false_hashset):
+ const kh_str_t *true_hashset,
+ const kh_str_t *false_hashset):
cdef:
int error, na_count = 0
size_t i, lines
@@ -1848,16 +1872,20 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
result = np.empty(lines, dtype=np.uint8)
data = <uint8_t *> result.data
with nogil:
- error = _try_bool_flex_nogil(parser, col, line_start, line_end, na_filter, na_hashset,
- true_hashset, false_hashset, NA, data, &na_count)
+ error = _try_bool_flex_nogil(parser, col, line_start, line_end,
+ na_filter, na_hashset, true_hashset,
+ false_hashset, NA, data, &na_count)
if error != 0:
return None, None
return result.view(np.bool_), na_count
-cdef inline int _try_bool_flex_nogil(parser_t *parser, int col, int line_start, int line_end,
- bint na_filter, const kh_str_t *na_hashset,
- const kh_str_t *true_hashset, const kh_str_t *false_hashset,
- uint8_t NA, uint8_t *data, int *na_count) nogil:
+cdef inline int _try_bool_flex_nogil(parser_t *parser, int col, int line_start,
+ int line_end, bint na_filter,
+ const kh_str_t *na_hashset,
+ const kh_str_t *true_hashset,
+ const kh_str_t *false_hashset,
+ uint8_t NA, uint8_t *data,
+ int *na_count) nogil:
cdef:
int error = 0
size_t i
@@ -2016,14 +2044,15 @@ def _concatenate_chunks(list chunks):
if warning_columns:
warning_names = ','.join(warning_columns)
- warning_message = " ".join(["Columns (%s) have mixed types." % warning_names,
+ warning_message = " ".join([
+ "Columns (%s) have mixed types." % warning_names,
"Specify dtype option on import or set low_memory=False."
])
warnings.warn(warning_message, DtypeWarning, stacklevel=8)
return result
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# NA values
def _compute_na_values():
int64info = np.iinfo(np.int64)
@@ -2035,17 +2064,17 @@ def _compute_na_values():
uint16info = np.iinfo(np.uint16)
uint8info = np.iinfo(np.uint8)
na_values = {
- np.float64 : np.nan,
- np.int64 : int64info.min,
- np.int32 : int32info.min,
- np.int16 : int16info.min,
- np.int8 : int8info.min,
- np.uint64 : uint64info.max,
- np.uint32 : uint32info.max,
- np.uint16 : uint16info.max,
- np.uint8 : uint8info.max,
- np.bool_ : uint8info.max,
- np.object_ : np.nan # oof
+ np.float64: np.nan,
+ np.int64: int64info.min,
+ np.int32: int32info.min,
+ np.int16: int16info.min,
+ np.int8: int8info.min,
+ np.uint64: uint64info.max,
+ np.uint32: uint32info.max,
+ np.uint16: uint16info.max,
+ np.uint8: uint8info.max,
+ np.bool_: uint8info.max,
+ np.object_: np.nan # oof
}
return na_values
@@ -2128,22 +2157,13 @@ def _to_structured_array(dict columns, object names, object usecols):
stride = dt.itemsize
- # start = time.time()
-
- # we own the data
+ # We own the data.
buf = <char*> malloc(length * stride)
recs = util.sarr_from_data(dt, length, buf)
assert(recs.flags.owndata)
- # buf = <char*> recs.data
- # end = time.time()
- # print 'took %.4f' % (end - start)
-
for i in range(nfields):
- # start = time.clock()
- # name = names[i]
-
# XXX
field_type = fields[fnames[i]]
@@ -2156,9 +2176,6 @@ def _to_structured_array(dict columns, object names, object usecols):
elsize, stride, length,
field_type[0] == np.object_)
- # print 'Transfer of %s took %.4f' % (str(field_type),
- # time.clock() - start)
-
return recs
cdef _fill_structured_column(char *dst, char* src, int elsize,
@@ -2175,7 +2192,6 @@ cdef _fill_structured_column(char *dst, char* src, int elsize,
src += elsize
-
def _maybe_encode(values):
if values is None:
return []
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 62555dc7f178c..4fa730eac0fd1 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -21,15 +21,20 @@ cdef extern from "headers/stdint.h":
enum: INT64_MIN
# core.common import for fast inference checks
+
+
def is_float(object obj):
return util.is_float_object(obj)
+
def is_integer(object obj):
return util.is_integer_object(obj)
+
def is_bool(object obj):
return util.is_bool_object(obj)
+
def is_complex(object obj):
return util.is_complex_object(obj)
@@ -38,33 +43,33 @@ cpdef bint is_period(object val):
return util.is_period_object(val)
_TYPE_MAP = {
- 'categorical' : 'categorical',
- 'category' : 'categorical',
+ 'categorical': 'categorical',
+ 'category': 'categorical',
'int8': 'integer',
'int16': 'integer',
'int32': 'integer',
'int64': 'integer',
- 'i' : 'integer',
+ 'i': 'integer',
'uint8': 'integer',
'uint16': 'integer',
'uint32': 'integer',
'uint64': 'integer',
- 'u' : 'integer',
+ 'u': 'integer',
'float32': 'floating',
'float64': 'floating',
- 'f' : 'floating',
+ 'f': 'floating',
'complex128': 'complex',
- 'c' : 'complex',
+ 'c': 'complex',
'string': 'string' if PY2 else 'bytes',
- 'S' : 'string' if PY2 else 'bytes',
+ 'S': 'string' if PY2 else 'bytes',
'unicode': 'unicode' if PY2 else 'string',
- 'U' : 'unicode' if PY2 else 'string',
+ 'U': 'unicode' if PY2 else 'string',
'bool': 'boolean',
- 'b' : 'boolean',
- 'datetime64[ns]' : 'datetime64',
- 'M' : 'datetime64',
- 'timedelta64[ns]' : 'timedelta64',
- 'm' : 'timedelta64',
+ 'b': 'boolean',
+ 'datetime64[ns]': 'datetime64',
+ 'M': 'datetime64',
+ 'timedelta64[ns]': 'timedelta64',
+ 'm': 'timedelta64',
}
# types only exist on certain platform
@@ -88,12 +93,13 @@ cdef _try_infer_map(v):
""" if its in our map, just return the dtype """
cdef:
object attr, val
- for attr in ['name','kind','base']:
- val = getattr(v.dtype,attr)
+ for attr in ['name', 'kind', 'base']:
+ val = getattr(v.dtype, attr)
if val in _TYPE_MAP:
return _TYPE_MAP[val]
return None
+
def infer_dtype(object _values):
"""
we are coercing to an ndarray here
@@ -107,12 +113,13 @@ def infer_dtype(object _values):
if isinstance(_values, np.ndarray):
values = _values
- elif hasattr(_values,'dtype'):
+ elif hasattr(_values, 'dtype'):
# this will handle ndarray-like
# e.g. categoricals
try:
- values = getattr(_values, '_values', getattr(_values, 'values', _values))
+ values = getattr(_values, '_values', getattr(
+ _values, 'values', _values))
except:
val = _try_infer_map(_values)
if val is not None:
@@ -242,20 +249,21 @@ def is_possible_datetimelike_array(object arr):
for i in range(n):
v = arr[i]
if util.is_string_object(v):
- continue
+ continue
elif util._checknull(v):
- continue
+ continue
elif is_datetime(v):
- seen_datetime=1
+ seen_datetime=1
elif is_timedelta(v):
- seen_timedelta=1
+ seen_timedelta=1
else:
- return False
+ return False
return seen_datetime or seen_timedelta
cdef inline bint is_null_datetimelike(v):
- # determine if we have a null for a timedelta/datetime (or integer versions)x
+ # determine if we have a null for a timedelta/datetime (or integer
+ # versions)x
if util._checknull(v):
return True
elif v is NaT:
@@ -315,6 +323,7 @@ cdef inline bint is_time(object o):
cdef inline bint is_timedelta(object o):
return PyDelta_Check(o) or util.is_timedelta64_object(o)
+
def is_bool_array(ndarray values):
cdef:
Py_ssize_t i, n = len(values)
@@ -335,9 +344,11 @@ def is_bool_array(ndarray values):
else:
return False
+
def is_integer(object o):
return util.is_integer_object(o)
+
def is_integer_array(ndarray values):
cdef:
Py_ssize_t i, n = len(values)
@@ -358,6 +369,7 @@ def is_integer_array(ndarray values):
else:
return False
+
def is_integer_float_array(ndarray values):
cdef:
Py_ssize_t i, n = len(values)
@@ -380,6 +392,7 @@ def is_integer_float_array(ndarray values):
else:
return False
+
def is_float_array(ndarray values):
cdef:
Py_ssize_t i, n = len(values)
@@ -400,6 +413,7 @@ def is_float_array(ndarray values):
else:
return False
+
def is_string_array(ndarray values):
cdef:
Py_ssize_t i, n = len(values)
@@ -421,6 +435,7 @@ def is_string_array(ndarray values):
else:
return False
+
def is_unicode_array(ndarray values):
cdef:
Py_ssize_t i, n = len(values)
@@ -475,11 +490,12 @@ def is_datetime_array(ndarray[object] values):
if is_null_datetime64(v):
# we are a regular null
if util._checknull(v):
- null_count += 1
+ null_count += 1
elif not is_datetime(v):
return False
return null_count != n
+
def is_datetime64_array(ndarray values):
cdef Py_ssize_t i, null_count = 0, n = len(values)
cdef object v
@@ -619,6 +635,7 @@ cdef extern from "parse_helper.h":
cdef int64_t iINT64_MAX = <int64_t> INT64_MAX
cdef int64_t iINT64_MIN = <int64_t> INT64_MIN
+
def maybe_convert_numeric(object[:] values, set na_values,
bint convert_empty=True, bint coerce_numeric=False):
"""
@@ -772,7 +789,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
seen_float = 1
elif util.is_datetime64_object(val):
if convert_datetime:
- idatetimes[i] = convert_to_tsobject(val, None, None, 0, 0).value
+ idatetimes[i] = convert_to_tsobject(
+ val, None, None, 0, 0).value
seen_datetime = 1
else:
seen_object = 1
@@ -807,7 +825,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
break
else:
seen_datetime = 1
- idatetimes[i] = convert_to_tsobject(val, None, None, 0, 0).value
+ idatetimes[i] = convert_to_tsobject(
+ val, None, None, 0, 0).value
else:
seen_object = 1
break
@@ -857,7 +876,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
return floats
elif seen_int:
return ints
- elif not seen_datetime and not seen_numeric and not seen_timedelta:
+ elif (not seen_datetime and not seen_numeric
+ and not seen_timedelta):
return bools.view(np.bool_)
else:
@@ -887,7 +907,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
return floats
elif seen_int:
return ints
- elif not seen_datetime and not seen_numeric and not seen_timedelta:
+ elif (not seen_datetime and not seen_numeric
+ and not seen_timedelta):
return bools.view(np.bool_)
return objects
@@ -896,8 +917,9 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
def convert_sql_column(x):
return maybe_convert_objects(x, try_float=1)
+
def try_parse_dates(ndarray[object] values, parser=None,
- dayfirst=False,default=None):
+ dayfirst=False, default=None):
cdef:
Py_ssize_t i, n
ndarray[object] result
@@ -907,12 +929,12 @@ def try_parse_dates(ndarray[object] values, parser=None,
if parser is None:
if default is None: # GH2618
- date=datetime.now()
- default=datetime(date.year,date.month,1)
+ date=datetime.now()
+ default=datetime(date.year, date.month, 1)
try:
from dateutil.parser import parse
- parse_date = lambda x: parse(x, dayfirst=dayfirst,default=default)
+ parse_date = lambda x: parse(x, dayfirst=dayfirst, default=default)
except ImportError: # pragma: no cover
def parse_date(s):
try:
@@ -944,9 +966,10 @@ def try_parse_dates(ndarray[object] values, parser=None,
return result
+
def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times,
date_parser=None, time_parser=None,
- dayfirst=False,default=None):
+ dayfirst=False, default=None):
cdef:
Py_ssize_t i, n
ndarray[object] result
@@ -960,8 +983,8 @@ def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times,
if date_parser is None:
if default is None: # GH2618
- date=datetime.now()
- default=datetime(date.year,date.month,1)
+ date=datetime.now()
+ default=datetime(date.year, date.month, 1)
try:
from dateutil.parser import parse
@@ -1016,6 +1039,7 @@ def try_parse_year_month_day(ndarray[object] years, ndarray[object] months,
return result
+
def try_parse_datetime_components(ndarray[object] years,
ndarray[object] months,
ndarray[object] days,
@@ -1052,6 +1076,7 @@ def try_parse_datetime_components(ndarray[object] years,
return result
+
def sanitize_objects(ndarray[object] values, set na_values,
convert_empty=True):
cdef:
@@ -1075,6 +1100,7 @@ def sanitize_objects(ndarray[object] values, set na_values,
return na_count
+
def maybe_convert_bool(ndarray[object] arr,
true_values=None, false_values=None):
cdef:
@@ -1166,6 +1192,7 @@ def map_infer_mask(ndarray arr, object f, ndarray[uint8_t] mask,
return result
+
def map_infer(ndarray arr, object f, bint convert=1):
"""
Substitute for np.vectorize with pandas-friendly dtype inference
@@ -1246,6 +1273,7 @@ def to_object_array(list rows, int min_width=0):
return result
+
def tuples_to_object_array(ndarray[object] tuples):
cdef:
Py_ssize_t i, j, n, k, tmp
@@ -1262,6 +1290,7 @@ def tuples_to_object_array(ndarray[object] tuples):
return result
+
def to_object_array_tuples(list rows):
cdef:
Py_ssize_t i, j, n, k, tmp
diff --git a/pandas/src/offsets.pyx b/pandas/src/offsets.pyx
index 096198c8a05fa..c963e256d0aa5 100644
--- a/pandas/src/offsets.pyx
+++ b/pandas/src/offsets.pyx
@@ -162,7 +162,7 @@ cdef class YearOffset(_Offset):
cpdef prev(self):
cdef int64_t days
- days = 365 + is_leapyear(self.y - (1-self.ly))
+ days = 365 + is_leapyear(self.y - (1 - self.ly))
self.t -= days * us_in_day
self.y -= 1
@@ -204,8 +204,8 @@ cdef class MonthOffset(_Offset):
self.t = ts.value + (self.dayoffset * us_in_day)
# for day counting
- self.m = ts.dts.month - 1
- self.y = ts.dts.year
+ self.m = ts.dts.month - 1
+ self.y = ts.dts.year
self.ly = is_leapyear(self.y)
if self.biz != 0:
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index bb0108fcb141c..5565f25937394 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -80,17 +80,21 @@ cdef extern from "period_helper.h":
ctypedef int64_t (*freq_conv_func)(int64_t, char, asfreq_info*)
void initialize_daytime_conversion_factor_matrix()
- int64_t asfreq(int64_t dtordinal, int freq1, int freq2, char relation) except INT32_MIN
+ int64_t asfreq(int64_t dtordinal, int freq1, int freq2,
+ char relation) except INT32_MIN
freq_conv_func get_asfreq_func(int fromFreq, int toFreq)
void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info)
int64_t get_period_ordinal(int year, int month, int day,
- int hour, int minute, int second, int microseconds, int picoseconds,
- int freq) nogil except INT32_MIN
+ int hour, int minute, int second,
+ int microseconds, int picoseconds,
+ int freq) nogil except INT32_MIN
- int64_t get_python_ordinal(int64_t period_ordinal, int freq) except INT32_MIN
+ int64_t get_python_ordinal(int64_t period_ordinal,
+ int freq) except INT32_MIN
- int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil except INT32_MIN
+ int get_date_info(int64_t ordinal, int freq,
+ date_info *dinfo) nogil except INT32_MIN
double getAbsTime(int, int64_t, int64_t)
int pyear(int64_t ordinal, int freq) except INT32_MIN
@@ -134,6 +138,7 @@ cdef inline int64_t remove_mult(int64_t period_ord_w_mult, int64_t mult):
return period_ord_w_mult * mult + 1;
+
@cython.wraparound(False)
@cython.boundscheck(False)
def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
@@ -158,11 +163,13 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
continue
pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts)
out[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ dts.hour, dts.min, dts.sec,
+ dts.us, dts.ps, freq)
else:
out = localize_dt64arr_to_period(dtarr, freq, tz)
return out
+
@cython.wraparound(False)
@cython.boundscheck(False)
def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq):
@@ -212,6 +219,7 @@ cpdef int64_t period_asfreq(int64_t period_ordinal, int freq1, int freq2,
return retval
+
def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
"""
Convert int64-array of period ordinals from one frequency to another, and
@@ -254,7 +262,9 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
return result
-def period_ordinal(int y, int m, int d, int h, int min, int s, int us, int ps, int freq):
+
+def period_ordinal(int y, int m, int d, int h, int min,
+ int s, int us, int ps, int freq):
cdef:
int64_t ordinal
@@ -284,6 +294,7 @@ cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil:
return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+
def period_format(int64_t value, int freq, object fmt=None):
cdef:
int freq_group
@@ -332,7 +343,8 @@ cdef list extra_fmts = [(b"%q", b"^`AB`^"),
(b"%u", b"^`IJ`^"),
(b"%n", b"^`KL`^")]
-cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^", "^`GH`^", "^`IJ`^", "^`KL`^"]
+cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^",
+ "^`GH`^", "^`IJ`^", "^`KL`^"]
cdef object _period_strftime(int64_t value, int freq, object fmt):
import sys
@@ -390,6 +402,7 @@ cdef object _period_strftime(int64_t value, int freq, object fmt):
ctypedef int (*accessor)(int64_t ordinal, int freq) except INT32_MIN
+
def get_period_field(int code, int64_t value, int freq):
cdef accessor f = _get_accessor_func(code)
if f is NULL:
@@ -398,6 +411,7 @@ def get_period_field(int code, int64_t value, int freq):
return np.nan
return f(value, freq)
+
def get_period_field_arr(int code, ndarray[int64_t] arr, int freq):
cdef:
Py_ssize_t i, sz
@@ -420,7 +434,6 @@ def get_period_field_arr(int code, ndarray[int64_t] arr, int freq):
return out
-
cdef accessor _get_accessor_func(int code):
if code == 0:
return &pyear
@@ -571,7 +584,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz):
pos = _pos
# statictzinfo
- if typ not in ['pytz','dateutil']:
+ if typ not in ['pytz', 'dateutil']:
for i in range(n):
if stamps[i] == NPY_NAT:
continue
@@ -613,7 +626,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
continue
pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ dts.hour, dts.min, dts.sec,
+ dts.us, dts.ps, freq)
elif _is_tzlocal(tz):
for i in range(n):
@@ -628,7 +642,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ dts.hour, dts.min, dts.sec,
+ dts.us, dts.ps, freq)
else:
# Adjust datetime64 timestamp, recompute datetimestruct
trans, deltas, typ = _get_dst_info(tz)
@@ -639,7 +654,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pos = _pos
# statictzinfo
- if typ not in ['pytz','dateutil']:
+ if typ not in ['pytz', 'dateutil']:
for i in range(n):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
@@ -647,7 +662,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pandas_datetime_to_datetimestruct(stamps[i] + deltas[0],
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ dts.hour, dts.min, dts.sec,
+ dts.us, dts.ps, freq)
else:
for i in range(n):
if stamps[i] == NPY_NAT:
@@ -656,13 +672,15 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
pandas_datetime_to_datetimestruct(stamps[i] + deltas[pos[i]],
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ dts.hour, dts.min, dts.sec,
+ dts.us, dts.ps, freq)
return result
_DIFFERENT_FREQ = "Input has different freq={1} from Period(freq={0})"
-_DIFFERENT_FREQ_INDEX = "Input has different freq={1} from PeriodIndex(freq={0})"
+_DIFFERENT_FREQ_INDEX = ("Input has different freq={1} "
+ "from PeriodIndex(freq={0})")
class IncompatibleFrequency(ValueError):
@@ -675,7 +693,7 @@ cdef class _Period(object):
int64_t ordinal
object freq
- _comparables = ['name','freqstr']
+ _comparables = ['name', 'freqstr']
_typ = 'period'
@classmethod
@@ -695,7 +713,9 @@ cdef class _Period(object):
@classmethod
def _from_ordinal(cls, ordinal, freq):
- """ fast creation from an ordinal and freq that are already validated! """
+ """
+ Fast creation from an ordinal and freq that are already validated!
+ """
if ordinal == tslib.iNaT:
return tslib.NaT
else:
@@ -727,7 +747,8 @@ cdef class _Period(object):
return hash((self.ordinal, self.freqstr))
def _add_delta(self, other):
- if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
+ if isinstance(other, (timedelta, np.timedelta64,
+ offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
@@ -752,7 +773,8 @@ cdef class _Period(object):
def __add__(self, other):
if isinstance(self, Period):
if isinstance(other, (timedelta, np.timedelta64,
- offsets.Tick, offsets.DateOffset, Timedelta)):
+ offsets.Tick, offsets.DateOffset,
+ Timedelta)):
return self._add_delta(other)
elif other is tslib.NaT:
return tslib.NaT
@@ -769,7 +791,8 @@ cdef class _Period(object):
def __sub__(self, other):
if isinstance(self, Period):
if isinstance(other, (timedelta, np.timedelta64,
- offsets.Tick, offsets.DateOffset, Timedelta)):
+ offsets.Tick, offsets.DateOffset,
+ Timedelta)):
neg_other = -other
return self + neg_other
elif lib.is_integer(other):
@@ -1138,8 +1161,9 @@ class Period(_Period):
raise ValueError('Must supply freq for ordinal value')
elif value is None:
- if (year is None and month is None and quarter is None and
- day is None and hour is None and minute is None and second is None):
+ if (year is None and month is None and
+ quarter is None and day is None and
+ hour is None and minute is None and second is None):
ordinal = tslib.iNaT
else:
if freq is None:
@@ -1157,7 +1181,8 @@ class Period(_Period):
elif isinstance(value, Period):
other = value
- if freq is None or frequencies.get_freq_code(freq) == frequencies.get_freq_code(other.freq):
+ if freq is None or frequencies.get_freq_code(
+ freq) == frequencies.get_freq_code(other.freq):
ordinal = other.ordinal
freq = other.freq
else:
@@ -1177,7 +1202,8 @@ class Period(_Period):
try:
freq = frequencies.Resolution.get_freq(reso)
except KeyError:
- raise ValueError("Invalid frequency or could not infer: %s" % reso)
+ raise ValueError(
+ "Invalid frequency or could not infer: %s" % reso)
elif isinstance(value, datetime):
dt = value
@@ -1210,7 +1236,8 @@ def _ordinal_from_fields(year, month, quarter, day,
if quarter is not None:
year, month = _quarter_to_myear(year, quarter, freq)
- return get_period_ordinal(year, month, day, hour, minute, second, 0, 0, base)
+ return get_period_ordinal(year, month, day, hour,
+ minute, second, 0, 0, base)
def _quarter_to_myear(year, quarter, freq):
@@ -1218,7 +1245,8 @@ def _quarter_to_myear(year, quarter, freq):
if quarter <= 0 or quarter > 4:
raise ValueError('Quarter must be 1 <= q <= 4')
- mnum = frequencies._month_numbers[frequencies._get_rule_month(freq)] + 1
+ mnum = frequencies._month_numbers[
+ frequencies._get_rule_month(freq)] + 1
month = (mnum + (quarter - 1) * 3) % 12 + 1
if month > mnum:
year -= 1
diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx
index c3f8bdfbfd0a6..1cd3e53494a72 100644
--- a/pandas/src/reduce.pyx
+++ b/pandas/src/reduce.pyx
@@ -46,11 +46,11 @@ cdef class Reducer:
self.chunksize = k
self.increment = k * arr.dtype.itemsize
-
self.f = f
self.arr = arr
self.labels = labels
- self.dummy, self.typ, self.index, self.ityp = self._check_dummy(dummy=dummy)
+ self.dummy, self.typ, self.index, self.ityp = self._check_dummy(
+ dummy=dummy)
def _check_dummy(self, dummy=None):
cdef object index=None, typ=None, ityp=None
@@ -65,16 +65,17 @@ cdef class Reducer:
else:
# we passed a series-like
- if hasattr(dummy,'values'):
+ if hasattr(dummy, 'values'):
typ = type(dummy)
- index = getattr(dummy,'index',None)
+ index = getattr(dummy, 'index', None)
dummy = dummy.values
if dummy.dtype != self.arr.dtype:
raise ValueError('Dummy array must be same dtype')
if len(dummy) != self.chunksize:
- raise ValueError('Dummy array must be length %d' % self.chunksize)
+ raise ValueError('Dummy array must be length %d' %
+ self.chunksize)
return dummy, typ, index, ityp
@@ -111,15 +112,16 @@ cdef class Reducer:
if self.typ is not None:
- # recreate with the index if supplied
- if has_index:
+ # recreate with the index if supplied
+ if has_index:
- cached_typ = self.typ(chunk, index=self.index, name=name)
+ cached_typ = self.typ(
+ chunk, index=self.index, name=name)
- else:
+ else:
- # use the passsed typ, sans index
- cached_typ = self.typ(chunk, name=name)
+ # use the passsed typ, sans index
+ cached_typ = self.typ(chunk, name=name)
# use the cached_typ if possible
if cached_typ is not None:
@@ -127,13 +129,15 @@ cdef class Reducer:
if has_index:
object.__setattr__(cached_typ, 'index', self.index)
- object.__setattr__(cached_typ._data._block, 'values', chunk)
+ object.__setattr__(
+ cached_typ._data._block, 'values', chunk)
object.__setattr__(cached_typ, 'name', name)
res = self.f(cached_typ)
else:
res = self.f(chunk)
- if hasattr(res,'values') and isinstance(res.values, np.ndarray):
+ if hasattr(res, 'values') and isinstance(
+ res.values, np.ndarray):
res = res.values
if i == 0:
result = _get_result_array(res,
@@ -167,7 +171,8 @@ cdef class SeriesBinGrouper:
bint passed_dummy
cdef public:
- object arr, index, dummy_arr, dummy_index, values, f, bins, typ, ityp, name
+ object arr, index, dummy_arr, dummy_index
+ object values, f, bins, typ, ityp, name
def __init__(self, object series, object f, object bins, object dummy):
n = len(series)
@@ -182,7 +187,7 @@ cdef class SeriesBinGrouper:
self.typ = series._constructor
self.ityp = series.index._constructor
self.index = series.index.values
- self.name = getattr(series,'name',None)
+ self.name = getattr(series, 'name', None)
self.dummy_arr, self.dummy_index = self._check_dummy(dummy)
self.passed_dummy = dummy is not None
@@ -205,7 +210,7 @@ cdef class SeriesBinGrouper:
raise ValueError('Dummy array must be same dtype')
if not values.flags.contiguous:
values = values.copy()
- index = dummy.index.values
+ index = dummy.index.values
if not index.flags.contiguous:
index = index.copy()
@@ -227,9 +232,9 @@ cdef class SeriesBinGrouper:
counts[0] = self.bins[0]
for i in range(1, self.ngroups):
if i == self.ngroups - 1:
- counts[i] = len(self.arr) - self.bins[i-1]
+ counts[i] = len(self.arr) - self.bins[i - 1]
else:
- counts[i] = self.bins[i] - self.bins[i-1]
+ counts[i] = self.bins[i] - self.bins[i - 1]
group_size = 0
n = len(self.arr)
@@ -252,7 +257,8 @@ cdef class SeriesBinGrouper:
else:
object.__setattr__(cached_ityp, '_data', islider.buf)
cached_ityp._engine.clear_mapping()
- object.__setattr__(cached_typ._data._block, 'values', vslider.buf)
+ object.__setattr__(
+ cached_typ._data._block, 'values', vslider.buf)
object.__setattr__(cached_typ, '_index', cached_ityp)
object.__setattr__(cached_typ, 'name', name)
@@ -293,7 +299,8 @@ cdef class SeriesGrouper:
bint passed_dummy
cdef public:
- object arr, index, dummy_arr, dummy_index, f, labels, values, typ, ityp, name
+ object arr, index, dummy_arr, dummy_index
+ object f, labels, values, typ, ityp, name
def __init__(self, object series, object f, object labels,
Py_ssize_t ngroups, object dummy):
@@ -309,7 +316,7 @@ cdef class SeriesGrouper:
self.typ = series._constructor
self.ityp = series.index._constructor
self.index = series.index.values
- self.name = getattr(series,'name',None)
+ self.name = getattr(series, 'name', None)
self.dummy_arr, self.dummy_index = self._check_dummy(dummy)
self.passed_dummy = dummy is not None
@@ -320,14 +327,14 @@ cdef class SeriesGrouper:
if dummy is None:
values = np.empty(0, dtype=self.arr.dtype)
- index = None
+ index = None
else:
values = dummy.values
if dummy.dtype != self.arr.dtype:
raise ValueError('Dummy array must be same dtype')
if not values.flags.contiguous:
values = values.copy()
- index = dummy.index.values
+ index = dummy.index.values
if not index.flags.contiguous:
index = index.copy()
@@ -375,7 +382,8 @@ cdef class SeriesGrouper:
else:
object.__setattr__(cached_ityp, '_data', islider.buf)
cached_ityp._engine.clear_mapping()
- object.__setattr__(cached_typ._data._block, 'values', vslider.buf)
+ object.__setattr__(
+ cached_typ._data._block, 'values', vslider.buf)
object.__setattr__(cached_typ, '_index', cached_ityp)
object.__setattr__(cached_typ, 'name', name)
@@ -411,14 +419,14 @@ cdef class SeriesGrouper:
cdef inline _extract_result(object res):
""" extract the result object, it might be a 0-dim ndarray
or a len-1 0-dim, or a scalar """
- if hasattr(res,'values'):
- res = res.values
+ if hasattr(res, 'values'):
+ res = res.values
if not np.isscalar(res):
- if isinstance(res, np.ndarray):
- if res.ndim == 0:
- res = res.item()
- elif res.ndim == 1 and len(res) == 1:
- res = res[0]
+ if isinstance(res, np.ndarray):
+ if res.ndim == 0:
+ res = res.item()
+ elif res.ndim == 1 and len(res) == 1:
+ res = res[0]
return res
cdef class Slider:
@@ -467,9 +475,11 @@ cdef class Slider:
self.buf.data = self.orig_data
self.buf.strides[0] = self.orig_stride
+
class InvalidApply(Exception):
pass
+
def apply_frame_axis0(object frame, object f, object names,
ndarray[int64_t] starts, ndarray[int64_t] ends):
cdef:
@@ -482,7 +492,6 @@ def apply_frame_axis0(object frame, object f, object names,
if frame.index._has_complex_internals:
raise InvalidApply('Cannot modify frame index internals')
-
results = []
# Need to infer if our low-level mucking is going to cause a segfault
@@ -496,7 +505,6 @@ def apply_frame_axis0(object frame, object f, object names,
except:
raise InvalidApply('Let this error raise above us')
-
slider = BlockSlider(frame)
mutated = False
@@ -550,7 +558,8 @@ cdef class BlockSlider:
util.set_array_not_contiguous(x)
self.nblocks = len(self.blocks)
- self.idx_slider = Slider(self.frame.index.values, self.dummy.index.values)
+ self.idx_slider = Slider(
+ self.frame.index.values, self.dummy.index.values)
self.base_ptrs = <char**> malloc(sizeof(char*) * len(self.blocks))
for i, block in enumerate(self.blocks):
@@ -574,7 +583,7 @@ cdef class BlockSlider:
# move and set the index
self.idx_slider.move(start, end)
- object.__setattr__(self.index,'_data',self.idx_slider.buf)
+ object.__setattr__(self.index, '_data', self.idx_slider.buf)
self.index._engine.clear_mapping()
cdef reset(self):
@@ -589,6 +598,7 @@ cdef class BlockSlider:
arr.data = self.base_ptrs[i]
arr.shape[1] = 0
+
def reduce(arr, f, axis=0, dummy=None, labels=None):
"""
@@ -606,7 +616,7 @@ def reduce(arr, f, axis=0, dummy=None, labels=None):
raise Exception('Cannot use shortcut')
# pass as an ndarray
- if hasattr(labels,'values'):
+ if hasattr(labels, 'values'):
labels = labels.values
reducer = Reducer(arr, f, axis=axis, dummy=dummy, labels=labels)
diff --git a/pandas/src/skiplist.pyx b/pandas/src/skiplist.pyx
index e7db7bd5a4a02..3017931e25115 100644
--- a/pandas/src/skiplist.pyx
+++ b/pandas/src/skiplist.pyx
@@ -75,7 +75,6 @@ cdef class IndexableSkiplist:
i -= node.width[level]
node = node.next[level]
-
return node.value
cpdef insert(self, double value):
diff --git a/pandas/src/sparse.pyx b/pandas/src/sparse.pyx
index 88eb4cf13815b..7ab29414499fc 100644
--- a/pandas/src/sparse.pyx
+++ b/pandas/src/sparse.pyx
@@ -20,7 +20,7 @@ _np_version_under1p11 = LooseVersion(_np_version) < '1.11'
np.import_array()
np.import_ufunc()
-#-------------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Preamble stuff
cdef float64_t NaN = <float64_t> np.NaN
@@ -29,7 +29,7 @@ cdef float64_t INF = <float64_t> np.inf
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b
-#-------------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
cdef class SparseIndex:
@@ -112,7 +112,8 @@ cdef class IntIndex(SparseIndex):
xindices = self.indices
yindices = y.indices
- new_indices = np.empty(min(len(xindices), len(yindices)), dtype=np.int32)
+ new_indices = np.empty(min(
+ len(xindices), len(yindices)), dtype=np.int32)
for xi from 0 <= xi < self.npoints:
xind = xindices[xi]
@@ -171,7 +172,8 @@ cdef class IntIndex(SparseIndex):
return -1
@cython.wraparound(False)
- cpdef ndarray[int32_t] lookup_array(self, ndarray[int32_t, ndim=1] indexer):
+ cpdef ndarray[int32_t] lookup_array(self, ndarray[
+ int32_t, ndim=1] indexer):
"""
Vectorized lookup, returns ndarray[int32_t]
"""
@@ -279,7 +281,7 @@ cpdef get_blocks(ndarray[int32_t, ndim=1] indices):
lens = lens[:result_indexer]
return locs, lens
-#-------------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# BlockIndex
cdef class BlockIndex(SparseIndex):
@@ -350,7 +352,7 @@ cdef class BlockIndex(SparseIndex):
for i from 0 <= i < self.nblocks:
if i > 0:
- if blocs[i] <= blocs[i-1]:
+ if blocs[i] <= blocs[i - 1]:
raise ValueError('Locations not in ascending order')
if i < self.nblocks - 1:
@@ -524,7 +526,8 @@ cdef class BlockIndex(SparseIndex):
return -1
@cython.wraparound(False)
- cpdef ndarray[int32_t] lookup_array(self, ndarray[int32_t, ndim=1] indexer):
+ cpdef ndarray[int32_t] lookup_array(self, ndarray[
+ int32_t, ndim=1] indexer):
"""
Vectorized lookup, returns ndarray[int32_t]
"""
@@ -642,7 +645,8 @@ cdef class BlockUnion(BlockMerge):
cdef _make_merged_blocks(self):
cdef:
- ndarray[int32_t, ndim=1] xstart, xend, ystart, yend, out_bloc, out_blen
+ ndarray[int32_t, ndim=1] xstart, xend, ystart
+ ndarray[int32_t, ndim=1] yend, out_bloc, out_blen
int32_t nstart, nend, diff
Py_ssize_t max_len, result_indexer = 0
@@ -752,14 +756,13 @@ cdef class BlockUnion(BlockMerge):
return self._find_next_block_end(1 - mode)
-#-------------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Sparse arithmetic
include "sparse_op_helper.pxi"
-
-#-------------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Indexing operations
def get_reindexer(ndarray[object, ndim=1] values, dict index_map):
diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx
index e9563d9168206..cda21ba9c4ce1 100644
--- a/pandas/src/testing.pyx
+++ b/pandas/src/testing.pyx
@@ -68,13 +68,14 @@ cpdef assert_almost_equal(a, b,
b : object
check_less_precise : bool or int, default False
Specify comparison precision.
- 5 digits (False) or 3 digits (True) after decimal points are compared.
- If an integer, then this will be the number of decimal points to compare
+ 5 digits (False) or 3 digits (True) after decimal points are
+ compared. If an integer, then this will be the number of decimal
+ points to compare
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
obj : str, default None
- Specify object name being compared, internally used to show appropriate
- assertion message
+ Specify object name being compared, internally used to show
+ appropriate assertion message
lobj : str, default None
Specify left object name being compared, internally used to show
appropriate assertion message
@@ -129,8 +130,9 @@ cpdef assert_almost_equal(a, b,
na, nb = a.size, b.size
if a.shape != b.shape:
from pandas.util.testing import raise_assert_detail
- raise_assert_detail(obj, '{0} shapes are different'.format(obj),
- a.shape, b.shape)
+ raise_assert_detail(
+ obj, '{0} shapes are different'.format(obj),
+ a.shape, b.shape)
if check_dtype and not is_dtype_equal(a, b):
from pandas.util.testing import assert_attr_equal
@@ -148,7 +150,7 @@ cpdef assert_almost_equal(a, b,
from pandas.util.testing import raise_assert_detail
# if we have a small diff set, print it
- if abs(na-nb) < 10:
+ if abs(na - nb) < 10:
r = list(set(a) ^ set(b))
else:
r = None
@@ -158,14 +160,16 @@ cpdef assert_almost_equal(a, b,
for i in xrange(len(a)):
try:
- assert_almost_equal(a[i], b[i], check_less_precise=check_less_precise)
+ assert_almost_equal(a[i], b[i],
+ check_less_precise=check_less_precise)
except AssertionError:
is_unequal = True
diff += 1
if is_unequal:
from pandas.util.testing import raise_assert_detail
- msg = '{0} values are different ({1} %)'.format(obj, np.round(diff * 100.0 / na, 5))
+ msg = '{0} values are different ({1} %)'.format(
+ obj, np.round(diff * 100.0 / na, 5))
raise_assert_detail(obj, msg, lobj, robj)
return True
@@ -198,12 +202,12 @@ cpdef assert_almost_equal(a, b,
# case for zero
if abs(fa) < 1e-5:
if not decimal_almost_equal(fa, fb, decimal):
- assert False, (
- '(very low values) expected %.5f but got %.5f, with decimal %d' % (fb, fa, decimal)
- )
+ assert False, ('(very low values) expected %.5f but '
+ 'got %.5f, with decimal %d' % (fb, fa, decimal))
else:
if not decimal_almost_equal(1, fb / fa, decimal):
- assert False, 'expected %.5f but got %.5f, with decimal %d' % (fb, fa, decimal)
+ assert False, ('expected %.5f but got %.5f, '
+ 'with decimal %d' % (fb, fa, decimal))
return True
raise AssertionError("{0} != {1}".format(a, b))
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index c9e85c5741410..9073ad0abd535 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -36,7 +36,8 @@ cdef extern from "datetime_helper.h":
from datetime cimport cmp_pandas_datetimestruct
from libc.stdlib cimport free
-from util cimport is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object
+from util cimport (is_integer_object, is_float_object, is_datetime64_object,
+ is_timedelta64_object)
cimport util
from datetime cimport *
@@ -49,8 +50,10 @@ from datetime import time as datetime_time
import re
# dateutil compat
-from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile,
- tzutc as _dateutil_tzutc, tzstr as _dateutil_tzstr)
+from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal,
+ tzfile as _dateutil_tzfile,
+ tzutc as _dateutil_tzutc,
+ tzstr as _dateutil_tzstr)
from pandas.compat import is_platform_windows
if is_platform_windows():
@@ -61,7 +64,8 @@ from dateutil.relativedelta import relativedelta
from dateutil.parser import DEFAULTPARSER
from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
-from pandas.compat import parse_date, string_types, iteritems, StringIO, callable
+from pandas.compat import (parse_date, string_types, iteritems,
+ StringIO, callable)
import operator
import collections
@@ -89,8 +93,10 @@ try:
except NameError: # py3
basestring = str
-cdef inline object create_timestamp_from_ts(int64_t value, pandas_datetimestruct dts,
- object tz, object freq):
+
+cdef inline object create_timestamp_from_ts(
+ int64_t value, pandas_datetimestruct dts,
+ object tz, object freq):
cdef _Timestamp ts_base
ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month,
dts.day, dts.hour, dts.min,
@@ -101,13 +107,17 @@ cdef inline object create_timestamp_from_ts(int64_t value, pandas_datetimestruct
return ts_base
-cdef inline object create_datetime_from_ts(int64_t value, pandas_datetimestruct dts,
- object tz, object freq):
+
+cdef inline object create_datetime_from_ts(
+ int64_t value, pandas_datetimestruct dts,
+ object tz, object freq):
return datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
+
def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
- # convert an i8 repr to an ndarray of datetimes or Timestamp (if box == True)
+ # convert an i8 repr to an ndarray of datetimes or Timestamp (if box ==
+ # True)
cdef:
Py_ssize_t i, n = len(arr)
@@ -133,7 +143,8 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
if value == NPY_NAT:
result[i] = NaT
else:
- pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ value, PANDAS_FR_ns, &dts)
result[i] = func_create(value, dts, tz, freq)
elif _is_tzlocal(tz) or _is_fixed_offset(tz):
for i in range(n):
@@ -141,7 +152,8 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
if value == NPY_NAT:
result[i] = NaT
else:
- pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ value, PANDAS_FR_ns, &dts)
dt = create_datetime_from_ts(value, dts, tz, freq)
dt = dt + tz.utcoffset(dt)
if box:
@@ -163,10 +175,12 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
# find right representation of dst etc in pytz timezone
new_tz = tz._tzinfos[tz._transition_info[pos]]
else:
- # no zone-name change for dateutil tzs - dst etc represented in single object.
+ # no zone-name change for dateutil tzs - dst etc
+ # represented in single object.
new_tz = tz
- pandas_datetime_to_datetimestruct(value + deltas[pos], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ value + deltas[pos], PANDAS_FR_ns, &dts)
result[i] = func_create(value, dts, new_tz, freq)
else:
for i in range(n):
@@ -180,8 +194,10 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
return result
+
def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
- # convert an i8 repr to an ndarray of timedelta or Timedelta (if box == True)
+ # convert an i8 repr to an ndarray of timedelta or Timedelta (if box ==
+ # True)
cdef:
Py_ssize_t i, n = len(arr)
@@ -197,7 +213,7 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
if box:
result[i] = Timedelta(value)
else:
- result[i] = timedelta(microseconds=int(value)/1000)
+ result[i] = timedelta(microseconds=int(value) / 1000)
return result
@@ -205,6 +221,7 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
cdef inline bint _is_tzlocal(object tz):
return isinstance(tz, _dateutil_tzlocal)
+
cdef inline bint _is_fixed_offset(object tz):
if _treat_tz_as_dateutil(tz):
if len(tz._trans_idx) == 0 and len(tz._trans_list) == 0:
@@ -212,7 +229,8 @@ cdef inline bint _is_fixed_offset(object tz):
else:
return 0
elif _treat_tz_as_pytz(tz):
- if len(tz._transition_info) == 0 and len(tz._utc_transition_times) == 0:
+ if (len(tz._transition_info) == 0
+ and len(tz._utc_transition_times) == 0):
return 1
else:
return 0
@@ -223,6 +241,8 @@ _no_input = object()
# Python front end to C extension type _Timestamp
# This serves as the box for datetime64
+
+
class Timestamp(_Timestamp):
"""TimeStamp is the pandas equivalent of python's Datetime
and is interchangable with it in most cases. It's the type used
@@ -281,7 +301,8 @@ class Timestamp(_Timestamp):
offset : str, DateOffset
Deprecated, use freq
"""
- return cls(datetime.fromordinal(ordinal), freq=freq, tz=tz, offset=offset)
+ return cls(datetime.fromordinal(ordinal),
+ freq=freq, tz=tz, offset=offset)
@classmethod
def now(cls, tz=None):
@@ -370,13 +391,16 @@ class Timestamp(_Timestamp):
if ts_input is _no_input:
# User passed keyword arguments.
return Timestamp(datetime(year, month, day, hour or 0,
- minute or 0, second or 0, microsecond or 0, tzinfo),
- tz=tzinfo)
+ minute or 0, second or 0,
+ microsecond or 0, tzinfo),
+ tz=tzinfo)
elif is_integer_object(freq):
# User passed positional arguments:
- # Timestamp(year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])
+ # Timestamp(year, month, day[, hour[, minute[, second[,
+ # microsecond[, tzinfo]]]]])
return Timestamp(datetime(ts_input, freq, tz, unit or 0,
- year or 0, month or 0, day or 0, hour), tz=hour)
+ year or 0, month or 0, day or 0,
+ hour), tz=hour)
ts = convert_to_tsobject(ts_input, tz, unit, 0, 0)
@@ -399,7 +423,6 @@ class Timestamp(_Timestamp):
return ts_base
-
def _round(self, freq, rounder):
cdef int64_t unit
@@ -411,7 +434,7 @@ class Timestamp(_Timestamp):
value = self.tz_localize(None).value
else:
value = self.value
- result = Timestamp(unit*rounder(value/float(unit)),unit='ns')
+ result = Timestamp(unit * rounder(value / float(unit)), unit='ns')
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
@@ -493,7 +516,8 @@ class Timestamp(_Timestamp):
@property
def weekday_name(self):
- out = get_date_name_field(np.array([self.value], dtype=np.int64), 'weekday_name')
+ out = get_date_name_field(
+ np.array([self.value], dtype=np.int64), 'weekday_name')
return out[0]
@property
@@ -592,8 +616,8 @@ class Timestamp(_Timestamp):
# tz naive, localize
tz = maybe_get_tz(tz)
if not isinstance(ambiguous, basestring):
- ambiguous = [ambiguous]
- value = tz_localize_to_utc(np.array([self.value],dtype='i8'), tz,
+ ambiguous = [ambiguous]
+ value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz,
ambiguous=ambiguous, errors=errors)[0]
return Timestamp(value, tz=tz)
else:
@@ -605,7 +629,6 @@ class Timestamp(_Timestamp):
raise TypeError('Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions')
-
def tz_convert(self, tz):
"""
Convert tz-aware Timestamp to another time zone.
@@ -677,25 +700,26 @@ class Timestamp(_Timestamp):
year -= 1
month += 12
return (day +
- np.fix((153*month - 457)/5) +
- 365*year +
+ np.fix((153 * month - 457) / 5) +
+ 365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
- self.minute/60.0 +
- self.second/3600.0 +
- self.microsecond/3600.0/1e+6 +
- self.nanosecond/3600.0/1e+9
- )/24.0)
+ self.minute / 60.0 +
+ self.second / 3600.0 +
+ self.microsecond / 3600.0 / 1e+6 +
+ self.nanosecond / 3600.0 / 1e+9
+ ) / 24.0)
def normalize(self):
"""
Normalize Timestamp to midnight, preserving
tz information.
"""
- normalized_value = date_normalize(np.array([self.value], dtype='i8'), tz=self.tz)[0]
+ normalized_value = date_normalize(
+ np.array([self.value], dtype='i8'), tz=self.tz)[0]
return Timestamp(normalized_value).tz_localize(self.tz)
def __radd__(self, other):
@@ -704,7 +728,9 @@ class Timestamp(_Timestamp):
return self + other
-_nat_strings = set(['NaT','nat','NAT','nan','NaN','NAN'])
+_nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'])
+
+
class NaTType(_NaT):
"""(N)ot-(A)-(T)ime, the time equivalent of NaN"""
@@ -762,7 +788,6 @@ class NaTType(_NaT):
return NotImplemented
-
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'millisecond', 'microsecond', 'nanosecond',
'week', 'dayofyear', 'days_in_month', 'daysinmonth', 'dayofweek',
@@ -771,20 +796,23 @@ for field in fields:
prop = property(fget=lambda self: np.nan)
setattr(NaTType, field, prop)
-# GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or return NaT
-# create functions that raise, for binding to NaTType
+
+# GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or
+# return NaT create functions that raise, for binding to NaTType
def _make_error_func(func_name):
def f(*args, **kwargs):
raise ValueError("NaTType does not support " + func_name)
f.__name__ = func_name
return f
+
def _make_nat_func(func_name):
def f(*args, **kwargs):
return NaT
f.__name__ = func_name
return f
+
def _make_nan_func(func_name):
def f(*args, **kwargs):
return np.nan
@@ -813,7 +841,9 @@ for _maybe_method_name in dir(NaTType):
if (callable(_maybe_method)
and not _maybe_method_name.startswith("_")
and _maybe_method_name not in _implemented_methods):
- setattr(NaTType, _maybe_method_name, _make_error_func(_maybe_method_name))
+ setattr(NaTType, _maybe_method_name,
+ _make_error_func(_maybe_method_name))
+
def __nat_unpickle(*args):
# return constant defined in the module
@@ -1028,9 +1058,11 @@ cdef class _Timestamp(datetime):
pass
tz = ", tz='{0}'".format(zone) if zone is not None else ""
- freq = ", freq='{0}'".format(self.freq.freqstr) if self.freq is not None else ""
+ freq = ", freq='{0}'".format(
+ self.freq.freqstr) if self.freq is not None else ""
- return "Timestamp('{stamp}'{tz}{freq})".format(stamp=stamp, tz=tz, freq=freq)
+ return "Timestamp('{stamp}'{tz}{freq})".format(
+ stamp=stamp, tz=tz, freq=freq)
cdef bint _compare_outside_nanorange(_Timestamp self, datetime other,
int op) except -1:
@@ -1101,7 +1133,8 @@ cdef class _Timestamp(datetime):
if is_timedelta64_object(other):
other_int = other.astype('timedelta64[ns]').view('i8')
- return Timestamp(self.value + other_int, tz=self.tzinfo, freq=self.freq)
+ return Timestamp(self.value + other_int,
+ tz=self.tzinfo, freq=self.freq)
elif is_integer_object(other):
if self is NaT:
@@ -1114,7 +1147,8 @@ cdef class _Timestamp(datetime):
elif isinstance(other, timedelta) or hasattr(other, 'delta'):
nanos = _delta_to_nanoseconds(other)
- result = Timestamp(self.value + nanos, tz=self.tzinfo, freq=self.freq)
+ result = Timestamp(self.value + nanos,
+ tz=self.tzinfo, freq=self.freq)
if getattr(other, 'normalize', False):
result = Timestamp(normalize_date(result))
return result
@@ -1148,21 +1182,27 @@ cdef class _Timestamp(datetime):
return NaT
# coerce if necessary if we are a Timestamp-like
- if isinstance(self, datetime) and (isinstance(other, datetime) or is_datetime64_object(other)):
+ if (isinstance(self, datetime)
+ and (isinstance(other, datetime)
+ or is_datetime64_object(other))):
self = Timestamp(self)
other = Timestamp(other)
# validate tz's
if get_timezone(self.tzinfo) != get_timezone(other.tzinfo):
- raise TypeError("Timestamp subtraction must have the same timezones or no timezones")
+ raise TypeError(
+ "Timestamp subtraction must have the "
+ "same timezones or no timezones")
- # scalar Timestamp/datetime - Timestamp/datetime -> yields a Timedelta
+ # scalar Timestamp/datetime - Timestamp/datetime -> yields a
+ # Timedelta
try:
- return Timedelta(self.value-other.value)
+ return Timedelta(self.value -other.value)
except (OverflowError, OutOfBoundsDatetime):
pass
- # scalar Timestamp/datetime - Timedelta -> yields a Timestamp (with same timezone if specified)
+ # scalar Timestamp/datetime - Timedelta -> yields a Timestamp (with
+ # same timezone if specified)
return datetime.__sub__(self, other)
cpdef _get_field(self, field):
@@ -1170,9 +1210,12 @@ cdef class _Timestamp(datetime):
return int(out[0])
cpdef _get_start_end_field(self, field):
- month_kw = self.freq.kwds.get('startingMonth', self.freq.kwds.get('month', 12)) if self.freq else 12
+ month_kw = self.freq.kwds.get(
+ 'startingMonth', self.freq.kwds.get(
+ 'month', 12)) if self.freq else 12
freqstr = self.freqstr if self.freq else None
- out = get_start_end_field(np.array([self.value], dtype=np.int64), field, freqstr, month_kw)
+ out = get_start_end_field(
+ np.array([self.value], dtype=np.int64), field, freqstr, month_kw)
return out[0]
property _repr_base:
@@ -1361,19 +1404,20 @@ cdef convert_to_tsobject(object ts, object tz, object unit,
obj.value = NPY_NAT
else:
obj.value = _get_datetime64_nanos(ts)
- pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts)
+ pandas_datetime_to_datetimestruct(
+ obj.value, PANDAS_FR_ns, &obj.dts)
elif is_integer_object(ts):
if ts == NPY_NAT:
obj.value = NPY_NAT
else:
- ts = ts * cast_from_unit(None,unit)
+ ts = ts * cast_from_unit(None, unit)
obj.value = ts
pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
elif util.is_float_object(ts):
if ts != ts or ts == NPY_NAT:
obj.value = NPY_NAT
else:
- ts = cast_from_unit(ts,unit)
+ ts = cast_from_unit(ts, unit)
obj.value = ts
pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
elif PyDateTime_Check(ts):
@@ -1424,7 +1468,9 @@ cdef convert_to_tsobject(object ts, object tz, object unit,
ts = datetime.combine(ts, datetime_time())
return convert_to_tsobject(ts, tz, None, 0, 0)
elif getattr(ts, '_typ', None) == 'period':
- raise ValueError("Cannot convert Period to Timestamp unambiguously. Use to_timestamp")
+ raise ValueError(
+ "Cannot convert Period to Timestamp "
+ "unambiguously. Use to_timestamp")
else:
raise TypeError('Cannot convert input to Timestamp')
@@ -1465,7 +1511,8 @@ cpdef convert_str_to_tsobject(object ts, object tz, object unit,
else:
try:
_string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset)
- obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
+ obj.value = pandas_datetimestruct_to_datetime(
+ PANDAS_FR_ns, &obj.dts)
_check_dts_bounds(&obj.dts)
if out_local == 1:
obj.tzinfo = pytz.FixedOffset(out_tzoffset)
@@ -1483,12 +1530,14 @@ cpdef convert_str_to_tsobject(object ts, object tz, object unit,
ts = tz_convert_single(ts, tz, 'UTC')
except ValueError:
try:
- ts = parse_datetime_string(ts, dayfirst=dayfirst, yearfirst=yearfirst)
+ ts = parse_datetime_string(
+ ts, dayfirst=dayfirst, yearfirst=yearfirst)
except Exception:
raise ValueError("could not convert string to Timestamp")
return convert_to_tsobject(ts, tz, unit, dayfirst, yearfirst)
+
def _test_parse_iso8601(object ts):
"""
TESTING ONLY: Parse string into Timestamp using iso8601 parser. Used
@@ -1534,7 +1583,6 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
pos = trans.searchsorted(obj.value, side='right') - 1
-
# static/pytz/dateutil specific code
if _is_fixed_offset(tz):
# statictzinfo
@@ -1542,7 +1590,8 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
pandas_datetime_to_datetimestruct(obj.value + deltas[0],
PANDAS_FR_ns, &obj.dts)
else:
- pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts)
+ pandas_datetime_to_datetimestruct(
+ obj.value, PANDAS_FR_ns, &obj.dts)
obj.tzinfo = tz
elif _treat_tz_as_pytz(tz):
inf = tz._transition_info[pos]
@@ -1591,21 +1640,29 @@ cdef inline bint _is_utc(object tz):
cdef inline object _get_zone(object tz):
"""
We need to do several things here:
- 1/ Distinguish between pytz and dateutil timezones
- 2/ Not be over-specific (e.g. US/Eastern with/without DST is same *zone* but a different tz object)
- 3/ Provide something to serialize when we're storing a datetime object in pytables.
-
- We return a string prefaced with dateutil if it's a dateutil tz, else just the tz name. It needs to be a
- string so that we can serialize it with UJSON/pytables. maybe_get_tz (below) is the inverse of this process.
+ 1) Distinguish between pytz and dateutil timezones
+ 2) Not be over-specific (e.g. US/Eastern with/without DST is same *zone*
+ but a different tz object)
+ 3) Provide something to serialize when we're storing a datetime object
+ in pytables.
+
+ We return a string prefaced with dateutil if it's a dateutil tz, else just
+ the tz name. It needs to be a string so that we can serialize it with
+ UJSON/pytables. maybe_get_tz (below) is the inverse of this process.
"""
if _is_utc(tz):
return 'UTC'
else:
if _treat_tz_as_dateutil(tz):
if '.tar.gz' in tz._filename:
- raise ValueError('Bad tz filename. Dateutil on python 3 on windows has a bug which causes tzfile._filename to be the same for all '
- 'timezone files. Please construct dateutil timezones implicitly by passing a string like "dateutil/Europe/London" '
- 'when you construct your pandas objects instead of passing a timezone object. See https://github.com/pydata/pandas/pull/7362')
+ raise ValueError(
+ 'Bad tz filename. Dateutil on python 3 on windows has a '
+ 'bug which causes tzfile._filename to be the same for all '
+ 'timezone files. Please construct dateutil timezones '
+ 'implicitly by passing a string like "dateutil/Europe'
+ '/London" when you construct your pandas objects instead '
+ 'of passing a timezone object. See '
+ 'https://github.com/pydata/pandas/pull/7362')
return 'dateutil/' + tz._filename
else:
# tz is a pytz timezone or unknown.
@@ -1620,8 +1677,8 @@ cdef inline object _get_zone(object tz):
cpdef inline object maybe_get_tz(object tz):
"""
- (Maybe) Construct a timezone object from a string. If tz is a string, use it to construct a timezone object.
- Otherwise, just return tz.
+ (Maybe) Construct a timezone object from a string. If tz is a string, use
+ it to construct a timezone object. Otherwise, just return tz.
"""
if isinstance(tz, string_types):
if tz == 'tzlocal()':
@@ -1639,7 +1696,6 @@ cpdef inline object maybe_get_tz(object tz):
return tz
-
class OutOfBoundsDatetime(ValueError):
pass
@@ -1659,7 +1715,8 @@ cdef inline _check_dts_bounds(pandas_datetimestruct *dts):
dts.day, dts.hour,
dts.min, dts.sec)
- raise OutOfBoundsDatetime('Out of bounds nanosecond timestamp: %s' % fmt)
+ raise OutOfBoundsDatetime(
+ 'Out of bounds nanosecond timestamp: %s' % fmt)
def datetime_to_datetime64(ndarray[object] values):
@@ -1689,7 +1746,8 @@ def datetime_to_datetime64(ndarray[object] values):
_check_dts_bounds(&_ts.dts)
else:
if inferred_tz is not None:
- raise ValueError('Cannot mix tz-aware with tz-naive values')
+ raise ValueError(
+ 'Cannot mix tz-aware with tz-naive values')
iresult[i] = _pydatetime_to_dts(val, &dts)
_check_dts_bounds(&dts)
else:
@@ -1698,7 +1756,7 @@ def datetime_to_datetime64(ndarray[object] values):
return result, inferred_tz
cdef:
- set _not_datelike_strings = set(['a','A','m','M','p','P','t','T'])
+ set _not_datelike_strings = set(['a', 'A', 'm', 'M', 'p', 'P', 't', 'T'])
cpdef bint _does_string_look_like_datetime(object date_string):
if date_string.startswith('0'):
@@ -1742,7 +1800,7 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None,
pandas_datetimestruct dts
if na_rep is None:
- na_rep = 'NaT'
+ na_rep = 'NaT'
# if we don't have a format nor tz, then choose
# a format based on precision
@@ -1780,7 +1838,7 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None,
elif show_us:
res += '.%.6d' % dts.us
elif show_ms:
- res += '.%.3d' % (dts.us/1000)
+ res += '.%.3d' % (dts.us /1000)
result[i] = res
@@ -1810,7 +1868,6 @@ cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])')
def parse_datetime_string(object date_string, object freq=None,
dayfirst=False, yearfirst=False, **kwargs):
-
"""parse datetime string, only returns datetime.
Also cares special handling matching time patterns.
@@ -1913,23 +1970,27 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
i = date_string.index('Q', 1, 6)
if i == 1:
quarter = int(date_string[0])
- if date_len == 4 or (date_len == 5 and date_string[i + 1] == '-'):
+ if date_len == 4 or (date_len == 5
+ and date_string[i + 1] == '-'):
# r'(\d)Q-?(\d\d)')
year = 2000 + int(date_string[-2:])
- elif date_len == 6 or (date_len == 7 and date_string[i + 1] == '-'):
+ elif date_len == 6 or (date_len == 7
+ and date_string[i + 1] == '-'):
# r'(\d)Q-?(\d\d\d\d)')
year = int(date_string[-4:])
else:
raise ValueError
elif i == 2 or i == 3:
# r'(\d\d)-?Q(\d)'
- if date_len == 4 or (date_len == 5 and date_string[i - 1] == '-'):
+ if date_len == 4 or (date_len == 5
+ and date_string[i - 1] == '-'):
quarter = int(date_string[-1])
year = 2000 + int(date_string[:2])
else:
raise ValueError
elif i == 4 or i == 5:
- if date_len == 6 or (date_len == 7 and date_string[i - 1] == '-'):
+ if date_len == 6 or (date_len == 7
+ and date_string[i - 1] == '-'):
# r'(\d\d\d\d)-?Q(\d)'
quarter = int(date_string[-1])
year = int(date_string[:4])
@@ -1937,7 +1998,8 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
raise ValueError
if not (1 <= quarter <= 4):
- msg = 'Incorrect quarterly string is given, quarter must be between 1 and 4: {0}'
+ msg = ('Incorrect quarterly string is given, quarter must be '
+ 'between 1 and 4: {0}')
raise DateParseError(msg.format(date_string))
if freq is not None:
@@ -1945,7 +2007,8 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
try:
mnum = _MONTH_NUMBERS[_get_rule_month(freq)] + 1
except (KeyError, ValueError):
- msg = 'Unable to retrieve month information from given freq: {0}'.format(freq)
+ msg = ('Unable to retrieve month information from given '
+ 'freq: {0}').format(freq)
raise DateParseError(msg)
month = (mnum + (quarter - 1) * 3) % 12 + 1
@@ -1962,7 +2025,8 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
except ValueError:
pass
- if date_len == 6 and (freq == 'M' or getattr(freq, 'rule_code', None) == 'M'):
+ if date_len == 6 and (freq == 'M' or getattr(
+ freq, 'rule_code', None) == 'M'):
year = int(date_string[:4])
month = int(date_string[4:6])
try:
@@ -2048,7 +2112,8 @@ def dateutil_parse(object timestr, object default, ignoretz=False,
# const for parsers
-_DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0, second=0, microsecond=0)
+_DEFAULT_DATETIME = datetime(1, 1, 1).replace(
+ hour=0, minute=0, second=0, microsecond=0)
_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
_MONTH_NUMBERS = dict((k, i) for i, k in enumerate(_MONTHS))
@@ -2092,7 +2157,9 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
int64_t m
ndarray[float64_t] fvalues
ndarray mask
- bint is_ignore=errors=='ignore', is_coerce=errors=='coerce', is_raise=errors=='raise'
+ bint is_ignore = errors=='ignore'
+ bint is_coerce = errors=='coerce'
+ bint is_raise = errors=='raise'
bint need_to_iterate=True
ndarray[int64_t] iresult
ndarray[object] oresult
@@ -2123,9 +2190,11 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
# check the bounds
if not need_to_iterate:
- if (fvalues < _NS_LOWER_BOUND).any() or (fvalues > _NS_UPPER_BOUND).any():
- raise OutOfBoundsDatetime("cannot convert input with unit '{0}'".format(unit))
- result = (iresult*m).astype('M8[ns]')
+ if ((fvalues < _NS_LOWER_BOUND).any()
+ or (fvalues > _NS_UPPER_BOUND).any()):
+ raise OutOfBoundsDatetime(
+ "cannot convert input with unit '{0}'".format(unit))
+ result = (iresult *m).astype('M8[ns]')
iresult = result.view('i8')
iresult[mask] = iNaT
return result
@@ -2149,10 +2218,9 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
iresult[i] = cast_from_unit(val, unit)
except OverflowError:
if is_raise:
- raise OutOfBoundsDatetime("cannot convert input {0}"
- "with the unit '{1}'".format(
- val,
- unit))
+ raise OutOfBoundsDatetime(
+ "cannot convert input {0} with the unit "
+ "'{1}'".format(val, unit))
elif is_ignore:
raise AssertionError
iresult[i] = NPY_NAT
@@ -2166,19 +2234,17 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
iresult[i] = cast_from_unit(float(val), unit)
except ValueError:
if is_raise:
- raise ValueError("non convertible value {0}"
- "with the unit '{1}'".format(
- val,
- unit))
+ raise ValueError(
+ "non convertible value {0} with the unit "
+ "'{1}'".format(val, unit))
elif is_ignore:
raise AssertionError
iresult[i] = NPY_NAT
except:
if is_raise:
- raise OutOfBoundsDatetime("cannot convert input {0}"
- "with the unit '{1}'".format(
- val,
- unit))
+ raise OutOfBoundsDatetime(
+ "cannot convert input {0} with the unit "
+ "'{1}'".format(val, unit))
elif is_ignore:
raise AssertionError
iresult[i] = NPY_NAT
@@ -2240,8 +2306,13 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
ndarray[int64_t] iresult
ndarray[object] oresult
pandas_datetimestruct dts
- bint utc_convert = bool(utc), seen_integer=0, seen_string=0, seen_datetime=0
- bint is_raise=errors=='raise', is_ignore=errors=='ignore', is_coerce=errors=='coerce'
+ bint utc_convert = bool(utc)
+ bint seen_integer = 0
+ bint seen_string = 0
+ bint seen_datetime = 0
+ bint is_raise = errors=='raise'
+ bint is_ignore = errors=='ignore'
+ bint is_coerce = errors=='coerce'
_TSObject _ts
int out_local=0, out_tzoffset=0
@@ -2340,7 +2411,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
seen_string=1
_string_to_dts(val, &dts, &out_local, &out_tzoffset)
- value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ value = pandas_datetimestruct_to_datetime(
+ PANDAS_FR_ns, &dts)
if out_local == 1:
tz = pytz.FixedOffset(out_tzoffset)
value = tz_convert_single(value, tz, 'UTC')
@@ -2353,8 +2425,9 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
iresult[i] = NPY_NAT
continue
elif is_raise:
- raise ValueError("time data %r doesn't match format specified" %
- (val,))
+ raise ValueError(
+ "time data %r doesn't match format "
+ "specified" % (val,))
else:
return values
@@ -2398,7 +2471,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
if is_integer_object(val) or is_float_object(val):
result[i] = NPY_NAT
elif is_raise:
- raise ValueError("mixed datetimes and integers in passed array")
+ raise ValueError(
+ "mixed datetimes and integers in passed array")
else:
raise TypeError
@@ -2440,7 +2514,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
try:
oresult[i] = parse_datetime_string(val, dayfirst=dayfirst,
- yearfirst=yearfirst)
+ yearfirst=yearfirst)
_pydatetime_to_dts(oresult[i], &dts)
_check_dts_bounds(&dts)
except Exception:
@@ -2456,11 +2530,10 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
return oresult
-# Similar to Timestamp/datetime, this is a construction requirement for timedeltas
-# we need to do object instantiation in python
-# This will serve as a C extension type that
-# shadows the python class, where we do any heavy lifting.
-
+# Similar to Timestamp/datetime, this is a construction requirement for
+# timedeltas that we need to do object instantiation in python. This will
+# serve as a C extension type that shadows the Python class, where we do any
+# heavy lifting.
cdef class _Timedelta(timedelta):
cdef readonly:
@@ -2526,14 +2599,14 @@ cdef class _Timedelta(timedelta):
return
# put frac in seconds
- frac = ivalue/(1000*1000*1000)
+ frac = ivalue /(1000 *1000 *1000)
if frac < 0:
self._sign = -1
# even fraction
if (-frac % 86400) != 0:
- self._d = -frac/86400 + 1
- frac += 86400*self._d
+ self._d = -frac /86400 + 1
+ frac += 86400 *self._d
else:
frac = -frac
else:
@@ -2542,37 +2615,38 @@ cdef class _Timedelta(timedelta):
if frac >= 86400:
self._d += frac / 86400
- frac -= self._d * 86400
+ frac -= self._d * 86400
if frac >= 3600:
- self._h = frac / 3600
- frac -= self._h * 3600
+ self._h = frac / 3600
+ frac -= self._h * 3600
else:
self._h = 0
if frac >= 60:
self._m = frac / 60
- frac -= self._m * 60
+ frac -= self._m * 60
else:
self._m = 0
if frac >= 0:
self._s = frac
- frac -= self._s
+ frac -= self._s
else:
self._s = 0
- sfrac = (self._h*3600 + self._m*60 + self._s)*(1000*1000*1000)
+ sfrac = (self._h * 3600 + self._m * 60
+ + self._s) * (1000 * 1000 * 1000)
if self._sign < 0:
- ifrac = ivalue + self._d*DAY_NS - sfrac
+ ifrac = ivalue + self._d *DAY_NS - sfrac
else:
- ifrac = ivalue - (self._d*DAY_NS + sfrac)
+ ifrac = ivalue - (self._d *DAY_NS + sfrac)
if ifrac != 0:
- self._ms = ifrac/(1000*1000)
- ifrac -= self._ms*1000*1000
- self._us = ifrac/1000
- ifrac -= self._us*1000
+ self._ms = ifrac /(1000 *1000)
+ ifrac -= self._ms *1000 *1000
+ self._us = ifrac /1000
+ ifrac -= self._us *1000
self._ns = ifrac
else:
self._ms = 0
@@ -2586,16 +2660,20 @@ cdef class _Timedelta(timedelta):
return an actual datetime.timedelta object
note: we lose nanosecond resolution if any
"""
- return timedelta(microseconds=int(self.value)/1000)
+ return timedelta(microseconds=int(self.value) /1000)
cpdef bint _has_ns(self):
return self.value % 1000 != 0
# components named tuple
-Components = collections.namedtuple('Components',['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds'])
+Components = collections.namedtuple('Components', [
+ 'days', 'hours', 'minutes', 'seconds',
+ 'milliseconds', 'microseconds', 'nanoseconds'])
# Python front end to C extension type _Timedelta
# This serves as the box for timedelta64
+
+
class Timedelta(_Timedelta):
"""
Represents a duration, the difference between two dates or times.
@@ -2608,7 +2686,8 @@ class Timedelta(_Timedelta):
value : Timedelta, timedelta, np.timedelta64, string, or integer
unit : string, [D,h,m,s,ms,us,ns]
Denote the unit of the input, if input is an integer. Default 'ns'.
- days, seconds, microseconds, milliseconds, minutes, hours, weeks : numeric, optional
+ days, seconds, microseconds,
+ milliseconds, minutes, hours, weeks : numeric, optional
Values for construction in compat with datetime.timedelta.
np ints and floats will be coereced to python ints and floats.
@@ -2623,43 +2702,52 @@ class Timedelta(_Timedelta):
if value is _no_input:
if not len(kwargs):
- raise ValueError("cannot construct a Timedelta without a value/unit or descriptive keywords (days,seconds....)")
+ raise ValueError(
+ "cannot construct a Timedelta without a value/unit or "
+ "descriptive keywords (days,seconds....)")
def _to_py_int_float(v):
if is_integer_object(v):
return int(v)
elif is_float_object(v):
return float(v)
- raise TypeError("Invalid type {0}. Must be int or float.".format(type(v)))
+ raise TypeError(
+ "Invalid type {0}. Must be int or float.".format(type(v)))
- kwargs = dict([ (k, _to_py_int_float(v)) for k, v in iteritems(kwargs) ])
+ kwargs = dict([ (k, _to_py_int_float(v))
+ for k, v in iteritems(kwargs) ])
try:
- nano = kwargs.pop('nanoseconds',0)
- value = convert_to_timedelta64(timedelta(**kwargs),'ns') + nano
+ nano = kwargs.pop('nanoseconds', 0)
+ value = convert_to_timedelta64(
+ timedelta(**kwargs), 'ns') + nano
except TypeError as e:
- raise ValueError("cannot construct a Timedelta from the passed arguments, allowed keywords are "
- "[weeks, days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds]")
+ raise ValueError("cannot construct a Timedelta from the "
+ "passed arguments, allowed keywords are "
+ "[weeks, days, hours, minutes, seconds, "
+ "milliseconds, microseconds, nanoseconds]")
if isinstance(value, Timedelta):
value = value.value
elif util.is_string_object(value):
value = np.timedelta64(parse_timedelta_string(value))
elif isinstance(value, timedelta):
- value = convert_to_timedelta64(value,'ns')
+ value = convert_to_timedelta64(value, 'ns')
elif isinstance(value, np.timedelta64):
if unit is not None:
value = value.astype('timedelta64[{0}]'.format(unit))
value = value.astype('timedelta64[ns]')
- elif hasattr(value,'delta'):
- value = np.timedelta64(_delta_to_nanoseconds(value.delta),'ns')
+ elif hasattr(value, 'delta'):
+ value = np.timedelta64(_delta_to_nanoseconds(value.delta), 'ns')
elif is_integer_object(value) or util.is_float_object(value):
# unit=None is de-facto 'ns'
- value = convert_to_timedelta64(value,unit)
+ value = convert_to_timedelta64(value, unit)
elif _checknull_with_nat(value):
return NaT
else:
- raise ValueError("Value must be Timedelta, string, integer, float, timedelta or convertible")
+ raise ValueError(
+ "Value must be Timedelta, string, integer, "
+ "float, timedelta or convertible")
if isinstance(value, np.timedelta64):
value = value.view('i8')
@@ -2669,7 +2757,7 @@ class Timedelta(_Timedelta):
return NaT
# make timedelta happy
- td_base = _Timedelta.__new__(cls, microseconds=int(value)/1000)
+ td_base = _Timedelta.__new__(cls, microseconds=int(value) /1000)
td_base.value = value
td_base.is_populated = 0
return td_base
@@ -2690,19 +2778,19 @@ class Timedelta(_Timedelta):
self._ensure_components()
if self._ns:
- return "N"
+ return "N"
elif self._us:
- return "U"
+ return "U"
elif self._ms:
- return "L"
+ return "L"
elif self._s:
- return "S"
+ return "S"
elif self._m:
- return "T"
+ return "T"
elif self._h:
- return "H"
+ return "H"
else:
- return "D"
+ return "D"
def _round(self, freq, rounder):
@@ -2710,8 +2798,8 @@ class Timedelta(_Timedelta):
from pandas.tseries.frequencies import to_offset
unit = to_offset(freq).nanos
- result = unit*rounder(self.value/float(unit))
- return Timedelta(result,unit='ns')
+ result = unit *rounder(self.value /float(unit))
+ return Timedelta(result, unit='ns')
def round(self, freq):
"""
@@ -2768,43 +2856,49 @@ class Timedelta(_Timedelta):
self._ensure_components()
if self._sign < 0:
- sign_pretty = "-"
- sign2_pretty = " +"
+ sign_pretty = "-"
+ sign2_pretty = " +"
else:
- sign_pretty = ""
- sign2_pretty = " "
+ sign_pretty = ""
+ sign2_pretty = " "
# show everything
if format == 'all':
- seconds_pretty = "%02d.%03d%03d%03d" % (self._s, self._ms, self._us, self._ns)
- return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d, sign2_pretty, self._h, self._m, seconds_pretty)
+ seconds_pretty = "%02d.%03d%03d%03d" % (
+ self._s, self._ms, self._us, self._ns)
+ return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d,
+ sign2_pretty, self._h,
+ self._m, seconds_pretty)
# by default not showing nano
if self._ms or self._us or self._ns:
- seconds_pretty = "%02d.%03d%03d" % (self._s, self._ms, self._us)
+ seconds_pretty = "%02d.%03d%03d" % (self._s, self._ms, self._us)
else:
- seconds_pretty = "%02d" % self._s
+ seconds_pretty = "%02d" % self._s
# if we have a partial day
- subs = self._h or self._m or self._s or self._ms or self._us or self._ns
+ subs = (self._h or self._m or self._s or
+ self._ms or self._us or self._ns)
if format == 'even_day':
- if not subs:
- return "%s%d days" % (sign_pretty, self._d)
+ if not subs:
+ return "%s%d days" % (sign_pretty, self._d)
elif format == 'sub_day':
- if not self._d:
+ if not self._d:
- # degenerate, don't need the extra space
- if self._sign > 0:
- sign2_pretty = ""
- return "%s%s%02d:%02d:%s" % (sign_pretty, sign2_pretty, self._h, self._m, seconds_pretty)
+ # degenerate, don't need the extra space
+ if self._sign > 0:
+ sign2_pretty = ""
+ return "%s%s%02d:%02d:%s" % (sign_pretty, sign2_pretty,
+ self._h, self._m, seconds_pretty)
if subs or format=='long':
- return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d, sign2_pretty, self._h, self._m, seconds_pretty)
+ return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d,
+ sign2_pretty, self._h,
+ self._m, seconds_pretty)
return "%s%d days" % (sign_pretty, self._d)
-
def __repr__(self):
return "Timedelta('{0}')".format(self._repr_base(format='long'))
def __str__(self):
@@ -2815,10 +2909,12 @@ class Timedelta(_Timedelta):
""" Return a Components NamedTuple-like """
self._ensure_components()
if self._sign < 0:
- return Components(-self._d,self._h,self._m,self._s,self._ms,self._us,self._ns)
+ return Components(-self._d, self._h, self._m, self._s,
+ self._ms, self._us, self._ns)
# return the named tuple
- return Components(self._d,self._h,self._m,self._s,self._ms,self._us,self._ns)
+ return Components(self._d, self._h, self._m, self._s,
+ self._ms, self._us, self._ns)
@property
def days(self):
@@ -2829,7 +2925,7 @@ class Timedelta(_Timedelta):
"""
self._ensure_components()
if self._sign < 0:
- return -1*self._d
+ return -1 *self._d
return self._d
@property
@@ -2840,7 +2936,7 @@ class Timedelta(_Timedelta):
.components will return the shown components
"""
self._ensure_components()
- return self._h*3600 + self._m*60 + self._s
+ return self._h *3600 + self._m *60 + self._s
@property
def microseconds(self):
@@ -2850,7 +2946,7 @@ class Timedelta(_Timedelta):
.components will return the shown components
"""
self._ensure_components()
- return self._ms*1000 + self._us
+ return self._ms *1000 + self._us
@property
def nanoseconds(self):
@@ -2866,7 +2962,7 @@ class Timedelta(_Timedelta):
"""
Total duration of timedelta in seconds (to ns precision)
"""
- return 1e-9*self.value
+ return 1e-9 *self.value
def __setstate__(self, state):
(value) = state
@@ -2887,13 +2983,13 @@ class Timedelta(_Timedelta):
def _validate_ops_compat(self, other):
# return True if we are compat with operating
if _checknull_with_nat(other):
- return True
+ return True
elif isinstance(other, (Timedelta, timedelta, np.timedelta64)):
- return True
+ return True
elif util.is_string_object(other):
- return True
- elif hasattr(other,'delta'):
- return True
+ return True
+ elif hasattr(other, 'delta'):
+ return True
return False
# higher than np.ndarray and np.matrix
@@ -2952,9 +3048,9 @@ class Timedelta(_Timedelta):
# only integers and floats allowed
if not (is_integer_object(other) or is_float_object(other)):
- return NotImplemented
+ return NotImplemented
- return Timedelta(other*self.value, unit='ns')
+ return Timedelta(other *self.value, unit='ns')
__rmul__ = __mul__
@@ -2965,7 +3061,7 @@ class Timedelta(_Timedelta):
# integers or floats
if is_integer_object(other) or is_float_object(other):
- return Timedelta(self.value/other, unit='ns')
+ return Timedelta(self.value /other, unit='ns')
if not self._validate_ops_compat(other):
return NotImplemented
@@ -2973,7 +3069,7 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
return np.nan
- return self.value/float(other.value)
+ return self.value /float(other.value)
def __rtruediv__(self, other):
if hasattr(other, 'dtype'):
@@ -2988,13 +3084,13 @@ class Timedelta(_Timedelta):
return float(other.value) / self.value
if not PY3:
- __div__ = __truediv__
- __rdiv__ = __rtruediv__
+ __div__ = __truediv__
+ __rdiv__ = __rtruediv__
def _not_implemented(self, *args, **kwargs):
return NotImplemented
- __floordiv__ = _not_implemented
+ __floordiv__ = _not_implemented
__rfloordiv__ = _not_implemented
def _op_unary_method(func, name):
@@ -3010,14 +3106,16 @@ class Timedelta(_Timedelta):
__abs__ = _op_unary_method(lambda x: abs(x), '__abs__')
# resolution in ns
-Timedelta.min = Timedelta(np.iinfo(np.int64).min+1)
+Timedelta.min = Timedelta(np.iinfo(np.int64).min +1)
Timedelta.max = Timedelta(np.iinfo(np.int64).max)
cdef PyTypeObject* td_type = <PyTypeObject*> Timedelta
+
cdef inline bint is_timedelta(object o):
return Py_TYPE(o) == td_type # isinstance(o, Timedelta)
+
cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
"""
Convert an ndarray to an array of timedeltas. If errors == 'coerce',
@@ -3054,37 +3152,37 @@ cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
return iresult
-cdef dict timedelta_abbrevs = { 'D' : 'd',
- 'd' : 'd',
- 'days' : 'd',
- 'day' : 'd',
- 'hours' : 'h',
- 'hour' : 'h',
- 'hr' : 'h',
- 'h' : 'h',
- 'm' : 'm',
- 'minute' : 'm',
- 'min' : 'm',
- 'minutes' : 'm',
- 's' : 's',
- 'seconds' : 's',
- 'sec' : 's',
- 'second' : 's',
- 'ms' : 'ms',
- 'milliseconds' : 'ms',
- 'millisecond' : 'ms',
- 'milli' : 'ms',
- 'millis' : 'ms',
- 'us' : 'us',
- 'microseconds' : 'us',
- 'microsecond' : 'us',
- 'micro' : 'us',
- 'micros' : 'us',
- 'ns' : 'ns',
- 'nanoseconds' : 'ns',
- 'nano' : 'ns',
- 'nanos' : 'ns',
- 'nanosecond' : 'ns',
+cdef dict timedelta_abbrevs = { 'D': 'd',
+ 'd': 'd',
+ 'days': 'd',
+ 'day': 'd',
+ 'hours': 'h',
+ 'hour': 'h',
+ 'hr': 'h',
+ 'h': 'h',
+ 'm': 'm',
+ 'minute': 'm',
+ 'min': 'm',
+ 'minutes': 'm',
+ 's': 's',
+ 'seconds': 's',
+ 'sec': 's',
+ 'second': 's',
+ 'ms': 'ms',
+ 'milliseconds': 'ms',
+ 'millisecond': 'ms',
+ 'milli': 'ms',
+ 'millis': 'ms',
+ 'us': 'us',
+ 'microseconds': 'us',
+ 'microsecond': 'us',
+ 'micro': 'us',
+ 'micros': 'us',
+ 'ns': 'ns',
+ 'nanoseconds': 'ns',
+ 'nano': 'ns',
+ 'nanos': 'ns',
+ 'nanosecond': 'ns',
}
timedelta_abbrevs_map = timedelta_abbrevs
@@ -3134,7 +3232,8 @@ cdef inline parse_timedelta_string(object ts):
list number=[], frac=[], unit=[]
# neg : tracks if we have a leading negative for the value
- # have_dot : tracks if we are processing a dot (either post hhmmss or inside an expression)
+ # have_dot : tracks if we are processing a dot (either post hhmmss or
+ # inside an expression)
# have_value : track if we have at least 1 leading unit
# have_hhmmss : tracks if we have a regular format hh:mm:ss
@@ -3250,11 +3349,11 @@ cdef inline parse_timedelta_string(object ts):
raise ValueError("no units specified")
if len(frac) > 0 and len(frac) <= 3:
- m = 10**(3-len(frac)) * 1000L * 1000L
+ m = 10**(3 -len(frac)) * 1000L * 1000L
elif len(frac) > 3 and len(frac) <= 6:
- m = 10**(6-len(frac)) * 1000L
+ m = 10**(6 -len(frac)) * 1000L
else:
- m = 10**(9-len(frac))
+ m = 10**(9 -len(frac))
r = <int64_t> int(''.join(frac)) * m
result += timedelta_as_neg(r, neg)
@@ -3320,7 +3419,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
else:
if util.is_array(ts):
ts = ts.astype('int64').item()
- if unit in ['Y','M','W']:
+ if unit in ['Y', 'M', 'W']:
ts = np.timedelta64(ts, unit)
else:
ts = cast_from_unit(ts, unit)
@@ -3328,15 +3427,15 @@ cpdef convert_to_timedelta64(object ts, object unit):
elif is_float_object(ts):
if util.is_array(ts):
ts = ts.astype('int64').item()
- if unit in ['Y','M','W']:
+ if unit in ['Y', 'M', 'W']:
ts = np.timedelta64(int(ts), unit)
else:
ts = cast_from_unit(ts, unit)
ts = np.timedelta64(ts)
elif util.is_string_object(ts):
ts = np.timedelta64(parse_timedelta_string(ts))
- elif hasattr(ts,'delta'):
- ts = np.timedelta64(_delta_to_nanoseconds(ts),'ns')
+ elif hasattr(ts, 'delta'):
+ ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns')
if isinstance(ts, timedelta):
ts = np.timedelta64(ts)
@@ -3345,7 +3444,9 @@ cpdef convert_to_timedelta64(object ts, object unit):
"scalar: %s" % type(ts))
return ts.astype('timedelta64[ns]')
-def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='raise'):
+
+def array_strptime(ndarray[object] values, object fmt,
+ bint exact=True, errors='raise'):
"""
Parameters
----------
@@ -3364,7 +3465,9 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='
int64_t us, ns
object val, group_key, ampm, found
dict found_key
- bint is_raise=errors=='raise', is_ignore=errors=='ignore', is_coerce=errors=='coerce'
+ bint is_raise = errors=='raise'
+ bint is_ignore = errors=='ignore'
+ bint is_coerce = errors=='coerce'
assert is_raise or is_ignore or is_coerce
@@ -3442,8 +3545,8 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='
if is_coerce:
iresult[i] = NPY_NAT
continue
- raise ValueError("time data %r does not match format %r (match)" %
- (values[i], fmt))
+ raise ValueError("time data %r does not match "
+ "format %r (match)" % (values[i], fmt))
if len(val) != found.end():
if is_coerce:
iresult[i] = NPY_NAT
@@ -3458,8 +3561,8 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='
if is_coerce:
iresult[i] = NPY_NAT
continue
- raise ValueError("time data %r does not match format %r (search)" %
- (values[i], fmt))
+ raise ValueError("time data %r does not match format "
+ "%r (search)" % (values[i], fmt))
year = 1900
month = day = 1
@@ -3563,7 +3666,8 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
- time.daylight and found_zone not in ("utc", "gmt")):
+ time.daylight and found_zone not in (
+ "utc", "gmt")):
break
else:
tz = value
@@ -3579,9 +3683,10 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='
# calculation.
try:
if julian == -1:
- # Need to add 1 to result since first day of the year is 1, not 0.
+ # Need to add 1 to result since first day of the year is 1, not
+ # 0.
julian = datetime_date(year, month, day).toordinal() - \
- datetime_date(year, 1, 1).toordinal() + 1
+ datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal(
@@ -3590,10 +3695,10 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='
month = datetime_result.month
day = datetime_result.day
except ValueError:
- if is_coerce:
- iresult[i] = NPY_NAT
- continue
- raise
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ raise
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
@@ -3672,10 +3777,11 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
# cast the unit, multiply base/frace separately
# to avoid precision issues from float -> int
base = <int64_t> ts
- frac = ts-base
+ frac = ts -base
if p:
- frac = round(frac,p)
- return <int64_t> (base*m) + <int64_t> (frac*m)
+ frac = round(frac, p)
+ return <int64_t> (base *m) + <int64_t> (frac *m)
+
def cast_to_nanoseconds(ndarray arr):
cdef:
@@ -3721,6 +3827,7 @@ def pydt_to_i8(object pydt):
return ts.value
+
def i8_to_pydt(int64_t i8, object tzinfo = None):
"""
Inverse of pydt_to_i8
@@ -3737,6 +3844,7 @@ try:
except:
have_pytz = False
+
def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
cdef:
ndarray[int64_t] utc_dates, tt, result, trans, deltas
@@ -3803,7 +3911,8 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz2)
- delta = int(total_seconds(_get_utcoffset(tz2, dt))) * 1000000000
+ delta = int(total_seconds(
+ _get_utcoffset(tz2, dt))) * 1000000000
result[i] = v + delta
return result
@@ -3836,6 +3945,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
result[i] = v + offset
return result
+
def tz_convert_single(int64_t val, object tz1, object tz2):
cdef:
ndarray[int64_t] trans, deltas
@@ -3889,7 +3999,8 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
dst_cache = {}
cdef inline bint _treat_tz_as_pytz(object tz):
- return hasattr(tz, '_utc_transition_times') and hasattr(tz, '_transition_info')
+ return hasattr(tz, '_utc_transition_times') and hasattr(
+ tz, '_transition_info')
cdef inline bint _treat_tz_as_dateutil(object tz):
return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx')
@@ -3902,24 +4013,32 @@ def _p_tz_cache_key(tz):
cdef inline object _tz_cache_key(object tz):
"""
- Return the key in the cache for the timezone info object or None if unknown.
+ Return the key in the cache for the timezone info object or None
+ if unknown.
- The key is currently the tz string for pytz timezones, the filename for dateutil timezones.
+ The key is currently the tz string for pytz timezones, the filename for
+ dateutil timezones.
Notes
=====
- This cannot just be the hash of a timezone object. Unfortunately, the hashes of two dateutil tz objects
- which represent the same timezone are not equal (even though the tz objects will compare equal and
- represent the same tz file).
- Also, pytz objects are not always hashable so we use str(tz) instead.
+ This cannot just be the hash of a timezone object. Unfortunately, the
+ hashes of two dateutil tz objects which represent the same timezone are
+ not equal (even though the tz objects will compare equal and represent
+ the same tz file). Also, pytz objects are not always hashable so we use
+ str(tz) instead.
"""
if isinstance(tz, _pytz_BaseTzInfo):
return tz.zone
elif isinstance(tz, _dateutil_tzfile):
if '.tar.gz' in tz._filename:
- raise ValueError('Bad tz filename. Dateutil on python 3 on windows has a bug which causes tzfile._filename to be the same for all '
- 'timezone files. Please construct dateutil timezones implicitly by passing a string like "dateutil/Europe/London" '
- 'when you construct your pandas objects instead of passing a timezone object. See https://github.com/pydata/pandas/pull/7362')
+ raise ValueError('Bad tz filename. Dateutil on python 3 on '
+ 'windows has a bug which causes tzfile._filename '
+ 'to be the same for all timezone files. Please '
+ 'construct dateutil timezones implicitly by '
+ 'passing a string like "dateutil/Europe/London" '
+ 'when you construct your pandas objects instead '
+ 'of passing a timezone object. See '
+ 'https://github.com/pydata/pandas/pull/7362')
return 'dateutil' + tz._filename
else:
return None
@@ -3956,26 +4075,29 @@ cdef object _get_dst_info(object tz):
if len(tz._trans_list):
# get utc trans times
trans_list = _get_utc_trans_times_from_dateutil_tz(tz)
- trans = np.hstack([np.array([0], dtype='M8[s]'), # place holder for first item
- np.array(trans_list, dtype='M8[s]')]).astype('M8[ns]') # all trans listed
+ trans = np.hstack([
+ np.array([0], dtype='M8[s]'), # place holder for first item
+ np.array(trans_list, dtype='M8[s]')]).astype(
+ 'M8[ns]') # all trans listed
trans = trans.view('i8')
trans[0] = NPY_NAT + 1
# deltas
- deltas = np.array([v.offset for v in (tz._ttinfo_before,) + tz._trans_idx], dtype='i8') # + (tz._ttinfo_std,)
+ deltas = np.array([v.offset for v in (
+ tz._ttinfo_before,) + tz._trans_idx], dtype='i8')
deltas *= 1000000000
typ = 'dateutil'
elif _is_fixed_offset(tz):
trans = np.array([NPY_NAT + 1], dtype=np.int64)
- deltas = np.array([tz._ttinfo_std.offset], dtype='i8') * 1000000000
+ deltas = np.array([tz._ttinfo_std.offset],
+ dtype='i8') * 1000000000
typ = 'fixed'
else:
trans = np.array([], dtype='M8[ns]')
deltas = np.array([], dtype='i8')
typ = None
-
else:
# static tzinfo
trans = np.array([NPY_NAT + 1], dtype=np.int64)
@@ -3989,8 +4111,9 @@ cdef object _get_dst_info(object tz):
cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
"""
- Transition times in dateutil timezones are stored in local non-dst time. This code
- converts them to UTC. It's the reverse of the code in dateutil.tz.tzfile.__init__.
+ Transition times in dateutil timezones are stored in local non-dst
+ time. This code converts them to UTC. It's the reverse of the code
+ in dateutil.tz.tzfile.__init__.
"""
new_trans = list(tz._trans_list)
last_std_offset = 0
@@ -4000,6 +4123,7 @@ cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
new_trans[i] = trans - last_std_offset
return new_trans
+
def tot_seconds(td):
return total_seconds(td)
@@ -4069,7 +4193,8 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
elif hasattr(ambiguous, '__iter__'):
is_dst = True
if len(ambiguous) != len(vals):
- raise ValueError("Length of ambiguous bool-array must be the same size as vals")
+ raise ValueError(
+ "Length of ambiguous bool-array must be the same size as vals")
trans, deltas, typ = _get_dst_info(tz)
@@ -4082,7 +4207,8 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
result_b.fill(NPY_NAT)
# left side
- idx_shifted = (np.maximum(0, trans.searchsorted(vals - DAY_NS, side='right') - 1)).astype(np.int64)
+ idx_shifted = (np.maximum(0, trans.searchsorted(
+ vals - DAY_NS, side='right') - 1)).astype(np.int64)
for i in range(n):
v = vals[i] - deltas[idx_shifted[i]]
@@ -4093,7 +4219,8 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
result_a[i] = v
# right side
- idx_shifted = (np.maximum(0, trans.searchsorted(vals + DAY_NS, side='right') - 1)).astype(np.int64)
+ idx_shifted = (np.maximum(0, trans.searchsorted(
+ vals + DAY_NS, side='right') - 1)).astype(np.int64)
for i in range(n):
v = vals[i] - deltas[idx_shifted[i]]
@@ -4110,36 +4237,39 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
# Get the ambiguous hours (given the above, these are the hours
# where result_a != result_b and neither of them are NAT)
both_nat = np.logical_and(result_a != NPY_NAT, result_b != NPY_NAT)
- both_eq = result_a == result_b
+ both_eq = result_a == result_b
trans_idx = np.squeeze(np.nonzero(np.logical_and(both_nat, ~both_eq)))
if trans_idx.size == 1:
stamp = Timestamp(vals[trans_idx])
- raise pytz.AmbiguousTimeError("Cannot infer dst time from %s as"
- "there are no repeated times" % stamp)
+ raise pytz.AmbiguousTimeError(
+ "Cannot infer dst time from %s as there "
+ "are no repeated times" % stamp)
# Split the array into contiguous chunks (where the difference between
- # indices is 1). These are effectively dst transitions in different years
- # which is useful for checking that there is not an ambiguous transition
- # in an individual year.
+ # indices is 1). These are effectively dst transitions in different
+ # years which is useful for checking that there is not an ambiguous
+ # transition in an individual year.
if trans_idx.size > 0:
- one_diff = np.where(np.diff(trans_idx)!=1)[0]+1
+ one_diff = np.where(np.diff(trans_idx) != 1)[0] +1
trans_grp = np.array_split(trans_idx, one_diff)
- # Iterate through each day, if there are no hours where the delta is negative
- # (indicates a repeat of hour) the switch cannot be inferred
+ # Iterate through each day, if there are no hours where the
+ # delta is negative (indicates a repeat of hour) the switch
+ # cannot be inferred
for grp in trans_grp:
delta = np.diff(result_a[grp])
- if grp.size == 1 or np.all(delta>0):
+ if grp.size == 1 or np.all(delta > 0):
stamp = Timestamp(vals[grp[0]])
raise pytz.AmbiguousTimeError(stamp)
- # Find the index for the switch and pull from a for dst and b for standard
- switch_idx = (delta<=0).nonzero()[0]
+ # Find the index for the switch and pull from a for dst and b
+ # for standard
+ switch_idx = (delta <= 0).nonzero()[0]
if switch_idx.size > 1:
- raise pytz.AmbiguousTimeError("There are %i dst switches "
- "when there should only be 1."
- % switch_idx.size)
- switch_idx = switch_idx[0]+1 # Pull the only index and adjust
+ raise pytz.AmbiguousTimeError(
+ "There are %i dst switches when "
+ "there should only be 1." % switch_idx.size)
+ switch_idx = switch_idx[0] + 1 # Pull the only index and adjust
a_idx = grp[:switch_idx]
b_idx = grp[switch_idx:]
dst_hours[grp] = np.hstack((result_a[a_idx], result_b[b_idx]))
@@ -4164,9 +4294,9 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
result[i] = NPY_NAT
else:
stamp = Timestamp(vals[i])
- raise pytz.AmbiguousTimeError("Cannot infer dst time from %r, "\
- "try using the 'ambiguous' argument"
- % stamp)
+ raise pytz.AmbiguousTimeError(
+ "Cannot infer dst time from %r, try using the "
+ "'ambiguous' argument" % stamp)
elif left != NPY_NAT:
result[i] = left
elif right != NPY_NAT:
@@ -4246,6 +4376,7 @@ def build_field_sarray(ndarray[int64_t] dtindex):
return out
+
def get_time_micros(ndarray[int64_t] dtindex):
"""
Datetime as int64 representation to a structured array of fields
@@ -4284,7 +4415,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
_month_offset = np.array(
[[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ],
[ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]],
- dtype=np.int32 )
+ dtype=np.int32 )
count = len(dtindex)
out = np.empty(count, dtype='i4')
@@ -4294,7 +4425,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.year
return out
@@ -4303,7 +4435,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.month
return out
@@ -4312,7 +4445,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.day
return out
@@ -4321,7 +4455,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.hour
return out
@@ -4330,7 +4465,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.min
return out
@@ -4339,7 +4475,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.sec
return out
@@ -4348,7 +4485,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.us
return out
@@ -4357,7 +4495,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.ps / 1000
return out
elif field == 'doy':
@@ -4365,9 +4504,10 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
isleap = is_leapyear(dts.year)
- out[i] = _month_offset[isleap, dts.month-1] + dts.day
+ out[i] = _month_offset[isleap, dts.month -1] + dts.day
return out
elif field == 'dow':
@@ -4375,7 +4515,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dayofweek(dts.year, dts.month, dts.day)
return out
@@ -4384,7 +4525,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
isleap = is_leapyear(dts.year)
isleap_prev = is_leapyear(dts.year - 1)
mo_off = _month_offset[isleap, dts.month - 1]
@@ -4414,7 +4556,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = dts.month
out[i] = ((out[i] - 1) / 3) + 1
return out
@@ -4424,7 +4567,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
out[i] = days_in_month(dts)
return out
elif field == 'is_leap_year':
@@ -4434,7 +4578,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
@cython.wraparound(False)
-def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=None, int month_kw=12):
+def get_start_end_field(ndarray[int64_t] dtindex, object field,
+ object freqstr=None, int month_kw=12):
"""
Given an int64-based datetime index return array of indicators
of whether timestamps are at the start/end of the month/quarter/year
@@ -4456,21 +4601,24 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
_month_offset = np.array(
[[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ],
[ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]],
- dtype=np.int32 )
+ dtype=np.int32 )
count = len(dtindex)
out = np.zeros(count, dtype='int8')
if freqstr:
if freqstr == 'C':
- raise ValueError("Custom business days is not supported by %s" % field)
+ raise ValueError(
+ "Custom business days is not supported by %s" % field)
is_business = freqstr[0] == 'B'
- # YearBegin(), BYearBegin() use month = starting month of year
- # QuarterBegin(), BQuarterBegin() use startingMonth = starting month of year
- # other offests use month, startingMonth as ending month of year.
+ # YearBegin(), BYearBegin() use month = starting month of year.
+ # QuarterBegin(), BQuarterBegin() use startingMonth = starting
+ # month of year. Other offests use month, startingMonth as ending
+ # month of year.
- if (freqstr[0:2] in ['MS', 'QS', 'AS']) or (freqstr[1:3] in ['MS', 'QS', 'AS']):
+ if (freqstr[0:2] in ['MS', 'QS', 'AS']) or (
+ freqstr[1:3] in ['MS', 'QS', 'AS']):
end_month = 12 if month_kw == 1 else month_kw - 1
start_month = month_kw
else:
@@ -4485,7 +4633,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
dom = dts.day
dow = ts_dayofweek(ts)
@@ -4497,7 +4646,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
dom = dts.day
if dom == 1:
@@ -4509,7 +4659,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
@@ -4518,14 +4669,16 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
ldom = _month_offset[isleap, dts.month]
dow = ts_dayofweek(ts)
- if (ldom == doy and dow < 5) or (dow == 4 and (ldom - doy <= 2)):
+ if (ldom == doy and dow < 5) or (
+ dow == 4 and (ldom - doy <= 2)):
out[i] = 1
return out.view(bool)
else:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
dom = dts.day
@@ -4541,19 +4694,22 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
dom = dts.day
dow = ts_dayofweek(ts)
- if ((dts.month - start_month) % 3 == 0) and ((dom == 1 and dow < 5) or (dom <= 3 and dow == 0)):
+ if ((dts.month - start_month) % 3 == 0) and (
+ (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)):
out[i] = 1
return out.view(bool)
else:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
dom = dts.day
if ((dts.month - start_month) % 3 == 0) and dom == 1:
@@ -4565,7 +4721,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
@@ -4574,14 +4731,17 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
ldom = _month_offset[isleap, dts.month]
dow = ts_dayofweek(ts)
- if ((dts.month - end_month) % 3 == 0) and ((ldom == doy and dow < 5) or (dow == 4 and (ldom - doy <= 2))):
+ if ((dts.month - end_month) % 3 == 0) and (
+ (ldom == doy and dow < 5) or (
+ dow == 4 and (ldom - doy <= 2))):
out[i] = 1
return out.view(bool)
else:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
dom = dts.day
@@ -4597,19 +4757,22 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
dom = dts.day
dow = ts_dayofweek(ts)
- if (dts.month == start_month) and ((dom == 1 and dow < 5) or (dom <= 3 and dow == 0)):
+ if (dts.month == start_month) and (
+ (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)):
out[i] = 1
return out.view(bool)
else:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
dom = dts.day
if (dts.month == start_month) and dom == 1:
@@ -4621,7 +4784,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
dom = dts.day
@@ -4630,14 +4794,17 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
dow = ts_dayofweek(ts)
ldom = _month_offset[isleap, dts.month]
- if (dts.month == end_month) and ((ldom == doy and dow < 5) or (dow == 4 and (ldom - doy <= 2))):
+ if (dts.month == end_month) and (
+ (ldom == doy and dow < 5) or (
+ dow == 4 and (ldom - doy <= 2))):
out[i] = 1
return out.view(bool)
else:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
@@ -4651,6 +4818,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N
raise ValueError("Field %s not supported" % field)
+
@cython.wraparound(False)
@cython.boundscheck(False)
def get_date_name_field(ndarray[int64_t] dtindex, object field):
@@ -4666,8 +4834,9 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field):
int dow
_dayname = np.array(
- ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
- dtype=np.object_ )
+ ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
+ 'Friday', 'Saturday', 'Sunday'],
+ dtype=np.object_ )
count = len(dtindex)
out = np.empty(count, dtype=object)
@@ -4710,11 +4879,13 @@ def date_normalize(ndarray[int64_t] stamps, tz=None):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ stamps[i], PANDAS_FR_ns, &dts)
result[i] = _normalized_stamp(&dts)
return result
+
@cython.wraparound(False)
@cython.boundscheck(False)
cdef _normalize_local(ndarray[int64_t] stamps, object tz):
@@ -4730,15 +4901,15 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ stamps[i], PANDAS_FR_ns, &dts)
result[i] = _normalized_stamp(&dts)
elif _is_tzlocal(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns,
- &dts)
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
@@ -4755,7 +4926,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
pos = _pos
# statictzinfo
- if typ not in ['pytz','dateutil']:
+ if typ not in ['pytz', 'dateutil']:
for i in range(n):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
@@ -4840,7 +5011,7 @@ def monthrange(int64_t year, int64_t month):
if month < 1 or month > 12:
raise ValueError("bad month number 0; must be 1-12")
- days = days_per_month_table[is_leapyear(year)][month-1]
+ days = days_per_month_table[is_leapyear(year)][month -1]
return (dayofweek(year, month, 1), days)
@@ -4848,7 +5019,7 @@ cdef inline int64_t ts_dayofweek(_TSObject ts):
return dayofweek(ts.dts.year, ts.dts.month, ts.dts.day)
cdef inline int days_in_month(pandas_datetimestruct dts) nogil:
- return days_per_month_table[is_leapyear(dts.year)][dts.month-1]
+ return days_per_month_table[is_leapyear(dts.year)][dts.month -1]
cpdef normalize_date(object dt):
"""
@@ -4874,10 +5045,14 @@ cdef inline int _year_add_months(pandas_datetimestruct dts,
cdef inline int _month_add_months(pandas_datetimestruct dts,
int months) nogil:
- """new month number after shifting pandas_datetimestruct number of months"""
+ """
+ New month number after shifting pandas_datetimestruct
+ number of months.
+ """
cdef int new_month = (dts.month + months) % 12
return 12 if new_month == 0 else new_month
+
@cython.wraparound(False)
@cython.boundscheck(False)
def shift_months(int64_t[:] dtindex, int months, object day=None):
@@ -4902,7 +5077,8 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
with nogil:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
@@ -4916,7 +5092,8 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
with nogil:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
months_to_roll = months
# offset semantics - if on the anchor point and going backwards
@@ -4937,7 +5114,8 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
with nogil:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ pandas_datetime_to_datetimestruct(
+ dtindex[i], PANDAS_FR_ns, &dts)
months_to_roll = months
# similar semantics - when adding shift forward by one
@@ -4992,10 +5170,12 @@ except:
__all__ = []
+
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
+
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
@@ -5075,8 +5255,9 @@ class LocaleTime(object):
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
- for hour in (01,22):
- time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
+ for hour in (01, 22):
+ time_tuple = time.struct_time(
+ (1999, 3, 17, hour, 44, 55, 2, 76, 0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
@@ -5088,22 +5269,23 @@ class LocaleTime(object):
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
- time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
+ time_tuple = time.struct_time((1999, 3, 17, 22, 44, 55, 2, 76, 0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
- (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
- (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
- ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
- ('44', '%M'), ('55', '%S'), ('76', '%j'),
- ('17', '%d'), ('03', '%m'), ('3', '%m'),
- # '3' needed for when no leading zero.
- ('2', '%w'), ('10', '%I')]
+ (self.f_month[3],
+ '%B'), (self.a_weekday[2], '%a'),
+ (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
+ ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
+ ('44', '%M'), ('55', '%S'), ('76', '%j'),
+ ('17', '%d'), ('03', '%m'), ('3', '%m'),
+ # '3' needed for when no leading zero.
+ ('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
- for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
+ for offset, directive in ((0, '%c'), (1, '%x'), (2, '%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
@@ -5115,7 +5297,7 @@ class LocaleTime(object):
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
- time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
+ time_tuple = time.struct_time((1999, 1, 3, 1, 1, 1, 6, 3, 0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
@@ -5161,7 +5343,8 @@ class TimeRE(dict):
'f': r"(?P<f>[0-9]{1,9})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
- 'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
+ 'j': (r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|"
+ r"[1-9]\d|0[1-9]|[1-9])"),
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
@@ -5221,11 +5404,11 @@ class TimeRE(dict):
whitespace_replacement = re_compile(r'\s+')
format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
- directive_index = format.index('%')+1
+ directive_index = format.index('%') +1
processed_format = "%s%s%s" % (processed_format,
- format[:directive_index-1],
+ format[:directive_index -1],
self[format[directive_index]])
- format = format[directive_index+1:]
+ format = format[directive_index +1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
@@ -5239,7 +5422,8 @@ _TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
-cdef _calc_julian_from_U_or_W(int year, int week_of_year, int day_of_week, int week_starts_Mon):
+cdef _calc_julian_from_U_or_W(int year, int week_of_year,
+ int day_of_week, int week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
| closes #12995
flake8-ed *.pyx files and fixed errors.
Removed the E226 check because that inhibits pointers (e.g. char*). In addition, the check is not even universally accepted in Python.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14147 | 2016-09-04T06:43:56Z | 2016-09-06T10:18:08Z | null | 2016-09-06T20:10:28Z |
BUG: DatetimeTZBlock can't assign values near dst boundary | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index a3e8f0c314352..1fd135f1436da 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1559,7 +1559,7 @@ Bug Fixes
- Bug in ``Series`` flexible arithmetic methods (like ``.add()``) raises ``ValueError`` when ``axis=None`` (:issue:`13894`)
- Bug in ``DataFrame.to_csv()`` with ``MultiIndex`` columns in which a stray empty line was added (:issue:`6618`)
- Bug in ``DatetimeIndex``, ``TimedeltaIndex`` and ``PeriodIndex.equals()`` may return ``True`` when input isn't ``Index`` but contains the same values (:issue:`13107`)
-
+- Bug in assignment against datetime with timezone may not work if it contains datetime near DST boundary (:issue:`14146`)
- Bug in ``Index`` raises ``KeyError`` displaying incorrect column when column is not in the df and columns contains duplicate values (:issue:`13822`)
- Bug in ``Period`` and ``PeriodIndex`` creating wrong dates when frequency has combined offset aliases (:issue:`13874`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index da72309b8eae1..11721a5bdac29 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1487,7 +1487,10 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
-------
a new block(s), the result of the putmask
"""
- new_values = self.values if inplace else self.values.copy()
+
+ # use block's copy logic.
+ # .values may be an Index which does shallow copy by default
+ new_values = self.values if inplace else self.copy().values
new_values, _, new, _ = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
@@ -2314,7 +2317,7 @@ def __init__(self, values, placement, ndim=2, **kwargs):
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
- values = values.tz_localize('UTC').tz_convert(dtype.tz)
+ values = values._shallow_copy(tz=dtype.tz)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
@@ -2381,12 +2384,14 @@ def _try_coerce_args(self, values, other):
base-type values, values mask, base-type other, other mask
"""
values_mask = _block_shape(isnull(values), ndim=self.ndim)
- values = _block_shape(values.tz_localize(None).asi8, ndim=self.ndim)
+ # asi8 is a view, needs copy
+ values = _block_shape(values.asi8, ndim=self.ndim)
other_mask = False
if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isnull(other)
+
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
@@ -2395,7 +2400,7 @@ def _try_coerce_args(self, values, other):
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
- other = other.tz_localize(None).asi8
+ other = other.asi8
other_mask = isnull(other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
@@ -2405,7 +2410,7 @@ def _try_coerce_args(self, values, other):
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other_mask = isnull(other)
- other = other.tz_localize(None).value
+ other = other.value
return values, values_mask, other, other_mask
@@ -2415,12 +2420,12 @@ def _try_coerce_result(self, result):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
- result = lib.Timestamp(result).tz_localize(self.values.tz)
+ result = lib.Timestamp(result, tz=self.values.tz)
if isinstance(result, np.ndarray):
# allow passing of > 1dim if its trivial
if result.ndim > 1:
result = result.reshape(len(result))
- result = self._holder(result).tz_localize(self.values.tz)
+ result = self.values._shallow_copy(result)
return result
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 5fbaea6c5efcb..0cfa7258461f1 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -229,7 +229,7 @@ def test_setitem_series_datetime64tz(self):
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
- pd.Timestamp(1).tz_localize(tz),
+ pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
@@ -1038,7 +1038,7 @@ def test_fillna_series_datetime64tz(self):
# datetime64tz + int => datetime64tz
# ToDo: must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
- pd.Timestamp(1).tz_localize(tz=tz),
+ pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp,
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 5eef06bacfcb0..7c16fd060b181 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -776,6 +776,89 @@ def test_ix_getitem_iterator(self):
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
+ def test_setitem_with_tz(self):
+ for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
+ orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
+ tz=tz))
+ self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
+
+ # scalar
+ s = orig.copy()
+ s[1] = pd.Timestamp('2011-01-01', tz=tz)
+ exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
+ pd.Timestamp('2011-01-01 00:00', tz=tz),
+ pd.Timestamp('2016-01-01 02:00', tz=tz)])
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
+ tm.assert_series_equal(s, exp)
+
+ # vector
+ vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
+ pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
+ self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
+
+ s[[1, 2]] = vals
+ exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
+ pd.Timestamp('2011-01-01 00:00', tz=tz),
+ pd.Timestamp('2012-01-01 00:00', tz=tz)])
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.loc[[1, 2]] = vals
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.iloc[[1, 2]] = vals
+ tm.assert_series_equal(s, exp)
+
+ def test_setitem_with_tz_dst(self):
+ # GH XXX
+ tz = 'US/Eastern'
+ orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
+ tz=tz))
+ self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
+
+ # scalar
+ s = orig.copy()
+ s[1] = pd.Timestamp('2011-01-01', tz=tz)
+ exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
+ pd.Timestamp('2011-01-01 00:00', tz=tz),
+ pd.Timestamp('2016-11-06 02:00', tz=tz)])
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
+ tm.assert_series_equal(s, exp)
+
+ # vector
+ vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
+ pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
+ self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
+
+ s[[1, 2]] = vals
+ exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
+ pd.Timestamp('2011-01-01 00:00', tz=tz),
+ pd.Timestamp('2012-01-01 00:00', tz=tz)])
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.loc[[1, 2]] = vals
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.iloc[[1, 2]] = vals
+ tm.assert_series_equal(s, exp)
+
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
diff --git a/pandas/tests/series/test_misc_api.py b/pandas/tests/series/test_misc_api.py
index d74966738909d..61bdc59cd500d 100644
--- a/pandas/tests/series/test_misc_api.py
+++ b/pandas/tests/series/test_misc_api.py
@@ -241,7 +241,6 @@ def test_copy(self):
self.assertTrue(np.isnan(s2[0]))
self.assertFalse(np.isnan(s[0]))
else:
-
# we DID modify the original Series
self.assertTrue(np.isnan(s2[0]))
self.assertTrue(np.isnan(s[0]))
@@ -252,6 +251,7 @@ def test_copy(self):
expected2 = Series([Timestamp('1999/01/01', tz='UTC')])
for deep in [None, False, True]:
+
s = Series([Timestamp('2012/01/01', tz='UTC')])
if deep is None:
@@ -263,11 +263,13 @@ def test_copy(self):
# default deep is True
if deep is None or deep is True:
- assert_series_equal(s, expected)
+ # Did not modify original Series
assert_series_equal(s2, expected2)
+ assert_series_equal(s, expected)
else:
- assert_series_equal(s, expected2)
+ # we DID modify the original Series
assert_series_equal(s2, expected2)
+ assert_series_equal(s, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index ed10f5b0a7af3..4e6c58df54dfd 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -130,49 +130,66 @@ def test_datetime64_fillna(self):
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
- s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
- '2011-01-03 10:00'), pd.NaT])
+ s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
+ Timestamp('2011-01-03 10:00'), pd.NaT])
+ null_loc = pd.Series([False, True, False, True])
+
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
- expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
- '2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
- '2011-01-02 10:00')])
+ expected = Series([Timestamp('2011-01-01 10:00'),
+ Timestamp('2011-01-02 10:00'),
+ Timestamp('2011-01-03 10:00'),
+ Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
+ # check s is not changed
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
- expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
- '2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
- Timestamp('2011-01-02 10:00', tz=tz)])
+ expected = Series([Timestamp('2011-01-01 10:00'),
+ Timestamp('2011-01-02 10:00', tz=tz),
+ Timestamp('2011-01-03 10:00'),
+ Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
- expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
- '2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
- Timestamp('2011-01-04 10:00')])
+ expected = Series([Timestamp('2011-01-01 10:00'),
+ Timestamp('2011-01-02 10:00', tz=tz),
+ Timestamp('2011-01-03 10:00'),
+ Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
- expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
- '2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
- '2011-01-04 10:00')])
+ expected = Series([Timestamp('2011-01-01 10:00'),
+ Timestamp('2011-01-02 10:00'),
+ Timestamp('2011-01-03 10:00'),
+ Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
+ self.assertEqual(s.dtype, 'datetime64[ns, {0}]'.format(tz))
+ self.assert_series_equal(pd.isnull(s), null_loc)
+
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
- expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
- '2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
- Timestamp('2011-01-02 10:00')])
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2011-01-02 10:00'),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
@@ -180,42 +197,50 @@ def test_datetime64_tz_fillna(self):
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
- result = s.fillna(pd.Timestamp(
- '2011-01-02 10:00', tz=tz).to_pydatetime())
+ result = s.fillna(pd.Timestamp('2011-01-02 10:00',
+ tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
- expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
- '2011-01-02 10:00', tz=tz), Timestamp(
- '2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2011-01-02 10:00', tz=tz),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
- expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
- '2011-01-02 10:00', tz=tz), Timestamp(
- '2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
- tz=tz)])
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2011-01-02 10:00', tz=tz),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2011-01-04 10:00', tz=tz)])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
- expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
- '2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
- '2013-01-01')])
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2013-01-01'),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2013-01-01')])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
@@ -223,6 +248,7 @@ def test_datetime64_tz_fillna(self):
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
+ self.assert_series_equal(pd.isnull(s), null_loc)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
| - [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Value assignment doesn't work if data contains DST boundary because of internal `.localize(None)`.
```
s = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3, tz='US/Eastern'))
s[1] = pd.Timestamp('2011-01-01', tz='US/Eastern')
s
#0 2016-11-06 00:00:00-04:00
#1 2016-11-06 01:00:00-04:00
#2 2016-11-06 01:00:00-05:00
# dtype: datetime64[ns, US/Eastern]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14146 | 2016-09-04T01:14:48Z | 2016-09-10T14:34:57Z | null | 2016-09-10T21:12:49Z |
CLN/BUG: fix ndarray assignment may cause unexpected cast | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index d76a78c68fb73..5f62ec8330165 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -446,6 +446,8 @@ Bug Fixes
- Bug in ``GroupBy.get_group()`` failing with a categorical grouper (:issue:`15155`)
- Bug in ``pandas.tools.utils.cartesian_product()`` with large input can cause overflow on windows (:issue:`15265`)
+- Bug in assignment against datetime-like data with ``int`` may incorrectly converted to datetime-like (:issue:`14145`)
+- Bug in assignment against ``int64`` data with ``np.ndarray`` with ``float64`` dtype may keep ``int64`` dtype (:issue:`14001`)
- Bug in ``.groupby(...).rolling(...)`` when ``on`` is specified and using a ``DatetimeIndex`` (:issue:`15130`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cf306034001db..09f35a578e626 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -23,7 +23,8 @@
import numpy as np
import numpy.ma as ma
-from pandas.types.cast import (_maybe_upcast, _infer_dtype_from_scalar,
+from pandas.types.cast import (_maybe_upcast,
+ _cast_scalar_to_array,
_possibly_cast_to_datetime,
_possibly_infer_to_datetimelike,
_possibly_convert_platform,
@@ -333,15 +334,10 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
- if isinstance(data, compat.string_types) and dtype is None:
- dtype = np.object_
- if dtype is None:
- dtype, data = _infer_dtype_from_scalar(data)
-
- values = np.empty((len(index), len(columns)), dtype=dtype)
- values.fill(data)
- mgr = self._init_ndarray(values, index, columns, dtype=dtype,
- copy=False)
+ values = _cast_scalar_to_array((len(index), len(columns)),
+ data, dtype=dtype)
+ mgr = self._init_ndarray(values, index, columns,
+ dtype=values.dtype, copy=False)
else:
raise PandasError('DataFrame constructor not properly called!')
@@ -455,7 +451,7 @@ def _get_axes(N, K, index=index, columns=columns):
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
- if values.dtype != dtype:
+ if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
@@ -2641,9 +2637,8 @@ def reindexer(value):
else:
# upcast the scalar
- dtype, value = _infer_dtype_from_scalar(value)
- value = np.repeat(value, len(self.index)).astype(dtype)
- value = _possibly_cast_to_datetime(value, dtype)
+ value = _cast_scalar_to_array(len(self.index), value)
+ value = _possibly_cast_to_datetime(value, value.dtype)
# return internal types directly
if is_extension_type(value):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 289ce150eb46b..281acb89c5c44 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -41,7 +41,7 @@
is_null_datelike_scalar)
import pandas.types.concat as _concat
-from pandas.types.generic import ABCSeries
+from pandas.types.generic import ABCSeries, ABCDatetimeIndex
from pandas.core.common import is_null_slice
import pandas.core.algorithms as algos
@@ -379,7 +379,8 @@ def fillna(self, value, limit=None, inplace=False, downcast=None,
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
- values, _, value, _ = self._try_coerce_args(self.values, value)
+ values, _, _, _ = self._try_coerce_args(self.values, value)
+ # value may be converted to internal, thus drop
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
@@ -673,8 +674,43 @@ def setitem(self, indexer, value, mgr=None):
if self.is_numeric:
value = np.nan
- # coerce args
- values, _, value, _ = self._try_coerce_args(self.values, value)
+ # coerce if block dtype can store value
+ values = self.values
+ try:
+ values, _, value, _ = self._try_coerce_args(values, value)
+ # can keep its own dtype
+ if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
+ value.dtype):
+ dtype = self.dtype
+ else:
+ dtype = 'infer'
+
+ except (TypeError, ValueError):
+ # current dtype cannot store value, coerce to common dtype
+ find_dtype = False
+
+ if hasattr(value, 'dtype'):
+ dtype = value.dtype
+ find_dtype = True
+
+ elif is_scalar(value):
+ if isnull(value):
+ # NaN promotion is handled in latter path
+ dtype = False
+ else:
+ dtype, _ = _infer_dtype_from_scalar(value,
+ pandas_dtype=True)
+ find_dtype = True
+ else:
+ dtype = 'infer'
+
+ if find_dtype:
+ dtype = _find_common_type([values.dtype, dtype])
+ if not is_dtype_equal(self.dtype, dtype):
+ b = self.astype(dtype)
+ return b.setitem(indexer, value, mgr=mgr)
+
+ # value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
@@ -704,87 +740,52 @@ def setitem(self, indexer, value, mgr=None):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
- try:
-
- def _is_scalar_indexer(indexer):
- # return True if we are all scalar indexers
-
- if arr_value.ndim == 1:
- if not isinstance(indexer, tuple):
- indexer = tuple([indexer])
- return all([is_scalar(idx) for idx in indexer])
- return False
-
- def _is_empty_indexer(indexer):
- # return a boolean if we have an empty indexer
+ def _is_scalar_indexer(indexer):
+ # return True if we are all scalar indexers
- if arr_value.ndim == 1:
- if not isinstance(indexer, tuple):
- indexer = tuple([indexer])
- return any(isinstance(idx, np.ndarray) and len(idx) == 0
- for idx in indexer)
- return False
-
- # empty indexers
- # 8669 (empty)
- if _is_empty_indexer(indexer):
- pass
-
- # setting a single element for each dim and with a rhs that could
- # be say a list
- # GH 6043
- elif _is_scalar_indexer(indexer):
- values[indexer] = value
-
- # if we are an exact match (ex-broadcasting),
- # then use the resultant dtype
- elif (len(arr_value.shape) and
- arr_value.shape[0] == values.shape[0] and
- np.prod(arr_value.shape) == np.prod(values.shape)):
- values[indexer] = value
- values = values.astype(arr_value.dtype)
-
- # set
- else:
- values[indexer] = value
-
- # coerce and try to infer the dtypes of the result
- if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
- value.dtype):
- dtype = value.dtype
- elif is_scalar(value):
- dtype, _ = _infer_dtype_from_scalar(value)
- else:
- dtype = 'infer'
- values = self._try_coerce_and_cast_result(values, dtype)
- block = self.make_block(transf(values), fastpath=True)
-
- # may have to soft convert_objects here
- if block.is_object and not self.is_object:
- block = block.convert(numeric=False)
-
- return block
- except ValueError:
- raise
- except TypeError:
+ if arr_value.ndim == 1:
+ if not isinstance(indexer, tuple):
+ indexer = tuple([indexer])
+ return all([is_scalar(idx) for idx in indexer])
+ return False
- # cast to the passed dtype if possible
- # otherwise raise the original error
- try:
- # e.g. we are uint32 and our value is uint64
- # this is for compat with older numpies
- block = self.make_block(transf(values.astype(value.dtype)))
- return block.setitem(indexer=indexer, value=value, mgr=mgr)
+ def _is_empty_indexer(indexer):
+ # return a boolean if we have an empty indexer
- except:
- pass
-
- raise
+ if arr_value.ndim == 1:
+ if not isinstance(indexer, tuple):
+ indexer = tuple([indexer])
+ return any(isinstance(idx, np.ndarray) and len(idx) == 0
+ for idx in indexer)
+ return False
- except Exception:
+ # empty indexers
+ # 8669 (empty)
+ if _is_empty_indexer(indexer):
pass
- return [self]
+ # setting a single element for each dim and with a rhs that could
+ # be say a list
+ # GH 6043
+ elif _is_scalar_indexer(indexer):
+ values[indexer] = value
+
+ # if we are an exact match (ex-broadcasting),
+ # then use the resultant dtype
+ elif (len(arr_value.shape) and
+ arr_value.shape[0] == values.shape[0] and
+ np.prod(arr_value.shape) == np.prod(values.shape)):
+ values[indexer] = value
+ values = values.astype(arr_value.dtype)
+
+ # set
+ else:
+ values[indexer] = value
+
+ # coerce and try to infer the dtypes of the result
+ values = self._try_coerce_and_cast_result(values, dtype)
+ block = self.make_block(transf(values), fastpath=True)
+ return block
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
@@ -1255,6 +1256,7 @@ def func(cond, values, other):
values, values_mask, other, other_mask = self._try_coerce_args(
values, other)
+
try:
return self._try_coerce_result(expressions.where(
cond, values, other, raise_on_error=True))
@@ -1534,6 +1536,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
+
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
@@ -1703,7 +1706,7 @@ def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
- if not isinstance(value, np.timedelta64) and is_integer(value):
+ if not isinstance(value, np.timedelta64):
value = Timedelta(value, unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
@@ -1937,6 +1940,15 @@ def _maybe_downcast(self, blocks, downcast=None):
def _can_hold_element(self, element):
return True
+ def _try_coerce_args(self, values, other):
+ """ provide coercion to our input arguments """
+
+ if isinstance(other, ABCDatetimeIndex):
+ # to store DatetimeTZBlock as object
+ other = other.asobject.values
+
+ return values, False, other, False
+
def _try_cast(self, element):
return element
@@ -2276,8 +2288,6 @@ def _try_coerce_args(self, values, other):
"naive Block")
other_mask = isnull(other)
other = other.asm8.view('i8')
- elif hasattr(other, 'dtype') and is_integer_dtype(other):
- other = other.view('i8')
else:
try:
other = np.asarray(other)
@@ -2453,6 +2463,8 @@ def _try_coerce_args(self, values, other):
raise ValueError("incompatible or non tz-aware value")
other_mask = isnull(other)
other = other.value
+ else:
+ raise TypeError
return values, values_mask, other, other_mask
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index a11ef53de1af9..969efb199af3f 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -9,6 +9,7 @@
import numpy as np
from pandas.types.cast import (_infer_dtype_from_scalar,
+ _cast_scalar_to_array,
_possibly_cast_item)
from pandas.types.common import (is_integer, is_list_like,
is_string_like, is_scalar)
@@ -166,11 +167,9 @@ def _init_data(self, data, copy, dtype, **kwargs):
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
- if dtype is None:
- dtype, data = _infer_dtype_from_scalar(data)
- values = np.empty([len(x) for x in passed_axes], dtype=dtype)
- values.fill(data)
- mgr = self._init_matrix(values, passed_axes, dtype=dtype,
+ values = _cast_scalar_to_array([len(x) for x in passed_axes],
+ data, dtype=dtype)
+ mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
copy=False)
copy = False
else: # pragma: no cover
@@ -570,9 +569,7 @@ def __setitem__(self, key, value):
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
- dtype, value = _infer_dtype_from_scalar(value)
- mat = np.empty(shape[1:], dtype=dtype)
- mat.fill(value)
+ mat = _cast_scalar_to_array(shape[1:], value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 0cfa7258461f1..6d0034a1a3a9f 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -107,6 +107,19 @@ def test_setitem_series_int64(self):
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
+ def test_setitem_series_int8(self):
+ # integer dtype coercion (no change)
+ obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
+ self.assertEqual(obj.dtype, np.int8)
+
+ exp = pd.Series([1, 1, 3, 4], dtype=np.int8)
+ self._assert_setitem_series_conversion(obj, np.int32(1), exp, np.int8)
+
+ # BUG: it must be Series([1, 1, 3, 4], dtype=np.int16)
+ exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
+ self._assert_setitem_series_conversion(obj, np.int16(2**9), exp,
+ np.int8)
+
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
@@ -207,6 +220,13 @@ def test_setitem_series_datetime64(self):
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
+ # datetime64 + object -> object
+ exp = pd.Series([pd.Timestamp('2011-01-01'),
+ 'x',
+ pd.Timestamp('2011-01-03'),
+ pd.Timestamp('2011-01-04')])
+ self._assert_setitem_series_conversion(obj, 'x', exp, np.object)
+
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
@@ -226,19 +246,62 @@ def test_setitem_series_datetime64tz(self):
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
+ # datetime64tz + datetime64tz (different tz) -> object
+ exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
+ pd.Timestamp('2012-01-01', tz='US/Pacific'),
+ pd.Timestamp('2011-01-03', tz=tz),
+ pd.Timestamp('2011-01-04', tz=tz)])
+ value = pd.Timestamp('2012-01-01', tz='US/Pacific')
+ self._assert_setitem_series_conversion(obj, value, exp, np.object)
+
+ # datetime64tz + datetime64 -> object
+ exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
+ pd.Timestamp('2012-01-01'),
+ pd.Timestamp('2011-01-03', tz=tz),
+ pd.Timestamp('2011-01-04', tz=tz)])
+ value = pd.Timestamp('2012-01-01')
+ self._assert_setitem_series_conversion(obj, value, exp, np.object)
+
# datetime64 + int -> object
- # ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
- pd.Timestamp(1, tz=tz),
+ 1,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
- self._assert_setitem_series_conversion(obj, 1, exp,
- 'datetime64[ns, US/Eastern]')
+ self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
- pass
+ obj = pd.Series([pd.Timedelta('1 day'),
+ pd.Timedelta('2 day'),
+ pd.Timedelta('3 day'),
+ pd.Timedelta('4 day')])
+ self.assertEqual(obj.dtype, 'timedelta64[ns]')
+
+ # timedelta64 + timedelta64 -> timedelta64
+ exp = pd.Series([pd.Timedelta('1 day'),
+ pd.Timedelta('12 day'),
+ pd.Timedelta('3 day'),
+ pd.Timedelta('4 day')])
+ self._assert_setitem_series_conversion(obj, pd.Timedelta('12 day'),
+ exp, 'timedelta64[ns]')
+
+ # timedelta64 + int -> object
+ # ToDo: The result must be object
+ exp = pd.Series([pd.Timedelta('1 day'),
+ pd.Timedelta(1),
+ pd.Timedelta('3 day'),
+ pd.Timedelta('4 day')])
+ self._assert_setitem_series_conversion(obj, 1, exp, 'timedelta64[ns]')
+
+ # timedelta64 + object -> object
+ exp = pd.Series([pd.Timedelta('1 day'),
+ 'x',
+ pd.Timedelta('3 day'),
+ pd.Timedelta('4 day')])
+ self._assert_setitem_series_conversion(obj, 'x', exp, np.object)
+
+ # ToDo: add more tests once the above issue has been fixed
def test_setitem_series_period(self):
pass
@@ -1035,14 +1098,12 @@ def test_fillna_series_datetime64tz(self):
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
- # datetime64tz + int => datetime64tz
- # ToDo: must be object
+ # datetime64tz + int => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
- pd.Timestamp(1, tz=tz),
+ 1,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
- self._assert_fillna_conversion(obj, 1, exp,
- 'datetime64[ns, US/Eastern]')
+ self._assert_fillna_conversion(obj, 1, exp, np.object)
# datetime64tz + object => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index f7fa07916ca74..25f87203210f1 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1011,18 +1011,25 @@ def test_indexing_with_datetime_tz(self):
tm.assert_frame_equal(result, expected)
# indexing - setting an element
- df = DataFrame(data=pd.to_datetime(
- ['2015-03-30 20:12:32', '2015-03-12 00:11:11']), columns=['time'])
+ df = DataFrame(data=pd.to_datetime(['2015-03-30 20:12:32',
+ '2015-03-12 00:11:11']),
+ columns=['time'])
df['new_col'] = ['new', 'old']
df.time = df.set_index('time').index.tz_localize('UTC')
v = df[df.new_col == 'new'].set_index('time').index.tz_convert(
'US/Pacific')
# trying to set a single element on a part of a different timezone
- def f():
- df.loc[df.new_col == 'new', 'time'] = v
+ df2 = df.copy()
+ df2.loc[df2.new_col == 'new', 'time'] = v
- self.assertRaises(ValueError, f)
+ exp = pd.DataFrame({'time': [pd.Timestamp('2015-03-30 13:12:32',
+ tz='US/Pacific'),
+ pd.Timestamp('2015-03-12 00:11:11',
+ tz='UTC')],
+ 'new_col': ['new', 'old']},
+ columns=['time', 'new_col'])
+ tm.assert_frame_equal(df2, exp)
v = df.loc[df.new_col == 'new', 'time'] + pd.Timedelta('1s')
df.loc[df.new_col == 'new', 'time'] = v
@@ -3412,6 +3419,12 @@ def test_multi_assign(self):
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
+ # coerces to float64 because values has float64 dtype
+ # GH 14001
+ expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
+ 'PF': [0, 0, 0, 0, 1, 1],
+ 'col1': [0., 1., 4., 6., 8., 10.],
+ 'col2': [12, 7, 16, np.nan, 20, 22]})
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 07e1be609670f..679558823a278 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1126,11 +1126,11 @@ def test_clip_with_datetimes(self):
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
- s = Series([Timestamp('2015-12-01 09:30:00'), Timestamp(
- '2015-12-01 09:31:00')])
+ s = Series([Timestamp('2015-12-01 09:30:00'),
+ Timestamp('2015-12-01 09:31:00')])
result = s.clip(upper=t)
- expected = Series([Timestamp('2015-12-01 09:30:00'), Timestamp(
- '2015-12-01 09:30:30')])
+ expected = Series([Timestamp('2015-12-01 09:30:00'),
+ Timestamp('2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 3c82e4ed82969..da0040309f6de 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -40,14 +40,14 @@ class TestSeriesMissingData(TestData, tm.TestCase):
def test_timedelta_fillna(self):
# GH 3371
- s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
- '20130102'), Timestamp('20130103 9:01:01')])
+ s = Series([Timestamp('20130101'), Timestamp('20130101'),
+ Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
- expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
- days=1, seconds=9 * 3600 + 60 + 1)])
+ expected = Series([timedelta(0), timedelta(0), timedelta(1),
+ timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interprested as seconds
@@ -57,8 +57,9 @@ def test_timedelta_fillna(self):
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
- expected = Series([timedelta(days=1, seconds=1), timedelta(
- 0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
+ expected = Series([timedelta(days=1, seconds=1), timedelta(0),
+ timedelta(1),
+ timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/types/test_cast.py
index 56a14a51105ca..e2b7e9e2cb3d7 100644
--- a/pandas/tests/types/test_cast.py
+++ b/pandas/tests/types/test_cast.py
@@ -10,12 +10,13 @@
from datetime import datetime
import numpy as np
-from pandas import Timedelta, Timestamp
+from pandas import Timedelta, Timestamp, Period
from pandas.types.cast import (_possibly_downcast_to_dtype,
_possibly_convert_objects,
_infer_dtype_from_scalar,
_maybe_convert_string_to_object,
_maybe_convert_scalar,
+ _cast_scalar_to_array,
_find_common_type)
from pandas.types.dtypes import (CategoricalDtype,
DatetimeTZDtype, PeriodDtype)
@@ -120,11 +121,58 @@ def test_infer_dtype_from_scalar(self):
dtype, val = _infer_dtype_from_scalar(data)
self.assertEqual(dtype, 'm8[ns]')
- for data in [datetime.date(2000, 1, 1),
- Timestamp(1, tz='US/Eastern'), 'foo']:
+ for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
+ dt = Timestamp(1, tz=tz)
+ dtype, val = _infer_dtype_from_scalar(dt, pandas_dtype=True)
+ self.assertEqual(dtype, 'datetime64[ns, {0}]'.format(tz))
+ self.assertEqual(val, dt.value)
+
+ dtype, val = _infer_dtype_from_scalar(dt)
+ self.assertEqual(dtype, np.object_)
+ self.assertEqual(val, dt)
+
+ for freq in ['M', 'D']:
+ p = Period('2011-01-01', freq=freq)
+ dtype, val = _infer_dtype_from_scalar(p, pandas_dtype=True)
+ self.assertEqual(dtype, 'period[{0}]'.format(freq))
+ self.assertEqual(val, p.ordinal)
+
+ dtype, val = _infer_dtype_from_scalar(p)
+ self.assertEqual(dtype, np.object_)
+ self.assertEqual(val, p)
+
+ for data in [datetime.date(2000, 1, 1), 'foo']:
dtype, val = _infer_dtype_from_scalar(data)
self.assertEqual(dtype, np.object_)
+ def test_cast_scalar_to_array(self):
+ arr = _cast_scalar_to_array((3, 2), 1, dtype=np.int64)
+ exp = np.ones((3, 2), dtype=np.int64)
+ tm.assert_numpy_array_equal(arr, exp)
+
+ arr = _cast_scalar_to_array((3, 2), 1.1)
+ exp = np.empty((3, 2), dtype=np.float64)
+ exp.fill(1.1)
+ tm.assert_numpy_array_equal(arr, exp)
+
+ arr = _cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
+ exp = np.empty((2, 3), dtype='datetime64[ns]')
+ exp.fill(np.datetime64('2011-01-01'))
+ tm.assert_numpy_array_equal(arr, exp)
+
+ # pandas dtype is stored as object dtype
+ obj = Timestamp('2011-01-01', tz='US/Eastern')
+ arr = _cast_scalar_to_array((2, 3), obj)
+ exp = np.empty((2, 3), dtype=np.object)
+ exp.fill(obj)
+ tm.assert_numpy_array_equal(arr, exp)
+
+ obj = Period('2011-01-01', freq='D')
+ arr = _cast_scalar_to_array((2, 3), obj)
+ exp = np.empty((2, 3), dtype=np.object)
+ exp.fill(obj)
+ tm.assert_numpy_array_equal(arr, exp)
+
class TestMaybe(tm.TestCase):
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 6b1c3f9c00351..63dac4215a30c 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -3,7 +3,7 @@
from datetime import datetime, timedelta
import numpy as np
from pandas import lib, tslib
-from pandas.tslib import iNaT
+from pandas.tslib import iNaT, NaT, Timestamp
from pandas.compat import string_types, text_type, PY3
from .common import (_ensure_object, is_bool, is_integer, is_float,
is_complex, is_datetimetz, is_categorical_dtype,
@@ -20,7 +20,7 @@
_ensure_int32, _ensure_int64,
_NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES)
-from .dtypes import ExtensionDtype
+from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype
from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries
from .missing import isnull, notnull
from .inference import is_list_like
@@ -249,7 +249,7 @@ def _maybe_promote(dtype, fill_value=np.nan):
else:
if issubclass(dtype.type, np.datetime64):
try:
- fill_value = lib.Timestamp(fill_value).value
+ fill_value = Timestamp(fill_value).value
except:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
@@ -310,16 +310,24 @@ def _maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
-def _infer_dtype_from_scalar(val):
- """ interpret the dtype from a scalar """
+def _infer_dtype_from_scalar(val, pandas_dtype=False):
+ """
+ interpret the dtype from a scalar
+
+ Parameters
+ ----------
+ pandas_dtype : bool, default False
+ whether to infer dtype as numpy compat (not include pandas
+ extension types)
+ """
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
+ msg = "invalid ndarray passed to _infer_dtype_from_scalar"
if val.ndim != 0:
- raise ValueError(
- "invalid ndarray passed to _infer_dtype_from_scalar")
+ raise ValueError(msg)
dtype = val.dtype
val = val.item()
@@ -334,10 +342,18 @@ def _infer_dtype_from_scalar(val):
dtype = np.object_
- elif isinstance(val, (np.datetime64,
- datetime)) and getattr(val, 'tzinfo', None) is None:
- val = lib.Timestamp(val).value
- dtype = np.dtype('M8[ns]')
+ elif isinstance(val, (np.datetime64, datetime)):
+ val = Timestamp(val)
+ if val is NaT or val.tz is None:
+ dtype = np.dtype('M8[ns]')
+ else:
+
+ if pandas_dtype:
+ dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
+ else:
+ # return datetimetz as object
+ return np.object_, val
+ val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = lib.Timedelta(val).value
@@ -361,6 +377,12 @@ def _infer_dtype_from_scalar(val):
elif is_complex(val):
dtype = np.complex_
+ elif pandas_dtype:
+ from pandas.tseries.period import Period
+ if isinstance(val, Period):
+ dtype = PeriodDtype(freq=val.freq)
+ val = val.ordinal
+
return dtype, val
@@ -464,7 +486,7 @@ def conv(r, dtype):
if isnull(r):
pass
elif dtype == _NS_DTYPE:
- r = lib.Timestamp(r)
+ r = Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
@@ -890,3 +912,19 @@ def _find_common_type(types):
return np.dtype('timedelta64[ns]')
return np.find_common_type(types, [])
+
+
+def _cast_scalar_to_array(shape, value, dtype=None):
+ """
+ create np.ndarray of specified shape and dtype, filled with values
+ """
+
+ if dtype is None:
+ dtype, fill_value = _infer_dtype_from_scalar(value)
+ else:
+ fill_value = value
+
+ values = np.empty(shape, dtype=dtype)
+ values.fill(fill_value)
+
+ return values
| - [x] closes #14001
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
Fixed a bug with assignment against block may be coerced incorrectly.
```
s = pd.Series([pd.Timestamp('2011-01-01'), pd.Timestamp('2012-01-01')])
s[1] = 1
s
#0 2011-01-01 00:00:00.000000000
#1 1970-01-01 00:00:00.000000001
# dtype: datetime64[ns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/14145 | 2016-09-04T01:10:37Z | 2017-05-21T19:49:10Z | null | 2017-05-21T19:49:10Z |
Add the steps to setup gbq integration testing to the contributing docs | diff --git a/.travis.yml b/.travis.yml
index 4d3908bc35de4..c6f6d8b81ae59 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -229,14 +229,8 @@ matrix:
- USE_CACHE=true
before_install:
- # gbq secure key
- - if [ -n "$encrypted_1d9d7b1f171b_iv" ]; then
- openssl aes-256-cbc -K $encrypted_1d9d7b1f171b_key
- -iv $encrypted_1d9d7b1f171b_iv -in ci/travis_gbq.json.enc
- -out ci/travis_gbq.json -d;
- export VALID_GBQ_CREDENTIALS=True;
- fi
- echo "before_install"
+ - source ci/travis_process_gbq_encryption.sh
- echo $VIRTUAL_ENV
- export PATH="$HOME/miniconda/bin:$PATH"
- df -h
diff --git a/ci/travis_encrypt_gbq.sh b/ci/travis_encrypt_gbq.sh
new file mode 100755
index 0000000000000..719db67f384e0
--- /dev/null
+++ b/ci/travis_encrypt_gbq.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+GBQ_JSON_FILE=$1
+GBQ_PROJECT_ID=$2
+
+if [[ $# -ne 2 ]]; then
+ echo -e "Too few arguments.\nUsage: ./travis_encrypt_gbq.sh "\
+ "<gbq-json-credentials-file> <gbq-project-id>"
+ exit 1
+fi
+
+if [[ $GBQ_JSON_FILE != *.json ]]; then
+ echo "ERROR: Expected *.json file"
+ exit 1
+fi
+
+if [[ ! -f $GBQ_JSON_FILE ]]; then
+ echo "ERROR: File $GBQ_JSON_FILE does not exist"
+ exit 1
+fi
+
+echo "Encrypting $GBQ_JSON_FILE..."
+read -d "\n" TRAVIS_KEY TRAVIS_IV <<<$(travis encrypt-file $GBQ_JSON_FILE \
+travis_gbq.json.enc -f | grep -o "\w*_iv\|\w*_key");
+
+echo "Adding your secure key and project id to travis_gbq_config.txt ..."
+echo -e "TRAVIS_IV_ENV=$TRAVIS_IV\nTRAVIS_KEY_ENV=$TRAVIS_KEY\n"\
+"GBQ_PROJECT_ID='$GBQ_PROJECT_ID'" > travis_gbq_config.txt
+
+echo "Done. Removing file $GBQ_JSON_FILE"
+rm $GBQ_JSON_FILE
+
+echo -e "Created encrypted credentials file travis_gbq.json.enc.\n"\
+ "NOTE: Do NOT commit the *.json file containing your unencrypted" \
+ "private key"
diff --git a/ci/travis_gbq_config.txt b/ci/travis_gbq_config.txt
new file mode 100644
index 0000000000000..3b68d62f177cc
--- /dev/null
+++ b/ci/travis_gbq_config.txt
@@ -0,0 +1,3 @@
+TRAVIS_IV_ENV=encrypted_1d9d7b1f171b_iv
+TRAVIS_KEY_ENV=encrypted_1d9d7b1f171b_key
+GBQ_PROJECT_ID='pandas-travis'
diff --git a/ci/travis_process_gbq_encryption.sh b/ci/travis_process_gbq_encryption.sh
new file mode 100755
index 0000000000000..7ff4c08f78e37
--- /dev/null
+++ b/ci/travis_process_gbq_encryption.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+source ci/travis_gbq_config.txt
+
+if [[ -n ${!TRAVIS_IV_ENV} ]]; then
+ openssl aes-256-cbc -K ${!TRAVIS_KEY_ENV} -iv ${!TRAVIS_IV_ENV} \
+ -in ci/travis_gbq.json.enc -out ci/travis_gbq.json -d;
+ export GBQ_PROJECT_ID=$GBQ_PROJECT_ID;
+ echo 'Successfully decrypted gbq credentials'
+fi
+
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 54de4d86a48d9..7f336abcaa6d7 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -626,6 +626,44 @@ This will display stderr from the benchmarks, and use your local
Information on how to write a benchmark and how to use asv can be found in the
`asv documentation <http://asv.readthedocs.org/en/latest/writing_benchmarks.html>`_.
+.. _contributing.gbq_integration_tests:
+
+Running Google BigQuery Integration Tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You will need to create a Google BigQuery private key in JSON format in
+order to run Google BigQuery integration tests on your local machine and
+on Travis-CI. The first step is to create a `service account
+<https://console.developers.google.com/iam-admin/serviceaccounts/>`__.
+
+Integration tests for ``pandas.io.gbq`` are skipped in pull requests because
+the credentials that are required for running Google BigQuery integration
+tests are `encrypted <https://docs.travis-ci.com/user/encrypting-files/>`__
+on Travis-CI and are only accessible from the pydata/pandas repository. The
+credentials won't be available on forks of pandas. Here are the steps to run
+gbq integration tests on a forked repository:
+
+#. First, complete all the steps in the `Encrypting Files Prerequisites
+ <https://docs.travis-ci.com/user/encrypting-files/>`__ section.
+#. Sign into `Travis <https://travis-ci.org/>`__ using your GitHub account.
+#. Enable your forked repository of pandas for testing in `Travis
+ <https://travis-ci.org/profile/>`__.
+#. Run the following command from terminal where the current working directory
+ is the ``ci`` folder::
+
+ ./travis_encrypt_gbq.sh <gbq-json-credentials-file> <gbq-project-id>
+
+#. Create a new branch from the branch used in your pull request. Commit the
+ encrypted file called ``travis_gbq.json.enc`` as well as the file
+ ``travis_gbq_config.txt``, in an otherwise empty commit. DO NOT commit the
+ ``*.json`` file which contains your unencrypted private key.
+#. Your branch should be tested automatically once it is pushed. You can check
+ the status by visiting your Travis branches page which exists at the
+ following location: https://travis-ci.org/your-user-name/pandas/branches .
+ Click on a build job for your branch. Expand the following line in the
+ build log: ``ci/print_skipped.py /tmp/nosetests.xml`` . Search for the
+ term ``test_gbq`` and confirm that gbq integration tests are not skipped.
+
Running the vbench performance test suite (phasing out)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -814,6 +852,11 @@ updated. Pushing them to GitHub again is done by::
This will automatically update your pull request with the latest code and restart the
Travis-CI tests.
+If your pull request is related to the ``pandas.io.gbq`` module, please see
+the section on :ref:`Running Google BigQuery Integration Tests
+<contributing.gbq_integration_tests>` to configure a Google BigQuery service
+account for your pull request on Travis-CI.
+
Delete your merged branch (optional)
------------------------------------
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py
index 7757950592da5..921fd824d6ffd 100644
--- a/pandas/io/tests/test_gbq.py
+++ b/pandas/io/tests/test_gbq.py
@@ -60,12 +60,12 @@ def _skip_if_no_private_key_contents():
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
- 'VALID_GBQ_CREDENTIALS' in os.environ
+ 'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
- return 'pandas-travis'
+ return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
| Pull requests from forked repositories cannot access the existing secure Google BigQuery credentials which are on Travis at pydata/pandas. In order for contributors to run Google BigQuery integration tests on Travis, they need to create a pull request on their forked repository of pandas with their own secure Google BigQuery credentials.
I've updated the contributing documentation to include the steps required to run Google BigQuery integration tests on a forked repository of pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14144 | 2016-09-03T13:43:30Z | 2016-09-07T13:22:53Z | 2016-09-07T13:22:53Z | 2016-09-07T13:23:04Z |
TST: fix blosc version | diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip
index d16b932c8be4f..44e1695bf1a7f 100644
--- a/ci/requirements-2.7.pip
+++ b/ci/requirements-2.7.pip
@@ -1,4 +1,4 @@
-blosc
+blosc==1.4.1
httplib2
google-api-python-client==1.2
python-gflags==2.0
| something wrong with update for blosc=1.4.3 (latest via pip).
| https://api.github.com/repos/pandas-dev/pandas/pulls/14142 | 2016-09-02T23:48:14Z | 2016-09-02T23:52:39Z | 2016-09-02T23:52:39Z | 2016-09-02T23:52:39Z |
TST: sparse / dummy array comparisons on windows, xref #14140 | diff --git a/pandas/sparse/tests/test_list.py b/pandas/sparse/tests/test_list.py
index 0b933b4f9c6f2..b117685b6e968 100644
--- a/pandas/sparse/tests/test_list.py
+++ b/pandas/sparse/tests/test_list.py
@@ -60,8 +60,11 @@ def test_append_zero(self):
splist.append(arr[5])
splist.append(arr[6:])
+ # list always produces int64, but SA constructor
+ # is platform dtype aware
sparr = splist.to_array()
- tm.assert_sp_array_equal(sparr, SparseArray(arr, fill_value=0))
+ exp = SparseArray(arr, fill_value=0)
+ tm.assert_sp_array_equal(sparr, exp, check_dtype=False)
def test_consolidate(self):
with tm.assert_produces_warning(FutureWarning,
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index 413724d1a6177..80d1f5f76e5a9 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -323,7 +323,7 @@ def test_dataframe_dummies_prefix_str(self):
[3, 1, 0, 0, 1]],
columns=['C', 'bad_a', 'bad_b', 'bad_b', 'bad_c'],
dtype=np.uint8)
- expected = expected.astype({"C": np.int})
+ expected = expected.astype({"C": np.int64})
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index d50a6c460ceb5..f5a93d1f17d00 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1385,11 +1385,22 @@ def assert_panelnd_equal(left, right,
# Sparse
-def assert_sp_array_equal(left, right):
+def assert_sp_array_equal(left, right, check_dtype=True):
+ """Check that the left and right SparseArray are equal.
+
+ Parameters
+ ----------
+ left : SparseArray
+ right : SparseArray
+ check_dtype : bool, default True
+ Whether to check the data dtype is identical.
+ """
+
assertIsInstance(left, pd.SparseArray, '[SparseArray]')
assertIsInstance(right, pd.SparseArray, '[SparseArray]')
- assert_numpy_array_equal(left.sp_values, right.sp_values)
+ assert_numpy_array_equal(left.sp_values, right.sp_values,
+ check_dtype=check_dtype)
# SparseIndex comparison
assertIsInstance(left.sp_index, pd._sparse.SparseIndex, '[SparseIndex]')
@@ -1400,8 +1411,10 @@ def assert_sp_array_equal(left, right):
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
- assert_attr_equal('dtype', left, right)
- assert_numpy_array_equal(left.values, right.values)
+ if check_dtype:
+ assert_attr_equal('dtype', left, right)
+ assert_numpy_array_equal(left.values, right.values,
+ check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
| partial on #14140
| https://api.github.com/repos/pandas-dev/pandas/pulls/14141 | 2016-09-02T23:35:24Z | 2016-09-03T00:00:37Z | 2016-09-03T00:00:37Z | 2016-09-03T00:00:37Z |
BUG: Don't print stray newline with MultiIndex | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index 4365c66237752..a02e6ac200e42 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1355,6 +1355,7 @@ Bug Fixes
- Bug in using NumPy ufunc with ``PeriodIndex`` to add or subtract integer raise ``IncompatibleFrequency``. Note that using standard operator like ``+`` or ``-`` is recommended, because standard operators use more efficient path (:issue:`13980`)
- Bug in operations on ``NaT`` returning ``float`` instead of ``datetime64[ns]`` (:issue:`12941`)
- Bug in ``Series`` flexible arithmetic methods (like ``.add()``) raises ``ValueError`` when ``axis=None`` (:issue:`13894`)
+- Bug in ``DataFrame.to_csv()`` with ``MultiIndex`` columns in which a stray empty line was added (:issue:`6618`)
- Bug in ``Index`` raises ``KeyError`` displaying incorrect column when column is not in the df and columns contains duplicate values (:issue:`13822`)
diff --git a/pandas/formats/format.py b/pandas/formats/format.py
index dd9a852bd8713..4740dd25c419d 100644
--- a/pandas/formats/format.py
+++ b/pandas/formats/format.py
@@ -1524,9 +1524,9 @@ def _save_header(self):
if not has_mi_columns:
encoded_labels += list(write_cols)
-
- # write out the mi
- if has_mi_columns:
+ writer.writerow(encoded_labels)
+ else:
+ # write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
@@ -1547,12 +1547,12 @@ def _save_header(self):
writer.writerow(col_line)
- # add blanks for the columns, so that we
- # have consistent seps
- encoded_labels.extend([''] * len(columns))
-
- # write out the index label line
- writer.writerow(encoded_labels)
+ # Write out the index line if it's not empty.
+ # Otherwise, we will print out an extraneous
+ # blank line between the mi and the data rows.
+ if encoded_labels and set(encoded_labels) != set(['']):
+ encoded_labels.extend([''] * len(columns))
+ writer.writerow(encoded_labels)
def _save(self):
diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py
index 0a2e63a018799..7e55c04fec7cc 100644
--- a/pandas/tests/formats/test_format.py
+++ b/pandas/tests/formats/test_format.py
@@ -3327,6 +3327,33 @@ def test_to_csv_date_format(self):
self.assertEqual(df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d'),
expected_ymd_sec)
+ def test_to_csv_multi_index(self):
+ # see gh-6618
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1],[2]]))
+
+ exp = ",1\n,2\n0,1\n"
+ self.assertEqual(df.to_csv(), exp)
+
+ exp = "1\n2\n1\n"
+ self.assertEqual(df.to_csv(index=False), exp)
+
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1],[2]]),
+ index=pd.MultiIndex.from_arrays([[1],[2]]))
+
+ exp = ",,1\n,,2\n1,2,1\n"
+ self.assertEqual(df.to_csv(), exp)
+
+ exp = "1\n2\n1\n"
+ self.assertEqual(df.to_csv(index=False), exp)
+
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([['foo'],['bar']]))
+
+ exp = ",foo\n,bar\n0,1\n"
+ self.assertEqual(df.to_csv(), exp)
+
+ exp = "foo\nbar\n1\n"
+ self.assertEqual(df.to_csv(index=False), exp)
+
def test_period(self):
# GH 12615
df = pd.DataFrame({'A': pd.period_range('2013-01',
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 54bcb670caaef..6d09378ca864e 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -587,21 +587,9 @@ def _make_frame(names=None):
df = _make_frame(True)
df.to_csv(path, tupleize_cols=False)
- # catch invalid headers
- with assertRaisesRegexp(CParserError,
- 'Passed header=\[0,1,2\] are too many '
- 'rows for this multi_index of columns'):
- read_csv(path, tupleize_cols=False,
- header=lrange(3), index_col=0)
-
- with assertRaisesRegexp(CParserError,
- 'Passed header=\[0,1,2,3,4,5,6\], len of '
- '7, but only 6 lines in file'):
- read_csv(path, tupleize_cols=False,
- header=lrange(7), index_col=0)
-
- for i in [4, 5, 6]:
- with tm.assertRaises(CParserError):
+ for i in [5, 6, 7]:
+ msg = 'len of {i}, but only 5 lines in file'.format(i=i)
+ with assertRaisesRegexp(CParserError, msg):
read_csv(path, tupleize_cols=False,
header=lrange(i), index_col=0)
| Title is self-explanatory.
Closes #6618.
| https://api.github.com/repos/pandas-dev/pandas/pulls/14132 | 2016-09-01T05:15:08Z | 2016-09-02T11:19:42Z | null | 2016-09-02T22:50:48Z |
API/DEPR: Remove +/- as setops for Index (GH8227) | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index a422e667e32a7..fd9446cc45c08 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -919,6 +919,43 @@ of ``int64`` (:issue:`13988`)
pi = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
pi.values
+
+.. _whatsnew_0190.api.setops:
+
+Index ``+`` / ``-`` no longer used for set operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Addition and subtraction of the base Index type (not the numeric subclasses)
+previously performed set operations (set union and difference). This
+behaviour was already deprecated since 0.15.0 (in favor using the specific
+``.union()`` and ``.difference()`` methods), and is now disabled. When
+possible, ``+`` and ``-`` are now used for element-wise operations, for
+example for concatenating strings (:issue:`8227`, :issue:`14127`).
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [1]: pd.Index(['a', 'b']) + pd.Index(['a', 'c'])
+ FutureWarning: using '+' to provide set union with Indexes is deprecated, use '|' or .union()
+ Out[1]: Index(['a', 'b', 'c'], dtype='object')
+
+The same operation will now perform element-wise addition:
+
+.. ipython:: python
+
+ pd.Index(['a', 'b']) + pd.Index(['a', 'c'])
+
+Note that numeric Index objects already performed element-wise operations.
+For example, the behaviour of adding two integer Indexes:
+
+.. ipython:: python
+
+ pd.Index([1, 2, 3]) + pd.Index([2, 3, 4])
+
+is unchanged. The base ``Index`` is now made consistent with this behaviour.
+
+
.. _whatsnew_0190.api.difference:
``Index.difference`` and ``.symmetric_difference`` changes
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index dac0e650cb923..d4ca18a6713b5 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -1739,28 +1739,16 @@ def argsort(self, *args, **kwargs):
return result.argsort(*args, **kwargs)
def __add__(self, other):
- if is_list_like(other):
- warnings.warn("using '+' to provide set union with Indexes is "
- "deprecated, use '|' or .union()", FutureWarning,
- stacklevel=2)
- if isinstance(other, Index):
- return self.union(other)
return Index(np.array(self) + other)
def __radd__(self, other):
- if is_list_like(other):
- warnings.warn("using '+' to provide set union with Indexes is "
- "deprecated, use '|' or .union()", FutureWarning,
- stacklevel=2)
return Index(other + np.array(self))
__iadd__ = __add__
def __sub__(self, other):
- warnings.warn("using '-' to provide set differences with Indexes is "
- "deprecated, use .difference()", FutureWarning,
- stacklevel=2)
- return self.difference(other)
+ raise TypeError("cannot perform __sub__ with this index type: "
+ "{typ}".format(typ=type(self)))
def __and__(self, other):
return self.intersection(other)
@@ -1990,7 +1978,8 @@ def symmetric_difference(self, other, result_name=None):
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
- ``(idx1 - idx2) + (idx2 - idx1)`` with duplicates dropped.
+ ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
+ dropped.
Examples
--------
@@ -3333,8 +3322,8 @@ def _evaluate_compare(self, other):
cls.__ge__ = _make_compare(operator.ge)
@classmethod
- def _add_numericlike_set_methods_disabled(cls):
- """ add in the numeric set-like methods to disable """
+ def _add_numeric_methods_add_sub_disabled(cls):
+ """ add in the numeric add/sub methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
@@ -3349,7 +3338,7 @@ def invalid_op(self, other=None):
@classmethod
def _add_numeric_methods_disabled(cls):
- """ add in numeric methods to disable """
+ """ add in numeric methods to disable other than add/sub """
def _make_invalid_op(name):
def invalid_op(self, other=None):
diff --git a/pandas/indexes/category.py b/pandas/indexes/category.py
index d4fc746c652ca..c1f5d47e1e04f 100644
--- a/pandas/indexes/category.py
+++ b/pandas/indexes/category.py
@@ -649,7 +649,7 @@ def _add_accessors(cls):
typ='method', overwrite=True)
-CategoricalIndex._add_numericlike_set_methods_disabled()
+CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index f42410fcdf098..09c755b2c9792 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -2219,6 +2219,7 @@ def isin(self, values, level=None):
MultiIndex._add_numeric_methods_disabled()
+MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 0ef7e6bf3be97..7f68318d4d7d3 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -730,16 +730,6 @@ def test_union(self):
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
- def test_add(self):
-
- # - API change GH 8226
- with tm.assert_produces_warning():
- self.strIndex + self.strIndex
- with tm.assert_produces_warning():
- self.strIndex + self.strIndex.tolist()
- with tm.assert_produces_warning():
- self.strIndex.tolist() + self.strIndex
-
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
@@ -755,6 +745,13 @@ def test_add(self):
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
+ def test_add(self):
+ idx = self.strIndex
+ expected = Index(self.strIndex.values * 2)
+ self.assert_index_equal(idx + idx, expected)
+ self.assert_index_equal(idx + idx.tolist(), expected)
+ self.assert_index_equal(idx.tolist() + idx, expected)
+
# test add and radd
idx = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
@@ -762,6 +759,13 @@ def test_add(self):
expected = Index(['1a', '1b', '1c'])
self.assert_index_equal('1' + idx, expected)
+ def test_sub(self):
+ idx = self.strIndex
+ self.assertRaises(TypeError, lambda: idx - 'a')
+ self.assertRaises(TypeError, lambda: idx - idx)
+ self.assertRaises(TypeError, lambda: idx - idx.tolist())
+ self.assertRaises(TypeError, lambda: idx.tolist() - idx)
+
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 25de6c5091853..5248f0775d22f 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -1408,21 +1408,24 @@ def test_intersection(self):
# result = self.index & tuples
# self.assertTrue(result.equals(tuples))
- def test_difference(self):
+ def test_sub(self):
first = self.index
- result = first.difference(self.index[-3:])
- # - API change GH 8226
- with tm.assert_produces_warning():
+ # - now raises (previously was set op difference)
+ with tm.assertRaises(TypeError):
first - self.index[-3:]
- with tm.assert_produces_warning():
+ with tm.assertRaises(TypeError):
self.index[-3:] - first
- with tm.assert_produces_warning():
+ with tm.assertRaises(TypeError):
self.index[-3:] - first.tolist()
+ with tm.assertRaises(TypeError):
+ first.tolist() - self.index[-3:]
- self.assertRaises(TypeError, lambda: first.tolist() - self.index[-3:])
+ def test_difference(self):
+ first = self.index
+ result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
| xref #13777, deprecations put in place in #8227
- [x] tests added / passed
- [x] passes `git diff upstream/master | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/14127 | 2016-08-31T10:38:15Z | 2016-09-06T14:30:58Z | 2016-09-06T14:30:58Z | 2016-09-07T13:24:30Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.